cyd0806 committed
Commit b3952b6 · verified · 1 Parent(s): 92d7777

Upload src/SubjectGeniusPipeline.py with huggingface_hub

Files changed (1)
  1. src/SubjectGeniusPipeline.py +246 -0
src/SubjectGeniusPipeline.py ADDED
@@ -0,0 +1,246 @@
import ipdb
from accelerate import Accelerator
from diffusers.configuration_utils import register_to_config
from diffusers.pipelines import FluxPipeline
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from .condition import Condition
from diffusers.pipelines.flux.pipeline_flux import (
    FluxPipelineOutput,
    calculate_shift,
    retrieve_timesteps,
    np,
)
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.models import AutoencoderKL, FluxTransformer2DModel


class SubjectGeniusPipeline(FluxPipeline):
    @register_to_config
    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        text_encoder_2: T5EncoderModel,
        tokenizer_2: T5TokenizerFast,
        transformer: FluxTransformer2DModel,
        image_encoder=None,
        feature_extractor=None,
    ):
        super().__init__(
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
            transformer=transformer,
            image_encoder=image_encoder,
            feature_extractor=feature_extractor,
        )

    @property
    def all_adapters(self):
        # e.g. {"unet": ["adapter1", "adapter2"], "text_encoder": ["adapter2"]}
        list_adapters = self.get_list_adapters()
        # e.g. ["adapter1", "adapter2"]
        all_adapters = list({adapter for adapters in list_adapters.values() for adapter in adapters})
        return all_adapters

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        # additional arguments: begin
        conditions: List[Condition] = None,
        model_config: Optional[Dict[str, Any]] = {},
        condition_scale: float = 1.0,
        # additional arguments: end
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: int = 28,
        timesteps: List[int] = None,
        guidance_scale: float = 3.5,
        num_images_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
        accelerator: Accelerator = None,
    ):
        # self.block_mask_routers = nn.ModuleList(
        #     [nn.Sequential(nn.Linear(self.transformer.config.attention_head_dim * self.transformer.config.num_attention_heads, 1, bias=False), nn.Tanh())
        #      for _ in range(self.transformer.config.num_layers)]
        # ).to(accelerator.device, dtype=torch.bfloat16)
        # self.single_block_mask_routers = nn.ModuleList(
        #     [nn.Sequential(nn.Linear(self.transformer.config.attention_head_dim * self.transformer.config.num_attention_heads, 1, bias=False), nn.Tanh())
        #      for _ in range(self.transformer.config.num_single_layers)]
        # ).to(accelerator.device, dtype=torch.bfloat16)

        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise an error if they are not correct.
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        self._guidance_scale = guidance_scale
        self._joint_attention_kwargs = joint_attention_kwargs
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        device = self._execution_device

        lora_scale = (
            self.joint_attention_kwargs.get("scale", None)
            if self.joint_attention_kwargs is not None
            else None
        )
        (
            prompt_embeds,
            pooled_prompt_embeds,
            text_ids,
        ) = self.encode_prompt(
            prompt=prompt,
            prompt_2=prompt_2,
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            max_sequence_length=max_sequence_length,
            lora_scale=lora_scale,
        )

        # 3. Prepare latent variables
        num_channels_latents = self.transformer.config.in_channels // 4
        latents, latent_image_ids = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 4. Prepare conditions
        condition_latents, condition_ids, condition_type_ids, condition_types = ([] for _ in range(4))
        use_condition = conditions is not None

        if use_condition:
            for condition in conditions:
                tokens, ids, type_id = condition.encode(self)
                condition_latents.append(tokens)
                condition_ids.append(ids)
                condition_type_ids.append(type_id)
                condition_types.append(condition.condition_type)

        # 5. Prepare timesteps
        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
        image_seq_len = latents.shape[1]
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.base_image_seq_len,
            self.scheduler.config.max_image_seq_len,
            self.scheduler.config.base_shift,
            self.scheduler.config.max_shift,
        )
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            timesteps,
            sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # Handle guidance: enable the guidance embedding only if the base model's config requests it
        # (e.g. Flux-dev: True; Flux-schnell: False).
        if self.transformer.config.guidance_embeds:
            guidance = torch.full([1], guidance_scale, device=device, dtype=latents.dtype)
            guidance = guidance.expand(latents.shape[0])
        else:
            guidance = None

        # 6. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0]).to(latents.dtype)
                noise_pred, conditional_output = self.transformer(
                    model_config=model_config,
                    # Inputs for the condition branch (new feature)
                    condition_latents=condition_latents if use_condition else None,
                    condition_ids=condition_ids if use_condition else None,
                    condition_type_ids=condition_type_ids if use_condition else None,  # condition_type_ids is not used so far
                    condition_types=condition_types if use_condition else None,
                    return_condition_latents=model_config.get("return_condition_latents", False),
                    # Inputs to the original transformer
                    hidden_states=latents,
                    timestep=timestep / 1000,
                    guidance=guidance,
                    pooled_projections=pooled_prompt_embeds,
                    encoder_hidden_states=prompt_embeds,
                    txt_ids=text_ids,
                    img_ids=latent_image_ids,
                    joint_attention_kwargs=self.joint_attention_kwargs,
                    return_dict=False,
                )

                # compute the previous noisy sample x_t -> x_{t-1}
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                # prepare for and invoke the step-end callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # update the progress bar
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        # 7. Finish the denoising process: decode latents to images
        if output_type == "latent":
            image = latents
        else:
            latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, conditional_output) if model_config.get("return_condition_latents", False) else (image,)

        return FluxPipelineOutput(images=image)
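
For reference, below is a minimal, hypothetical usage sketch of the uploaded pipeline. It assumes a FLUX.1-dev base checkpoint, a package layout mirroring the relative import above (a src/condition.py providing Condition), and a Condition constructor that takes a condition type plus a PIL image; none of these specifics are confirmed by this commit, so names and arguments should be adjusted to the actual repository.

# Hypothetical usage sketch (checkpoint id, module paths, and the Condition signature are assumptions).
import torch
from PIL import Image

from src.SubjectGeniusPipeline import SubjectGeniusPipeline
from src.condition import Condition  # assumed module path, mirroring "from .condition import Condition"

# Load the standard FLUX components into the subclassed pipeline (assumed base checkpoint).
pipe = SubjectGeniusPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
).to("cuda")

# Wrap a subject image as a condition; the constructor arguments here are an assumption.
condition = Condition(condition_type="subject", condition=Image.open("subject.png"))

# Run the modified denoising loop with the extra condition inputs.
result = pipe(
    prompt="a photo of the subject on a sunny beach",
    conditions=[condition],
    height=512,
    width=512,
    num_inference_steps=28,
    guidance_scale=3.5,
    generator=torch.Generator("cuda").manual_seed(0),
)
result.images[0].save("output.png")

With the default return_dict=True the call returns a FluxPipelineOutput, so the generated images are available under result.images; passing return_dict=False together with model_config={"return_condition_latents": True} would instead yield the (image, conditional_output) tuple produced at the end of __call__.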