eeuuia committed on
Commit
20e5901
·
verified ·
1 Parent(s): 345823d

Update api/ltx/ltx_aduc_pipeline.py

Browse files
Files changed (1) hide show
  1. api/ltx/ltx_aduc_pipeline.py +6 -9
api/ltx/ltx_aduc_pipeline.py CHANGED
@@ -232,8 +232,12 @@ class LtxAducPipeline:
232
  if kwargs.get("ltx_configs_override"):
233
  self._apply_ui_overrides(first_pass_config, kwargs.get("ltx_configs_override"))
234
 
 
235
  # 3. Monta o dicionário de argumentos SEM conditioning_items primeiro
236
  pipeline_kwargs = {
 
 
 
237
  "prompt": kwargs['prompt'],
238
  "negative_prompt": kwargs['negative_prompt'],
239
  "height": downscaled_height,
@@ -242,25 +246,18 @@ class LtxAducPipeline:
242
  "frame_rate": int(DEFAULT_FPS),
243
  "generator": torch.Generator(device=self.main_device).manual_seed(kwargs['seed']),
244
  "output_type": "latent",
245
- #"conditioning_items": conditioning_items if conditioning_items else None,
246
  "media_items": None,
247
  "decode_timestep": self.config["decode_timestep"],
248
  "decode_noise_scale": self.config["decode_noise_scale"],
249
  "stochastic_sampling": self.config["stochastic_sampling"],
250
- "image_cond_noise_scale": 0.01,
251
  "is_video": True,
252
  "vae_per_channel_normalize": True,
253
  "mixed_precision": (self.config["precision"] == "mixed_precision"),
254
  "offload_to_cpu": False,
255
  "enhance_prompt": False,
256
- #"skip_layer_strategy": SkipLayerStrategy.AttentionValues,
257
- **first_pass_config
258
  }
259
-
260
- # --- Bloco de Logging para Depuração ---
261
- # 4. Loga os argumentos do pipeline (sem os tensores de condição)
262
- logging.info(f"\n[Info] Pipeline Arguments (BASE):\n {json.dumps(pipeline_kwargs, indent=2, default=str)}\n")
263
-
264
  # Loga os conditioning_items separadamente com a nossa função helper
265
  conditioning_items_list = kwargs.get('conditioning_items')
266
  self._log_conditioning_items(conditioning_items_list)
 
232
  if kwargs.get("ltx_configs_override"):
233
  self._apply_ui_overrides(first_pass_config, kwargs.get("ltx_configs_override"))
234
 
235
+
236
  # 3. Monta o dicionário de argumentos SEM conditioning_items primeiro
237
  pipeline_kwargs = {
238
+ "num_inference_steps": first_pass_config.get("num_inference_steps"),
239
+ "skip_final_inference_steps": first_pass_config.get("skip_final_inference_steps"),
240
+ "cfg_star_rescale": True,
241
  "prompt": kwargs['prompt'],
242
  "negative_prompt": kwargs['negative_prompt'],
243
  "height": downscaled_height,
 
246
  "frame_rate": int(DEFAULT_FPS),
247
  "generator": torch.Generator(device=self.main_device).manual_seed(kwargs['seed']),
248
  "output_type": "latent",
 
249
  "media_items": None,
250
  "decode_timestep": self.config["decode_timestep"],
251
  "decode_noise_scale": self.config["decode_noise_scale"],
252
  "stochastic_sampling": self.config["stochastic_sampling"],
253
+ "image_cond_noise_scale": 0.05,
254
  "is_video": True,
255
  "vae_per_channel_normalize": True,
256
  "mixed_precision": (self.config["precision"] == "mixed_precision"),
257
  "offload_to_cpu": False,
258
  "enhance_prompt": False,
 
 
259
  }
260
+
 
 
 
 
261
  # Loga os conditioning_items separadamente com a nossa função helper
262
  conditioning_items_list = kwargs.get('conditioning_items')
263
  self._log_conditioning_items(conditioning_items_list)