euiia committed on
Commit
8894134
·
verified ·
1 Parent(s): d354454

Update ltx_manager_helpers.py

Browse files
Files changed (1) hide show
  1. ltx_manager_helpers.py +29 -15
ltx_manager_helpers.py CHANGED
@@ -1,6 +1,15 @@
1
  # ltx_manager_helpers.py
2
  # Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos
3
- # (Licenciamento e cabeçalhos permanecem os mesmos)
 
 
 
 
 
 
 
 
 
4
 
5
  import torch
6
  import gc
@@ -111,7 +120,6 @@ class LtxPoolManager:
111
 
112
  first_pass_config = worker_to_use.config.get("first_pass", {})
113
 
114
- # Correção para o modo de refinamento: não recalcular padding
115
  if 'latents' in kwargs and kwargs['latents'] is not None:
116
  padded_h, padded_w = height, width
117
  padding_vals = (0, 0, 0, 0)
@@ -146,7 +154,6 @@ class LtxPoolManager:
146
 
147
  logger.info("="*60)
148
  logger.info(f"CHAMADA AO PIPELINE LTX NO DISPOSITIVO: {worker_to_use.device}")
149
- # ... (resto do logging)
150
 
151
  return pipeline_params, padding_vals
152
 
@@ -184,29 +191,36 @@ class LtxPoolManager:
184
  def execution_logic(worker, **inner_kwargs):
185
  pipeline_params, padding_vals = self._prepare_and_log_params(worker, **inner_kwargs)
186
 
187
- # --- LÓGICA DE REFINAMENTO EXPLÍCITA E CORRIGIDA ---
188
  strength = inner_kwargs.get('denoise_strength', 0.4)
189
- num_refine_steps = int(inner_kwargs.get('refine_steps', 10))
190
 
191
- scheduler = worker.pipeline.scheduler
192
- scheduler.set_timesteps(num_refine_steps, device=worker.device)
193
- timesteps = scheduler.timesteps
 
 
 
 
 
 
 
 
 
 
194
 
195
- start_timestep_idx = int(num_refine_steps * strength)
196
- if start_timestep_idx >= len(timesteps):
197
- start_timestep_idx = len(timesteps) - 1
198
  start_timestep = timesteps[start_timestep_idx]
199
 
200
  noise = torch.randn_like(upscaled_latents, device=worker.device)
201
- noisy_latents = scheduler.add_noise(upscaled_latents.to(worker.device), noise, start_timestep)
202
 
 
203
  pipeline_params['latents'] = noisy_latents.to(worker.device, dtype=worker.pipeline.transformer.dtype)
204
- pipeline_params['timesteps'] = timesteps[start_timestep_idx:]
205
- pipeline_params['num_inference_steps'] = len(pipeline_params['timesteps'])
206
  pipeline_params.pop('strength', None)
207
  pipeline_params['output_type'] = "latent"
208
 
209
- logger.info("LTX POOL MANAGER: Iniciando passe de refinamento (denoise) com controle manual de ruído.")
210
 
211
  with torch.no_grad():
212
  refined_tensor = worker.generate_video_fragment_internal(**pipeline_params)
 
1
  # ltx_manager_helpers.py
2
  # Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos
3
+ #
4
+ # ORIGINAL SOURCE: LTX-Video by Lightricks Ltd. & other open-source projects.
5
+ # Licensed under the Apache License, Version 2.0
6
+ # https://github.com/Lightricks/LTX-Video
7
+ #
8
+ # MODIFICATIONS FOR ADUC-SDR_Video:
9
+ # This file is part of ADUC-SDR_Video, a derivative work based on LTX-Video.
10
+ # It has been modified to manage pools of LTX workers, handle GPU memory,
11
+ # and prepare parameters for the ADUC-SDR orchestration framework.
12
+ # All modifications are also licensed under the Apache License, Version 2.0.
13
 
14
  import torch
15
  import gc
 
120
 
121
  first_pass_config = worker_to_use.config.get("first_pass", {})
122
 
 
123
  if 'latents' in kwargs and kwargs['latents'] is not None:
124
  padded_h, padded_w = height, width
125
  padding_vals = (0, 0, 0, 0)
 
154
 
155
  logger.info("="*60)
156
  logger.info(f"CHAMADA AO PIPELINE LTX NO DISPOSITIVO: {worker_to_use.device}")
 
157
 
158
  return pipeline_params, padding_vals
159
 
 
191
  def execution_logic(worker, **inner_kwargs):
192
  pipeline_params, padding_vals = self._prepare_and_log_params(worker, **inner_kwargs)
193
 
 
194
  strength = inner_kwargs.get('denoise_strength', 0.4)
195
+ num_refine_steps_requested = int(inner_kwargs.get('refine_steps', 10))
196
 
197
+ allowed_timesteps = worker.config.get("first_pass", {}).get("timesteps")
198
+
199
+ if allowed_timesteps is None:
200
+ scheduler = worker.pipeline.scheduler
201
+ scheduler.set_timesteps(num_refine_steps_requested, device=worker.device)
202
+ timesteps = scheduler.timesteps
203
+ else:
204
+ timesteps = torch.tensor(allowed_timesteps, device=worker.device)
205
+
206
+ num_total_timesteps = len(timesteps)
207
+ start_timestep_idx = int(num_total_timesteps * strength)
208
+ if start_timestep_idx >= num_total_timesteps:
209
+ start_timestep_idx = num_total_timesteps - 1
210
 
 
 
 
211
  start_timestep = timesteps[start_timestep_idx]
212
 
213
  noise = torch.randn_like(upscaled_latents, device=worker.device)
214
+ noisy_latents = worker.pipeline.scheduler.add_noise(upscaled_latents.to(worker.device), noise, start_timestep)
215
 
216
+ final_timesteps = timesteps[start_timestep_idx:]
217
  pipeline_params['latents'] = noisy_latents.to(worker.device, dtype=worker.pipeline.transformer.dtype)
218
+ pipeline_params['timesteps'] = final_timesteps
219
+ pipeline_params['num_inference_steps'] = len(final_timesteps)
220
  pipeline_params.pop('strength', None)
221
  pipeline_params['output_type'] = "latent"
222
 
223
+ logger.info(f"LTX POOL MANAGER: Iniciando refinamento com {len(final_timesteps)} passos a partir do timestep {start_timestep.item():.4f}.")
224
 
225
  with torch.no_grad():
226
  refined_tensor = worker.generate_video_fragment_internal(**pipeline_params)