eeuuia committed · verified
Commit 5105909 · 1 Parent(s): 5bca1ed

Rename api/ltx/ltx_pool_manager.py to api/ltx/ltx_aduc_manager.py

api/ltx/{ltx_pool_manager.py → ltx_aduc_manager.py} RENAMED
@@ -1,4 +1,4 @@
-# FILE: api/ltx/ltx_pool_manager.py
+# FILE: api/ltx/ltx_aduc_manager.py
 # DESCRIPTION: The "secret weapon". A pool manager for LTX that applies
 # runtime patches to the pipeline for full control and ADUC-SDR compatibility.
 
@@ -9,7 +9,7 @@ import torch
 from diffusers.utils.torch_utils import randn_tensor
 
 # --- Imports from our architecture ---
-from manager.gpu_manager import gpu_manager
+from managers.gpu_manager import gpu_manager
 from api.ltx.ltx_utils import build_ltx_pipeline_on_cpu
 
 def add_deps_to_path():
@@ -43,10 +43,9 @@ class LatentConditioningItem:
 # --- THE MONKEY PATCH ---
 # This is our customized version of `prepare_conditioning`
 # ==============================================================================
-
 def _aduc_prepare_conditioning_patch(
     self: "LTXVideoPipeline",
-    conditioning_items: Optional[List[Union[ConditioningItem, LatentConditioningItem]]],
+    conditioning_items: Optional[List[Union["ConditioningItem", "LatentConditioningItem"]]],
     init_latents: torch.Tensor,
     num_frames: int,
     height: int,
@@ -54,13 +53,52 @@ def _aduc_prepare_conditioning_patch(
     vae_per_channel_normalize: bool = False,
     generator=None,
 ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
-    # This function is a modified copy of yours, with logging and small improvements.
-    # (The patch code you provided goes here, slightly adjusted)
-    # ...
-
+    if not conditioning_items:
+        init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents)
+        init_pixel_coords = latent_to_pixel_coords(init_latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
+        return init_latents, init_pixel_coords, None, 0
+
+    init_conditioning_mask = torch.zeros_like(init_latents[:, 0, ...], dtype=torch.float32, device=init_latents.device)
+    extra_conditioning_latents, extra_conditioning_pixel_coords, extra_conditioning_mask = [], [], []
+    extra_conditioning_num_latents = 0
+
+    for item in conditioning_items:
+        if not isinstance(item, LatentConditioningItem):
+            logger.warning("ADUC patch: conditioning item is not a LatentConditioningItem and will be ignored.")
+            continue
+
+        media_item_latents = item.latent_tensor.to(dtype=init_latents.dtype, device=init_latents.device)
+        media_frame_number, strength = item.media_frame_number, item.conditioning_strength
+
+        if media_frame_number == 0:
+            f_l, h_l, w_l = media_item_latents.shape[-3:]
+            init_latents[..., :f_l, :h_l, :w_l] = torch.lerp(init_latents[..., :f_l, :h_l, :w_l], media_item_latents, strength)
+            init_conditioning_mask[..., :f_l, :h_l, :w_l] = strength
+        else:
+            noise = randn_tensor(media_item_latents.shape, generator=generator, device=media_item_latents.device, dtype=media_item_latents.dtype)
+            media_item_latents = torch.lerp(noise, media_item_latents, strength)
+            patched_latents, latent_coords = self.patchifier.patchify(latents=media_item_latents)
+            pixel_coords = latent_to_pixel_coords(latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
+            pixel_coords[:, 0] += media_frame_number
+            extra_conditioning_num_latents += patched_latents.shape[1]
+            new_mask = torch.full(patched_latents.shape[:2], strength, dtype=torch.float32, device=init_latents.device)
+            extra_conditioning_latents.append(patched_latents)
+            extra_conditioning_pixel_coords.append(pixel_coords)
+            extra_conditioning_mask.append(new_mask)
+
+    init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents)
+    init_pixel_coords = latent_to_pixel_coords(init_latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
+    init_conditioning_mask, _ = self.patchifier.patchify(latents=init_conditioning_mask.unsqueeze(1))
+    init_conditioning_mask = init_conditioning_mask.squeeze(-1)
+
+    if extra_conditioning_latents:
+        init_latents = torch.cat([*extra_conditioning_latents, init_latents], dim=1)
+        init_pixel_coords = torch.cat([*extra_conditioning_pixel_coords, init_pixel_coords], dim=2)
+        init_conditioning_mask = torch.cat([*extra_conditioning_mask, init_conditioning_mask], dim=1)
 
     return init_latents, init_pixel_coords, init_conditioning_mask, extra_conditioning_num_latents
+
+
 
 
 # ==============================================================================
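
Note: the sketch below shows how a patch like this is typically wired up and what a LatentConditioningItem carries. It is illustrative, not part of this commit: the field names are inferred from the attribute accesses in the patch body (latent_tensor, media_frame_number, conditioning_strength), the class-level rebinding is an assumption based on the "MONKEY PATCH" comment, and the ltx_video import path is assumed; only _aduc_prepare_conditioning_patch and build_ltx_pipeline_on_cpu appear in this diff.

# --- Illustrative sketch (not part of this commit) ---
import torch

from api.ltx.ltx_aduc_manager import (
    LatentConditioningItem,
    _aduc_prepare_conditioning_patch,
)
from api.ltx.ltx_utils import build_ltx_pipeline_on_cpu
from ltx_video.pipelines.pipeline_ltx_video import LTXVideoPipeline  # assumed path

# Class-level monkey patch: every pipeline instance now routes
# prepare_conditioning() through the ADUC version, which consumes
# pre-encoded latents instead of pixel-space ConditioningItem media.
LTXVideoPipeline.prepare_conditioning = _aduc_prepare_conditioning_patch

pipeline = build_ltx_pipeline_on_cpu()

# An item anchored at frame 0 is lerped straight into the start of
# init_latents with the given strength (shapes here are illustrative).
item = LatentConditioningItem(
    latent_tensor=torch.randn(1, 128, 2, 16, 16),  # (b, c, f, h, w), assumed layout
    media_frame_number=0,
    conditioning_strength=0.8,
)

The split in the patch is worth noting: frame-0 items are blended into the existing latent grid in place, so the token sequence does not grow, while items at later frames are noised, patchified, and prepended as extra conditioning tokens with their temporal pixel coordinate shifted by media_frame_number; extra_conditioning_num_latents is returned, presumably so the caller can account for those added tokens afterwards.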