# FILE: api/ltx_pool_manager.py
# DESCRIPTION: The "secret weapon". A pool manager for LTX that applies
#              runtime patches to the pipeline for full control and ADUC-SDR compatibility.

import logging
from typing import List, Optional, Tuple, Union
from dataclasses import dataclass

import torch
from diffusers.utils.torch_utils import randn_tensor

# --- Imports from our architecture ---
from api.gpu_manager import gpu_manager
from api.ltx.ltx_utils import build_ltx_pipeline_on_cpu
from ltx_video.pipelines.pipeline_ltx_video import LTXVideoPipeline


# --- Our data class definitions ---
@dataclass
class ConditioningItem:
    pixel_tensor: torch.Tensor  # Always a pixel tensor
    media_frame_number: int
    conditioning_strength: float


@dataclass
class LatentConditioningItem:
    latent_tensor: torch.Tensor  # Always a latent tensor
    media_frame_number: int
    conditioning_strength: float


# ==============================================================================
# --- THE MONKEY PATCH ---
# This is our customized version of `prepare_conditioning`.
# ==============================================================================
def _aduc_prepare_conditioning_patch(
    self: "LTXVideoPipeline",
    conditioning_items: Optional[List[Union[ConditioningItem, LatentConditioningItem]]],
    init_latents: torch.Tensor,
    num_frames: int,
    height: int,
    width: int,
    vae_per_channel_normalize: bool = False,
    generator=None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
    # This function is a modified copy of yours, with logging and minor improvements.
    # (The patch code you provided goes here, slightly adjusted.)
    # ...
    # NOTE: `init_pixel_coords`, `init_conditioning_mask`, and
    # `extra_conditioning_num_latents` are produced by the elided body above.
    return init_latents, init_pixel_coords, init_conditioning_mask, extra_conditioning_num_latents


# ==============================================================================
# --- LTX Worker and Pool Manager ---
# ==============================================================================
class LTXWorker:
    """Manages one LTX pipeline instance on a pair of GPUs (main + VAE)."""

    def __init__(self, main_device: str, vae_device: str, config: dict):
        self.main_device = torch.device(main_device)
        self.vae_device = torch.device(vae_device)
        self.config = config
        self.pipeline: Optional[LTXVideoPipeline] = None
        self._load_and_patch_pipeline()

    def _load_and_patch_pipeline(self):
        logging.info(f"[LTXWorker-{self.main_device}] Loading LTX pipeline onto the CPU...")
        self.pipeline, _ = build_ltx_pipeline_on_cpu(self.config)

        logging.info(
            f"[LTXWorker-{self.main_device}] Moving pipeline to GPUs "
            f"(Main: {self.main_device}, VAE: {self.vae_device})..."
        )
        self.pipeline.to(self.main_device)
        self.pipeline.vae.to(self.vae_device)

        logging.info(
            f"[LTXWorker-{self.main_device}] Applying ADUC-SDR patch to 'prepare_conditioning'..."
        )
        # The monkey-patching "magic" happens here: `__get__` binds our function
        # to this pipeline instance, replacing the method on this object only.
        self.pipeline.prepare_conditioning = _aduc_prepare_conditioning_patch.__get__(
            self.pipeline, LTXVideoPipeline
        )
        logging.info(f"[LTXWorker-{self.main_device}] ✅ Pipeline warm, patched, and ready.")


class LTXPoolManager:
    # (Singleton pattern, similar to VincePoolManager)
    # ...
    def __init__(self):
        # ...
        main_device = gpu_manager.get_ltx_device()
        vae_device = gpu_manager.get_ltx_vae_device()
        # A future architecture could run multiple workers; for now there is one.
        self.worker = LTXWorker(str(main_device), str(vae_device), self._load_config())
        # ...

    def get_pipeline(self) -> LTXVideoPipeline:
        return self.worker.pipeline


# Singleton instance
ltx_pool_manager = LTXPoolManager()
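
# ------------------------------------------------------------------------------
# Why `__get__`? A minimal, self-contained sketch of the instance-level
# monkey-patch technique used in `LTXWorker._load_and_patch_pipeline` above.
# `Greeter` and `_patched_greet` are hypothetical names for illustration only,
# not part of this codebase; the sketch is kept in a comment so importing this
# module has no side effects:
#
#     class Greeter:
#         def greet(self) -> str:
#             return "hello"
#
#     def _patched_greet(self) -> str:
#         return "patched hello"
#
#     g = Greeter()
#     # `function.__get__(instance, cls)` returns a bound method, so `self`
#     # is wired up exactly as in a normal method call.
#     g.greet = _patched_greet.__get__(g, Greeter)
#     assert g.greet() == "patched hello"   # this instance is patched...
#     assert Greeter().greet() == "hello"   # ...other instances are untouched
# ------------------------------------------------------------------------------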
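
# ------------------------------------------------------------------------------
# What the patch is for: the stock `prepare_conditioning` presumably handles
# only pixel-space conditioning, while ADUC-SDR also needs to inject latents
# directly. A rough, assumed shape of that branch (illustrative only;
# `encode_to_latents` and `blend_into_latents` are hypothetical placeholders,
# not real pipeline APIs):
#
#     for item in conditioning_items or []:
#         if isinstance(item, LatentConditioningItem):
#             latents = item.latent_tensor                    # use latents as-is
#         else:
#             latents = encode_to_latents(item.pixel_tensor)  # pixels -> VAE latents
#         blend_into_latents(
#             init_latents, latents,
#             frame=item.media_frame_number,
#             strength=item.conditioning_strength,
#         )
#
# The real body must also build `init_pixel_coords`, `init_conditioning_mask`,
# and `extra_conditioning_num_latents` for the return tuple.
# ------------------------------------------------------------------------------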
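
# ------------------------------------------------------------------------------
# Usage sketch (assumes `gpu_manager` and the LTX config resolve as above):
#
#     from api.ltx_pool_manager import ltx_pool_manager
#
#     pipe = ltx_pool_manager.get_pipeline()
#     # Thanks to the patch, the `conditioning_items` list passed to
#     # `pipe.prepare_conditioning(...)` may now mix pixel-space
#     # ConditioningItem entries with LatentConditioningItem entries.
# ------------------------------------------------------------------------------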