# FILE: api/ltx/ltx_aduc_pipeline.py
# DESCRIPTION: Final high-level orchestrator for LTX-Video generation.
# This version acts as a client to the specialized managers (LTX, VAE),
# focusing solely on the business logic of video generation workflows.
import gc
import json
import os
import shutil
import sys
import tempfile
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import random
import torch
import yaml
import numpy as np
from PIL import Image
from api.ltx.ltx_utils import seed_everything
from utils.debug_utils import log_function_io
from managers.gpu_manager import gpu_manager
from api.ltx.ltx_aduc_manager import ltx_aduc_manager, LatentConditioningItem
from api.ltx.vae_aduc_pipeline import vae_aduc_pipeline
from tools.video_encode_tool import video_encode_tool_singleton
# ==============================================================================
# --- PROJECT SETUP AND IMPORTS ---
# ==============================================================================
# Logging configuration and warning suppression
import logging
import warnings
warnings.filterwarnings("ignore")  # blanket filter; the original message=".*" rule already suppressed everything
from huggingface_hub import logging as hf_hub_logging
hf_hub_logging.set_verbosity_error()  # successive verbosity calls override each other; keep only the quietest
logger = logging.getLogger("AducDebug")
logging.basicConfig(level=logging.DEBUG)
# --- Configuration Constants ---
DEPS_DIR = Path("/data")
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
RESULTS_DIR = Path("/app/output")
DEFAULT_FPS = 24.0
FRAMES_ALIGNMENT = 8
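# Note: generated frame counts are padded to multiples of FRAMES_ALIGNMENT,
# and the actual pipeline call uses the n*8+1 form (see _align below).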
# Ensure the LTX-Video library is importable
repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
if repo_path not in sys.path:
sys.path.insert(0, repo_path)
from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
# ==============================================================================
# --- SERVICE CLASS (THE ORCHESTRATOR) ---
# ==============================================================================
class LtxAducPipeline:
"""
Orchestrates the high-level logic of video generation, delegating all
low-level tasks to specialized managers and utility modules.
"""
@log_function_io
def __init__(self):
t0 = time.time()
logging.info("Initializing VideoService Orchestrator...")
if ltx_aduc_manager is None or vae_aduc_pipeline is None:
raise RuntimeError("A required manager (LTX or VAE) failed to initialize. Aborting.")
self.pipeline = ltx_aduc_manager.get_pipeline()
self.main_device = self.pipeline.device
self.vae_device = self.pipeline.vae.device
self.config = ltx_aduc_manager.config
self._apply_precision_policy()
logging.info(f"VideoService ready. Using Main: {self.main_device}, VAE: {self.vae_device}. Startup time: {time.time() - t0:.2f}s")
def finalize(self):
"""Cleans up GPU memory after a generation task."""
gc.collect()
if torch.cuda.is_available():
with torch.cuda.device(self.main_device):
torch.cuda.empty_cache()
with torch.cuda.device(self.vae_device):
torch.cuda.empty_cache()
try: torch.cuda.ipc_collect()
except Exception: pass
# ==========================================================================
# --- BUSINESS LOGIC: UNIFIED PUBLIC ORCHESTRATOR ---
# ==========================================================================
@log_function_io
def generate_low_resolution(
self,
prompt_list: List[str],
initial_media_items: Optional[List[Tuple[Union[str, Image.Image, torch.Tensor], int, float]]] = None,
**kwargs
) -> Tuple[Optional[str], Optional[str], Optional[int]]:
"""
[UNIFIED ORCHESTRATOR] Generates a video from a list of prompts and raw media items.
"""
logging.info("Starting unified low-resolution generation...")
used_seed = self._get_random_seed()
seed_everything(used_seed)
logging.info(f"Using randomly generated seed: {used_seed}")
if not prompt_list: raise ValueError("Prompt list cannot be empty.")
is_narrative = len(prompt_list) > 1
num_chunks = len(prompt_list)
total_frames = self._calculate_aligned_frames(kwargs.get("duration", 4.0))
frames_per_chunk = max(FRAMES_ALIGNMENT, (total_frames // num_chunks // FRAMES_ALIGNMENT) * FRAMES_ALIGNMENT)
overlap_frames = 8 if is_narrative else 0
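        # Worked example (assumed values): duration=4.0s at 24 fps -> 96 total
        # frames; with 2 prompts each chunk gets 48 frames, and chunks after
        # the first are generated with an extra 8-frame overlap that is
        # trimmed again before the latents are concatenated.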
initial_conditions = []
if initial_media_items:
logging.info("Delegating to VaeServer to prepare initial conditioning items...")
initial_conditions = vae_aduc_pipeline.generate_conditioning_items(
media_items=[item[0] for item in initial_media_items],
target_frames=[item[1] for item in initial_media_items],
strengths=[item[2] for item in initial_media_items],
target_resolution=(kwargs['height'], kwargs['width'])
)
height_padded, width_padded = (self._align(d) for d in (kwargs['height'], kwargs['width']))
downscale_factor = self.config.get("downscale_factor", 0.6666666)
vae_scale_factor = self.pipeline.vae_scale_factor
downscaled_height = self._align(int(height_padded * downscale_factor), vae_scale_factor)
downscaled_width = self._align(int(width_padded * downscale_factor), vae_scale_factor)
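        # The first pass renders at a reduced resolution (default ~2/3),
        # snapped to the VAE's spatial scale factor; the full requested
        # resolution is expected to be recovered by a separate upscaling pass.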
call_kwargs = self.config.get("first_pass", {}).copy()
stg_mode_str = self.config.get("stg_mode", "attention_values")
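        # Map the stg_mode string from config onto the pipeline's
        # SkipLayerStrategy enum.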
if stg_mode_str.lower() in ["stg_av", "attention_values"]:
call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionValues
elif stg_mode_str.lower() in ["stg_as", "attention_skip"]:
call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionSkip
elif stg_mode_str.lower() in ["stg_r", "residual"]:
call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.Residual
elif stg_mode_str.lower() in ["stg_t", "transformer_block"]:
call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.TransformerBlock
        # Merge the baseline generation parameters into the first-pass config
        # loaded above (a plain reassignment here would discard the
        # skip_layer_strategy selected from stg_mode).
        call_kwargs.update({
"skip_initial_inference_steps": 0,
"skip_final_inference_steps": 0,
"num_inference_steps": 20,
"negative_prompt": kwargs['negative_prompt'],
"height": downscaled_height,
"width": downscaled_width,
"guidance_scale": 4,
"stg_scale": self.config.get("stg_scale", 4),
"rescaling_scale": self.config.get("rescaling_scale", None),
"skip_block_list": self.config.get("skip_block_list", None),
"frame_rate": int(DEFAULT_FPS),
"generator": torch.Generator(device=self.main_device).manual_seed(self._get_random_seed()),
"output_type": "latent",
"media_items": None,
"decode_timestep": self.config.get("decode_timestep", None),
"decode_noise_scale": self.config.get("decode_noise_scale", None),
"stochastic_sampling": self.config.get("stochastic_sampling", None),
"image_cond_noise_scale": 0.15,
"is_video": True,
"vae_per_channel_normalize": True,
"mixed_precision": (self.config["precision"] == "mixed_precision"),
"offload_to_cpu": False,
"enhance_prompt": False,
        })
ltx_configs_override = self.config.get("ltx_configs_override", {}).copy()
call_kwargs.update(ltx_configs_override)
        # initial_conditions is always a list; only attach it when non-empty.
        if initial_conditions:
            call_kwargs["conditioning_items"] = initial_conditions
temp_latent_paths = []
        try:
for i, chunk_prompt in enumerate(prompt_list):
logging.info(f"Processing scene {i+1}/{num_chunks}: '{chunk_prompt[:50]}...'")
current_frames_base = frames_per_chunk if i < num_chunks - 1 else total_frames - ((num_chunks - 1) * frames_per_chunk)
current_frames = current_frames_base + (overlap_frames if i > 0 else 0)
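                # LTX pipelines expect frame counts of the form n*8 + 1,
                # hence the special alignment rule here.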
current_frames = self._align(current_frames, alignment_rule='n*8+1')
call_kwargs.pop("prompt", None)
call_kwargs.pop("num_frames", None)
call_kwargs["prompt"] = chunk_prompt
call_kwargs["num_frames"] = current_frames
with torch.autocast(device_type=self.main_device.type, dtype=self.runtime_autocast_dtype, enabled="cuda" in self.main_device.type):
chunk_latents = self.pipeline(**call_kwargs).images
if chunk_latents is None: raise RuntimeError(f"Failed to generate latents for scene {i+1}.")
if is_narrative and i < num_chunks - 1:
overlap_latents = chunk_latents[:, :, -overlap_frames:, :, :].clone()
overlap_condition_item = LatentConditioningItem(
latent_tensor=overlap_latents,
media_frame_number=0,
conditioning_strength=1.0
)
call_kwargs.pop("conditioning_items", None)
call_kwargs["conditioning_items"] = overlap_condition_item
else:
                    call_kwargs.pop("conditioning_items", None)
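                # Drop the duplicated leading frames of later chunks so the
                # time-axis concatenation below has no repeats.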
if i > 0: chunk_latents = chunk_latents[:, :, overlap_frames:, :, :]
chunk_path = RESULTS_DIR / f"temp_chunk_{i}_{used_seed}.pt"
torch.save(chunk_latents.cpu(), chunk_path)
temp_latent_paths.append(chunk_path)
base_filename = "narrative_video" if is_narrative else "single_video"
            all_tensors_cpu = [torch.load(p) for p in temp_latent_paths]
            final_latents = torch.cat(all_tensors_cpu, dim=2)
            video_path, latents_path = self._finalize_generation(final_latents, base_filename, used_seed)
            return video_path, latents_path, used_seed
        finally:
            # Always remove intermediate chunk files and release GPU memory,
            # even if a scene fails mid-run.
            for p in temp_latent_paths:
                p.unlink(missing_ok=True)
            self.finalize()
# ==========================================================================
# --- WORK UNITS AND INTERNAL HELPERS ---
# ==========================================================================
@log_function_io
def _finalize_generation(self, final_latents: torch.Tensor, base_filename: str, seed: int) -> Tuple[str, str]:
"""Delegates final decoding and encoding to specialist services."""
logging.info("Finalizing generation: decoding latents and encoding video.")
final_latents_path = RESULTS_DIR / f"latents_{base_filename}_{seed}.pt"
torch.save(final_latents, final_latents_path)
logging.info(f"Final latents saved to: {final_latents_path}")
pixel_tensor = vae_aduc_pipeline.decode_to_pixels(
final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05))
)
video_path = self._save_and_log_video(pixel_tensor, f"{base_filename}_{seed}")
return str(video_path), str(final_latents_path)
def _apply_ui_overrides(self, config_dict: Dict, overrides: Dict):
"""Applies advanced settings from the UI to a config dictionary."""
# Override step counts
for key in ["num_inference_steps", "skip_initial_inference_steps", "skip_final_inference_steps"]:
ui_value = overrides.get(key)
if ui_value and ui_value > 0:
config_dict[key] = ui_value
logging.info(f"Override: '{key}' set to {ui_value} by UI.")
@log_function_io
def _save_and_log_video(self, pixel_tensor: torch.Tensor, base_filename: str) -> Path:
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = os.path.join(temp_dir, f"{base_filename}.mp4")
video_encode_tool_singleton.save_video_from_tensor(pixel_tensor, temp_path, fps=DEFAULT_FPS)
final_path = RESULTS_DIR / f"{base_filename}.mp4"
shutil.move(temp_path, final_path)
logging.info(f"Video saved successfully to: {final_path}")
return final_path
def _apply_precision_policy(self):
precision = str(self.config.get("precision", "bfloat16")).lower()
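        # Even for float8_e4m3fn weights, runtime compute still autocasts to
        # bfloat16; this policy only selects the autocast dtype.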
if precision in ["float8_e4m3fn", "bfloat16"]: self.runtime_autocast_dtype = torch.bfloat16
elif precision == "mixed_precision": self.runtime_autocast_dtype = torch.float16
else: self.runtime_autocast_dtype = torch.float32
logging.info(f"Runtime precision policy set for autocast: {self.runtime_autocast_dtype}")
def _align(self, dim: int, alignment: int = FRAMES_ALIGNMENT, alignment_rule: str = 'default') -> int:
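        # e.g. _align(100) -> 104 (next multiple of 8), while
        # _align(100, alignment_rule='n*8+1') -> 97.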
if alignment_rule == 'n*8+1':
return ((dim - 1) // alignment) * alignment + 1
return ((dim - 1) // alignment + 1) * alignment
def _calculate_aligned_frames(self, duration_s: float, min_frames: int = 1) -> int:
num_frames = int(round(duration_s * DEFAULT_FPS))
aligned_frames = self._align(num_frames, alignment=FRAMES_ALIGNMENT)
return max(aligned_frames, min_frames)
def _get_random_seed(self) -> int:
return random.randint(0, 2**32 - 1)
# ==============================================================================
# --- SINGLETON INSTANTIATION ---
# ==============================================================================
ltx_aduc_pipeline = LtxAducPipeline()
logging.info("Global VideoService orchestrator instance created successfully.") |