# deformes4D_engine.py # Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos # # MODIFICATIONS FOR ADUC-SDR: # Copyright (C) 2025 Carlos Rodrigues dos Santos. All rights reserved. # # This file is part of the ADUC-SDR project. It contains the core logic for # video fragment generation, latent manipulation, and dynamic editing, # governed by the ADUC orchestrator. # This component is licensed under the GNU Affero General Public License v3.0. import os import time import imageio import numpy as np import torch import logging from PIL import Image, ImageOps from dataclasses import dataclass import gradio as gr import subprocess import random import gc from ltx_manager_helpers import ltx_manager_singleton from gemini_helpers import gemini_singleton from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode logger = logging.getLogger(__name__) @dataclass class LatentConditioningItem: latent_tensor: torch.Tensor media_frame_number: int conditioning_strength: float class Deformes4DEngine: def __init__(self, ltx_manager, workspace_dir="deformes_workspace"): self.ltx_manager = ltx_manager self.workspace_dir = workspace_dir self._vae = None self.device = 'cuda' if torch.cuda.is_available() else 'cpu' logger.info("Especialista Deformes4D (SDR Executor) inicializado.") @property def vae(self): if self._vae is None: self._vae = self.ltx_manager.workers[0].pipeline.vae self._vae.to(self.device); self._vae.eval() return self._vae # ... (métodos auxiliares como save/load/pixels_to_latents permanecem os mesmos) ... def save_latent_tensor(self, tensor: torch.Tensor, path: str): torch.save(tensor.cpu(), path) logger.info(f"Tensor latente salvo em: {path}") def load_latent_tensor(self, path: str) -> torch.Tensor: tensor = torch.load(path, map_location=self.device) logger.info(f"Tensor latente carregado de: {path} para o dispositivo {self.device}") return tensor @torch.no_grad() def pixels_to_latents(self, tensor: torch.Tensor) -> torch.Tensor: tensor = tensor.to(self.device, dtype=self.vae.dtype) return vae_encode(tensor, self.vae, vae_per_channel_normalize=True) @torch.no_grad() def latents_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor: latent_tensor = latent_tensor.to(self.device, dtype=self.vae.dtype) timestep_tensor = torch.tensor([decode_timestep] * latent_tensor.shape[0], device=self.device, dtype=latent_tensor.dtype) return vae_decode(latent_tensor, self.vae, is_video=True, timestep=timestep_tensor, vae_per_channel_normalize=True) def save_video_from_tensor(self, video_tensor: torch.Tensor, path: str, fps: int = 24): if video_tensor is None or video_tensor.ndim != 5 or video_tensor.shape[2] == 0: logger.warning("Tentativa de salvar um tensor de vídeo inválido. Abortando.") return video_tensor = video_tensor.squeeze(0).permute(1, 2, 3, 0) video_tensor = (video_tensor.clamp(-1, 1) + 1) / 2.0 video_np = (video_tensor.detach().cpu().float().numpy() * 255).astype(np.uint8) with imageio.get_writer(path, fps=fps, codec='libx264', quality=8) as writer: for frame in video_np: writer.append_data(frame) logger.info(f"Vídeo salvo em: {path}") def _preprocess_image_for_latent_conversion(self, image: Image.Image, target_resolution: tuple) -> Image.Image: if image.size != target_resolution: logger.info(f" - AÇÃO: Redimensionando imagem de {image.size} para {target_resolution} antes da conversão para latente.") return ImageOps.fit(image, target_resolution, Image.Resampling.LANCZOS) return image def pil_to_latent(self, pil_image: Image.Image) -> torch.Tensor: image_np = np.array(pil_image).astype(np.float32) / 255.0 tensor = torch.from_numpy(image_np).permute(2, 0, 1).unsqueeze(0).unsqueeze(2) tensor = (tensor * 2.0) - 1.0 return self.pixels_to_latents(tensor) def _generate_video_from_latents(self, latent_tensor, base_name): silent_video_path = os.path.join(self.workspace_dir, f"{base_name}_silent.mp4") pixel_tensor = self.latents_to_pixels(latent_tensor) self.save_video_from_tensor(pixel_tensor, silent_video_path, fps=24) del pixel_tensor; gc.collect() return silent_video_path def _generate_latent_tensor_internal(self, conditioning_items, ltx_params, target_resolution, total_frames_to_generate): final_ltx_params = {**ltx_params, 'width': target_resolution[0], 'height': target_resolution[1], 'video_total_frames': total_frames_to_generate, 'video_fps': 24, 'current_fragment_index': int(time.time()), 'conditioning_items_data': conditioning_items} new_full_latents, _ = self.ltx_manager.generate_latent_fragment(**final_ltx_params) return new_full_latents def concatenate_videos_ffmpeg(self, video_paths: list[str], output_path: str) -> str: if not video_paths: raise gr.Error("Nenhum fragmento de vídeo para montar.") list_file_path = os.path.join(self.workspace_dir, "concat_list.txt") with open(list_file_path, 'w', encoding='utf-8') as f: for path in video_paths: f.write(f"file '{os.path.abspath(path)}'\n") cmd_list = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', list_file_path, '-c', 'copy', output_path] logger.info("Executando concatenação FFmpeg...") try: subprocess.run(cmd_list, check=True, capture_output=True, text=True) except subprocess.CalledProcessError as e: logger.error(f"Erro no FFmpeg: {e.stderr}") raise gr.Error(f"Falha na montagem final do vídeo. Detalhes: {e.stderr}") return output_path def generate_full_movie(self, keyframes: list, global_prompt: str, storyboard: list, seconds_per_fragment: float, trim_percent: int, handler_strength: float, destination_convergence_strength: float, video_resolution: int, use_continuity_director: bool, progress: gr.Progress = gr.Progress()): # --- [INÍCIO] Lógica de Controle Dinâmico Baseada em Dois Sliders --- # 1. Calcular o total de chunks a serem gerados a partir dos segundos total_chunks_gerados = max(5, int(round(seconds_per_fragment * 24 / 8))) # 2. Calcular o número de chunks a podar com base na porcentagem, com mínimo de 4 trim_chunks = max(4, int(round(total_chunks_gerados * (trim_percent / 100)))) # Regra de segurança para evitar que a poda consuma o vídeo inteiro if trim_chunks >= total_chunks_gerados: trim_chunks = total_chunks_gerados - 1 logger.warning(f"A poda ({trim_percent}%) era muito grande. Ajustada para {trim_chunks} chunks para deixar 1 chunk de vídeo.") # 3. Definir fatias e alvos com base nos cálculos VIDEO_CHUNK_COUNT = total_chunks_gerados - trim_chunks HANDLER_CHUNK_INDICES = slice(total_chunks_gerados - 2, total_chunks_gerados) ECO_CHUNK_INDICES = slice(total_chunks_gerados - 4, total_chunks_gerados - 2) HANDLER_FRAME_TARGET = (trim_chunks - 2) * 8 FRAMES_TO_GENERATE = (total_chunks_gerados - 1) * 8 + 1 DESTINATION_FRAME_TARGET = FRAMES_TO_GENERATE - 1 logger.info("="*60) logger.info("MODO DE GERAÇÃO: Estratégia de Cauda Longa Dinâmica") logger.info(f" - Duração Solicitada: {seconds_per_fragment}s -> Geração Bruta: {total_chunks_gerados} chunks") logger.info(f" - Poda Solicitada: {trim_percent}% -> Chunks de Poda (Cauda): {trim_chunks}") logger.info(f" - Clipe Final por Fragmento: {VIDEO_CHUNK_COUNT} chunks") logger.info(f" - Guia de Eco (Memória): Chunks {ECO_CHUNK_INDICES.start}-{ECO_CHUNK_INDICES.stop-1}") logger.info(f" - Guia de Handler (Evolução): Chunks {HANDLER_CHUNK_INDICES.start}-{HANDLER_CHUNK_INDICES.stop-1}") logger.info(f" - PONTO DE APLICAÇÃO DO HANDLER (DINÂMICO): Frame {HANDLER_FRAME_TARGET}") logger.info("="*60) base_ltx_params = {"guidance_scale": 1.0, "stg_scale": 0.0, "rescaling_scale": 0.15, "num_inference_steps": 20} keyframe_paths = [item[0] if isinstance(item, tuple) else item for item in keyframes] video_clips_paths, story_history = [], "" target_resolution_tuple = (video_resolution, video_resolution) eco_latent_for_next_loop = None handler_latent_for_next_loop = None if len(keyframe_paths) < 3: raise gr.Error(f"O modelo de geração requer no mínimo 3 keyframes (Passado, Presente, Futuro). Você forneceu {len(keyframe_paths)}.") num_transitions_to_generate = len(keyframe_paths) - 2 for i in range(num_transitions_to_generate): start_keyframe_index = i + 1 logger.info(f"--- INICIANDO FRAGMENTO {i+1}/{num_transitions_to_generate} ---") progress((i + 1) / num_transitions_to_generate, desc=f"Produzindo Transição {i+1}/{num_transitions_to_generate}") past_keyframe_path = keyframe_paths[start_keyframe_index - 1] start_keyframe_path = keyframe_paths[start_keyframe_index] destination_keyframe_path = keyframe_paths[start_keyframe_index + 1] future_story_prompt = storyboard[start_keyframe_index + 1] if (start_keyframe_index + 1) < len(storyboard) else "A cena final." decision = gemini_singleton.get_cinematic_decision( global_prompt, story_history, past_keyframe_path, start_keyframe_path, destination_keyframe_path, storyboard[start_keyframe_index - 1], storyboard[start_keyframe_index], future_story_prompt ) _, motion_prompt = decision["transition_type"], decision["motion_prompt"] story_history += f"\n- Ato {i+1}: {motion_prompt}" conditioning_items = [] logger.info(" [0. PREPARAÇÃO] Montando itens de condicionamento...") if i == 0: img_start = self._preprocess_image_for_latent_conversion(Image.open(start_keyframe_path).convert("RGB"), target_resolution_tuple) conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_start), 0, 1.0)) else: conditioning_items.append(LatentConditioningItem(eco_latent_for_next_loop, 0, 1.0)) conditioning_items.append(LatentConditioningItem(handler_latent_for_next_loop, HANDLER_FRAME_TARGET, handler_strength)) img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple) conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET, destination_convergence_strength)) current_ltx_params = {**base_ltx_params, "motion_prompt": motion_prompt} new_full_latents = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, FRAMES_TO_GENERATE) logger.info(f" [1. GERAÇÃO] Tensor latente bruto gerado com shape: {new_full_latents.shape}.") eco_latent_for_next_loop = new_full_latents[:, :, ECO_CHUNK_INDICES, :, :].clone() handler_latent_for_next_loop = new_full_latents[:, :, HANDLER_CHUNK_INDICES, :, :].clone() logger.info(f" [GUIAS] Guias para a próxima iteração extraídas. Eco shape: {eco_latent_for_next_loop.shape}, Handler shape: {handler_latent_for_next_loop.shape}.") latents_for_video = new_full_latents[:, :, :VIDEO_CHUNK_COUNT, :, :] logger.info(f" [2. EDIÇÃO] Tensor final para vídeo extraído com {latents_for_video.shape[2]} chunks.") base_name = f"fragment_{i}_{int(time.time())}" video_path = self._generate_video_from_latents(latents_for_video, base_name) video_clips_paths.append(video_path) yield {"fragment_path": video_path} final_movie_path = os.path.join(self.workspace_dir, f"final_movie_silent_{int(time.time())}.mp4") self.concatenate_videos_ffmpeg(video_clips_paths, final_movie_path) logger.info(f"Filme completo salvo em: {final_movie_path}") yield {"final_path": final_movie_path} def _quantize_to_multiple(self, n, m): if m == 0: return n quantized = int(round(n / m) * m) return m if n > 0 and quantized == 0 else quantized