# deformes4D_engine.py
# Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
#
# MODIFICATIONS FOR ADUC-SDR:
# Copyright (C) 2025 Carlos Rodrigues dos Santos. All rights reserved.
#
# This file is part of the ADUC-SDR project. It contains the core logic for
# video fragment generation, latent manipulation, and dynamic editing,
# governed by the ADUC orchestrator.
# This component is licensed under the GNU Affero General Public License v3.0.

import gc
import logging
import os
import subprocess
import time
from dataclasses import dataclass

import gradio as gr
import imageio
import numpy as np
import torch
from PIL import Image, ImageOps

from ltx_manager_helpers import ltx_manager_singleton
from gemini_helpers import gemini_singleton
from latent_enhancer_specialist import latent_enhancer_specialist_singleton
from hd_specialist import hd_specialist_singleton
from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
from audio_specialist import audio_specialist_singleton

logger = logging.getLogger(__name__)


@dataclass
class LatentConditioningItem:
    """Represents a conditioning anchor in latent space for the Camera (Ψ)."""
    latent_tensor: torch.Tensor
    media_frame_number: int
    conditioning_strength: float
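
# A minimal usage sketch: pin a keyframe latent to the first frame of a
# fragment at full strength (`keyframe_latent` is a hypothetical tensor,
# e.g. the output of Deformes4DEngine.pil_to_latent below):
#   anchor = LatentConditioningItem(keyframe_latent,
#                                   media_frame_number=0,
#                                   conditioning_strength=1.0)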


class Deformes4DEngine:
    """
    Implements the Camera (Ψ) and the Distiller (Δ) of the ADUC-SDR architecture.
    Orchestrates the generation, latent post-production, and final rendering
    of the video fragments.
    """
    def __init__(self, ltx_manager, workspace_dir="deformes_workspace"):
        self.ltx_manager = ltx_manager
        self.workspace_dir = workspace_dir
        os.makedirs(self.workspace_dir, exist_ok=True)  # Ensure output files have somewhere to land.
        self._vae = None
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        logger.info("Deformes4D specialist (ADUC-SDR executor) initialized.")

    @property
    def vae(self):
        # Lazily borrow the VAE from the first LTX worker so the model is
        # loaded only once and only when first needed.
        if self._vae is None:
            self._vae = self.ltx_manager.workers[0].pipeline.vae
            self._vae.to(self.device)
            self._vae.eval()
        return self._vae

    # --- HELPER METHODS ---
    @torch.no_grad()
    def pixels_to_latents(self, tensor: torch.Tensor) -> torch.Tensor:
        """Encodes a (B, C, F, H, W) pixel tensor into VAE latent space."""
        tensor = tensor.to(self.device, dtype=self.vae.dtype)
        return vae_encode(tensor, self.vae, vae_per_channel_normalize=True)

    @torch.no_grad()
    def latents_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
        """Decodes a latent tensor back to pixels, with one decode timestep per batch item."""
        latent_tensor = latent_tensor.to(self.device, dtype=self.vae.dtype)
        timestep_tensor = torch.tensor([decode_timestep] * latent_tensor.shape[0], device=self.device, dtype=latent_tensor.dtype)
        return vae_decode(latent_tensor, self.vae, is_video=True, timestep=timestep_tensor, vae_per_channel_normalize=True)
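
    # Latents in this file follow the (B, C, F_latent, H_latent, W_latent)
    # layout; one latent chunk along dim 2 corresponds to 8 pixel frames
    # (FRAMES_PER_LATENT_CHUNK in generate_full_movie), which is also why
    # _generate_video_and_audio_from_latents multiplies shape[2] by 8 when it
    # falls back to computing a duration by hand.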

    def save_video_from_tensor(self, video_tensor: torch.Tensor, path: str, fps: int = 24):
        """Writes a (1, C, F, H, W) pixel tensor in [-1, 1] to an H.264 MP4 file."""
        if video_tensor is None or video_tensor.ndim != 5 or video_tensor.shape[2] == 0:
            return
        # (1, C, F, H, W) -> (F, H, W, C), rescaled from [-1, 1] to [0, 255].
        video_tensor = video_tensor.squeeze(0).permute(1, 2, 3, 0)
        video_tensor = (video_tensor.clamp(-1, 1) + 1) / 2.0
        video_np = (video_tensor.detach().cpu().float().numpy() * 255).astype(np.uint8)
        with imageio.get_writer(path, fps=fps, codec='libx264', quality=8, output_params=['-pix_fmt', 'yuv420p']) as writer:
            for frame in video_np:
                writer.append_data(frame)

    def _preprocess_image_for_latent_conversion(self, image: Image.Image, target_resolution: tuple) -> Image.Image:
        if image.size != target_resolution:
            return ImageOps.fit(image, target_resolution, Image.Resampling.LANCZOS)
        return image

    def pil_to_latent(self, pil_image: Image.Image) -> torch.Tensor:
        """Converts a PIL image into a single-frame latent tensor."""
        image_np = np.array(pil_image).astype(np.float32) / 255.0
        # (H, W, C) -> (1, C, 1, H, W): add batch and single-frame axes for the video VAE.
        tensor = torch.from_numpy(image_np).permute(2, 0, 1).unsqueeze(0).unsqueeze(2)
        tensor = (tensor * 2.0) - 1.0  # Rescale [0, 1] -> [-1, 1].
        return self.pixels_to_latents(tensor)

    def concatenate_videos_ffmpeg(self, video_paths: list[str], output_path: str):
        if not video_paths:
            raise gr.Error("No video fragments to assemble.")
        list_file_path = os.path.join(self.workspace_dir, "concat_list.txt")
        with open(list_file_path, 'w', encoding='utf-8') as f:
            for path in video_paths:
                f.write(f"file '{os.path.abspath(path)}'\n")
        cmd_list = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', list_file_path, '-c', 'copy', output_path]
        logger.info(f"Concatenating {len(video_paths)} video clips into {output_path}...")
        try:
            subprocess.run(cmd_list, check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"FFmpeg error: {e.stderr}")
            raise gr.Error(f"Final video assembly failed. Details: {e.stderr}")

    # --- ADUC-SDR CORE LOGIC ---
    def generate_full_movie(self, keyframes: list, global_prompt: str, storyboard: list,
                            seconds_per_fragment: float, trim_percent: int,
                            handler_strength: float, destination_convergence_strength: float,
                            use_upscaler: bool, use_refiner: bool, use_hd: bool, use_audio: bool,
                            video_resolution: int, use_continuity_director: bool,
                            progress: gr.Progress = gr.Progress()):
        FPS = 24
        FRAMES_PER_LATENT_CHUNK = 8
        ECO_LATENT_CHUNKS = 2
        # Quantize the raw fragment length and the trim to whole latent chunks.
        total_frames_brutos = self._quantize_to_multiple(int(seconds_per_fragment * FPS), FRAMES_PER_LATENT_CHUNK)
        frames_a_podar = self._quantize_to_multiple(int(total_frames_brutos * (trim_percent / 100)), FRAMES_PER_LATENT_CHUNK)
        latents_a_podar = frames_a_podar // FRAMES_PER_LATENT_CHUNK
        DEJAVU_FRAME_TARGET = frames_a_podar - 1 if frames_a_podar > 0 else 0
        DESTINATION_FRAME_TARGET = total_frames_brutos - 1
        base_ltx_params = {"guidance_scale": 2.0, "stg_scale": 0.025, "rescaling_scale": 0.15, "num_inference_steps": 20, "image_cond_noise_scale": 0.00}
        refine_ltx_params = {"motion_prompt": "", "guidance_scale": 1.0, "denoise_strength": 0.35, "refine_steps": 12}
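
        # Worked example of the trim bookkeeping: with seconds_per_fragment=6.0
        # and trim_percent=50, total_frames_brutos = quantize(144, 8) = 144,
        # frames_a_podar = quantize(72, 8) = 72, latents_a_podar = 72 // 8 = 9,
        # DEJAVU_FRAME_TARGET = 71 and DESTINATION_FRAME_TARGET = 143.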
        keyframe_paths = [item[0] if isinstance(item, tuple) else item for item in keyframes]
        story_history = ""
        target_resolution_tuple = (video_resolution, video_resolution)
        eco_latent_for_next_loop, dejavu_latent_for_next_loop = None, None
        latent_fragments, latent_fragment_lengths = [], []
        if len(keyframe_paths) < 2:
            raise gr.Error(f"Generation requires at least 2 keyframes. You provided {len(keyframe_paths)}.")
        num_transitions_to_generate = len(keyframe_paths) - 1
        for i in range(num_transitions_to_generate):
            fragment_index = i + 1
            progress(i / num_transitions_to_generate, desc=f"Generating Latents {fragment_index}/{num_transitions_to_generate}")
            # ... (Gemini decision logic and anchor preparation) ...
            past_keyframe_path = keyframe_paths[i - 1] if i > 0 else keyframe_paths[i]
            start_keyframe_path = keyframe_paths[i]
            destination_keyframe_path = keyframe_paths[i + 1]
            future_story_prompt = storyboard[i + 1] if (i + 1) < len(storyboard) else "The final scene."
            decision = gemini_singleton.get_cinematic_decision(
                global_prompt, story_history,
                past_keyframe_path, start_keyframe_path, destination_keyframe_path,
                storyboard[i - 1] if i > 0 else "The beginning.",
                storyboard[i], future_story_prompt)
            transition_type, motion_prompt = decision["transition_type"], decision["motion_prompt"]
            story_history += f"\n- Act {fragment_index}: {motion_prompt}"
            conditioning_items = []
            if eco_latent_for_next_loop is None:
                # First fragment: anchor frame 0 directly to the starting keyframe.
                img_start = self._preprocess_image_for_latent_conversion(Image.open(start_keyframe_path).convert("RGB"), target_resolution_tuple)
                conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_start), 0, 1.0))
            else:
                # Continuation: re-seed frame 0 with the "eco" latent carried over
                # from the previous fragment, and pin the "dejavu" latent at the
                # trim boundary with the configured handler strength.
                conditioning_items.append(LatentConditioningItem(eco_latent_for_next_loop, 0, 1.0))
                conditioning_items.append(LatentConditioningItem(dejavu_latent_for_next_loop, DEJAVU_FRAME_TARGET, handler_strength))
            img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple)
            conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET, destination_convergence_strength))
            current_ltx_params = {**base_ltx_params, "motion_prompt": motion_prompt}
            latents_brutos = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, total_frames_brutos)
            # Distiller (Δ) handoff: keep the last latents_a_podar + 1 chunks as
            # continuity material for the next fragment.
            last_trim = latents_brutos[:, :, -(latents_a_podar + 1):, :, :].clone()
            eco_latent_for_next_loop = last_trim[:, :, :ECO_LATENT_CHUNKS, :, :].clone()
            dejavu_latent_for_next_loop = last_trim[:, :, -1:, :, :].clone()
            # Trim the handoff region from this fragment's own footage. Guard the
            # degenerate cases: with latents_a_podar <= 1 the original slice
            # [:-(latents_a_podar - 1)] would yield an empty or truncated tensor.
            if latents_a_podar > 1:
                latents_video = latents_brutos[:, :, :-(latents_a_podar - 1), :, :].clone()
            else:
                latents_video = latents_brutos.clone()
            # Drop the first chunk, which overlaps the conditioning anchor at frame 0.
            latents_video = latents_video[:, :, 1:, :, :]
            del last_trim, latents_brutos
            gc.collect()
            torch.cuda.empty_cache()
            if transition_type == "cut":
                # A hard cut breaks latent continuity on purpose.
                eco_latent_for_next_loop, dejavu_latent_for_next_loop = None, None
            if use_upscaler:
                # [REFACTORED] Delegate to the new latent enhancer specialist.
                latents_video = latent_enhancer_specialist_singleton.upscale(latents_video)
            latent_fragments.append(latents_video)
            latent_fragment_lengths.append(latents_video.shape[2])
        del eco_latent_for_next_loop, dejavu_latent_for_next_loop
        gc.collect()
        torch.cuda.empty_cache()
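
        # Each stored fragment still ends with one chunk that duplicates the
        # seed region of the fragment that follows it, so the concatenation
        # below drops the final chunk of every fragment except the last.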
logger.info("--- CONCATENANDO E REFINANDO SUPER-LATENTE ---")
tensors_para_concatenar = [frag.to(self.device)[:, :, :-1, :, :] if i < len(latent_fragments) - 1 else frag.to(self.device) for i, frag in enumerate(latent_fragments)]
del latent_fragments; gc.collect(); torch.cuda.empty_cache()
processed_latents = torch.cat(tensors_para_concatenar, dim=2)
del tensors_para_concatenar; gc.collect(); torch.cuda.empty_cache()
logger.info(f"Concatenação concluída. Shape do super-latente: {processed_latents.shape}")
if use_refiner:
progress(0.8, desc="Refinando continuidade visual...")
# [REATORADO] Chamada para o novo especialista
processed_latents = latent_enhancer_specialist_singleton.refine(processed_latents, **refine_ltx_params)
logger.info("--- DIVIDINDO SUPER-LATENTE E PROCESSANDO FRAGMENTOS INDIVIDUALMENTE ---")
adjusted_lengths = [l - 1 if i < len(latent_fragment_lengths) - 1 else l for i, l in enumerate(latent_fragment_lengths)]
refined_fragments = torch.split(processed_latents, adjusted_lengths, dim=2)
del processed_latents; gc.collect(); torch.cuda.empty_cache()
        final_video_paths = []
        num_final_fragments = len(refined_fragments)
        for i, fragment_latent in enumerate(refined_fragments):
            progress(0.85 + (0.1 * (i / num_final_fragments)), desc=f"Finalizing Clip {i+1}/{num_final_fragments}")
            base_name = f"fragment_{i}_{int(time.time())}"
            current_path = os.path.join(self.workspace_dir, f"{base_name}_temp.mp4")
            if use_audio:
                current_path = self._generate_video_and_audio_from_latents(fragment_latent, global_prompt, base_name)
            else:
                pixel_tensor = self.latents_to_pixels(fragment_latent)
                self.save_video_from_tensor(pixel_tensor, current_path, fps=24)
                del pixel_tensor
                gc.collect()
                torch.cuda.empty_cache()
            if use_hd:
                hd_output_path = os.path.join(self.workspace_dir, f"{base_name}_hd.mp4")
                try:
                    hd_specialist_singleton.process_video(input_video_path=current_path, output_video_path=hd_output_path, prompt=" ")
                    os.remove(current_path)
                    final_video_paths.append(hd_output_path)
                except Exception as e:
                    logger.error(f"HD mastering failed for fragment {i+1}: {e}. Using the standard version.")
                    os.rename(current_path, hd_output_path)
                    final_video_paths.append(hd_output_path)
            else:
                final_video_paths.append(current_path)
        del refined_fragments
        gc.collect()
        torch.cuda.empty_cache()
        progress(0.98, desc="Final assembly...")
        final_movie_path = os.path.join(self.workspace_dir, f"movie_{int(time.time())}_FINAL.mp4")
        self.concatenate_videos_ffmpeg(final_video_paths, final_movie_path)
        for path in final_video_paths:
            if os.path.exists(path):
                os.remove(path)
        logger.info(f"Process complete! Final video saved to: {final_movie_path}")
        yield {"final_path": final_movie_path}

    def _generate_video_and_audio_from_latents(self, latent_tensor, audio_prompt, base_name):
        silent_video_path = os.path.join(self.workspace_dir, f"{base_name}_silent_for_audio.mp4")
        pixel_tensor = self.latents_to_pixels(latent_tensor)
        self.save_video_from_tensor(pixel_tensor, silent_video_path, fps=24)
        del pixel_tensor
        gc.collect()
        torch.cuda.empty_cache()
        try:
            result = subprocess.run(
                ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", silent_video_path],
                capture_output=True, text=True, check=True)
            frag_duration = float(result.stdout.strip())
        except (subprocess.CalledProcessError, ValueError, FileNotFoundError):
            logger.warning("ffprobe failed. Computing the duration manually.")
            # One latent chunk corresponds to 8 pixel frames at 24 fps.
            num_pixel_frames = latent_tensor.shape[2] * 8
            frag_duration = num_pixel_frames / 24.0
        video_with_audio_path = audio_specialist_singleton.generate_audio_for_video(
            video_path=silent_video_path, prompt=audio_prompt,
            duration_seconds=frag_duration)
        if os.path.exists(silent_video_path):
            os.remove(silent_video_path)
        return video_with_audio_path

    def _generate_latent_tensor_internal(self, conditioning_items, ltx_params, target_resolution, total_frames_to_generate):
        final_ltx_params = {
            **ltx_params, 'width': target_resolution[0], 'height': target_resolution[1],
            'video_total_frames': total_frames_to_generate, 'video_fps': 24,
            'current_fragment_index': int(time.time()), 'conditioning_items_data': conditioning_items
        }
        new_full_latents, _ = self.ltx_manager.generate_latent_fragment(**final_ltx_params)
        gc.collect()
        torch.cuda.empty_cache()
        return new_full_latents

    def _quantize_to_multiple(self, n, m):
        """Rounds n to the nearest multiple of m, never collapsing a positive n to zero."""
        if m == 0:
            return n
        quantized = int(round(n / m) * m)
        return m if n > 0 and quantized == 0 else quantized
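

# A minimal usage sketch (hypothetical wiring; assumes the singletons above are
# configured and that the keyframe images already exist on disk):
#
#   engine = Deformes4DEngine(ltx_manager_singleton)
#   for update in engine.generate_full_movie(
#           keyframes=["kf_0.png", "kf_1.png"],
#           global_prompt="A slow dolly shot through a misty forest.",
#           storyboard=["Act 1: entering the forest.", "Act 2: the clearing."],
#           seconds_per_fragment=6.0, trim_percent=50,
#           handler_strength=0.5, destination_convergence_strength=0.75,
#           use_upscaler=False, use_refiner=False, use_hd=False, use_audio=False,
#           video_resolution=512, use_continuity_director=False):
#       print(update["final_path"])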