# deformes4D_engine.py
# Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
#
# MODIFICATIONS FOR ADUC-SDR:
# Copyright (C) 2025 Carlos Rodrigues dos Santos. All rights reserved.
#
# This file is part of the ADUC-SDR project. It contains the core logic for
# video fragment generation, latent manipulation, and dynamic editing,
# governed by the ADUC orchestrator.
# This component is licensed under the GNU Affero General Public License v3.0.
import os
import time
import imageio
import numpy as np
import torch
import logging
from PIL import Image, ImageOps
from dataclasses import dataclass
import gradio as gr
import subprocess
import random
import gc
from ltx_manager_helpers import ltx_manager_singleton
from gemini_helpers import gemini_singleton
from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
logger = logging.getLogger(__name__)
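# LatentConditioningItem carries an already-encoded latent tensor, pinned to a
# frame index of the fragment being generated with a blend strength. Passing
# latents (rather than pixel-space images) lets the eco/handler guides below be
# reused across iterations without an extra VAE round trip.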
@dataclass
class LatentConditioningItem:
latent_tensor: torch.Tensor
media_frame_number: int
conditioning_strength: float
class Deformes4DEngine:
def __init__(self, ltx_manager, workspace_dir="deformes_workspace"):
self.ltx_manager = ltx_manager
self.workspace_dir = workspace_dir
self._vae = None
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
logger.info("Especialista Deformes4D (SDR Executor) inicializado.")
@property
def vae(self):
if self._vae is None:
self._vae = self.ltx_manager.workers[0].pipeline.vae
            self._vae.to(self.device)
            self._vae.eval()
return self._vae
    # ... (helper methods such as save/load/pixels_to_latents remain unchanged) ...
def save_latent_tensor(self, tensor: torch.Tensor, path: str):
torch.save(tensor.cpu(), path)
logger.info(f"Tensor latente salvo em: {path}")
def load_latent_tensor(self, path: str) -> torch.Tensor:
tensor = torch.load(path, map_location=self.device)
logger.info(f"Tensor latente carregado de: {path} para o dispositivo {self.device}")
return tensor
@torch.no_grad()
def pixels_to_latents(self, tensor: torch.Tensor) -> torch.Tensor:
tensor = tensor.to(self.device, dtype=self.vae.dtype)
return vae_encode(tensor, self.vae, vae_per_channel_normalize=True)
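    # Note: vae_decode below receives an explicit per-sample timestep; LTX's
    # VAE decoder appears to be timestep-conditioned, and a small value such as
    # the 0.05 default keeps decoding close to a plain reconstruction.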
@torch.no_grad()
def latents_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
latent_tensor = latent_tensor.to(self.device, dtype=self.vae.dtype)
timestep_tensor = torch.tensor([decode_timestep] * latent_tensor.shape[0], device=self.device, dtype=latent_tensor.dtype)
return vae_decode(latent_tensor, self.vae, is_video=True, timestep=timestep_tensor, vae_per_channel_normalize=True)
def save_video_from_tensor(self, video_tensor: torch.Tensor, path: str, fps: int = 24):
        if video_tensor is None or video_tensor.ndim != 5 or video_tensor.shape[2] == 0:
            logger.warning("Attempted to save an invalid video tensor. Aborting.")
            return
        # (B, C, F, H, W) -> (F, H, W, C), then map [-1, 1] to [0, 255] uint8.
        video_tensor = video_tensor.squeeze(0).permute(1, 2, 3, 0)
        video_tensor = (video_tensor.clamp(-1, 1) + 1) / 2.0
        video_np = (video_tensor.detach().cpu().float().numpy() * 255).astype(np.uint8)
        with imageio.get_writer(path, fps=fps, codec='libx264', quality=8) as writer:
            for frame in video_np:
                writer.append_data(frame)
        logger.info(f"Video saved to: {path}")
def _preprocess_image_for_latent_conversion(self, image: Image.Image, target_resolution: tuple) -> Image.Image:
if image.size != target_resolution:
logger.info(f" - AÇÃO: Redimensionando imagem de {image.size} para {target_resolution} antes da conversão para latente.")
return ImageOps.fit(image, target_resolution, Image.Resampling.LANCZOS)
return image
def pil_to_latent(self, pil_image: Image.Image) -> torch.Tensor:
        # (H, W, C) in [0, 1] -> (1, C, 1, H, W) in [-1, 1]: a batch axis and a
        # singleton frame axis are added so the still image encodes as a
        # one-frame video.
        image_np = np.array(pil_image).astype(np.float32) / 255.0
        tensor = torch.from_numpy(image_np).permute(2, 0, 1).unsqueeze(0).unsqueeze(2)
        tensor = (tensor * 2.0) - 1.0
        return self.pixels_to_latents(tensor)
def _generate_video_from_latents(self, latent_tensor, base_name):
silent_video_path = os.path.join(self.workspace_dir, f"{base_name}_silent.mp4")
pixel_tensor = self.latents_to_pixels(latent_tensor)
self.save_video_from_tensor(pixel_tensor, silent_video_path, fps=24)
        del pixel_tensor
        gc.collect()
return silent_video_path
def _generate_latent_tensor_internal(self, conditioning_items, ltx_params, target_resolution, total_frames_to_generate):
        final_ltx_params = {
            **ltx_params,
            'width': target_resolution[0],
            'height': target_resolution[1],
            'video_total_frames': total_frames_to_generate,
            'video_fps': 24,
            'current_fragment_index': int(time.time()),
            'conditioning_items_data': conditioning_items,
        }
new_full_latents, _ = self.ltx_manager.generate_latent_fragment(**final_ltx_params)
return new_full_latents
def concatenate_videos_ffmpeg(self, video_paths: list[str], output_path: str) -> str:
        if not video_paths:
            raise gr.Error("No video fragments to assemble.")
list_file_path = os.path.join(self.workspace_dir, "concat_list.txt")
with open(list_file_path, 'w', encoding='utf-8') as f:
            for path in video_paths:
                f.write(f"file '{os.path.abspath(path)}'\n")
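        # The concat demuxer needs -safe 0 to accept the absolute paths written
        # above; -c copy stitches the fragments without re-encoding.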
cmd_list = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', list_file_path, '-c', 'copy', output_path]
logger.info("Executando concatenação FFmpeg...")
try:
subprocess.run(cmd_list, check=True, capture_output=True, text=True)
except subprocess.CalledProcessError as e:
logger.error(f"Erro no FFmpeg: {e.stderr}")
raise gr.Error(f"Falha na montagem final do vídeo. Detalhes: {e.stderr}")
return output_path
def generate_full_movie(self, keyframes: list, global_prompt: str, storyboard: list,
seconds_per_fragment: float, trim_percent: int,
handler_strength: float, destination_convergence_strength: float,
video_resolution: int, use_continuity_director: bool,
progress: gr.Progress = gr.Progress()):
        # --- [START] Dynamic control logic driven by two sliders ---
        # 1. Compute the total number of chunks to generate from the requested
        #    seconds (at 24 fps, with 8 pixel frames per latent chunk).
        total_chunks_gerados = max(5, int(round(seconds_per_fragment * 24 / 8)))
        # 2. Compute the number of chunks to trim from the percentage, with a
        #    minimum of 4.
        trim_chunks = max(4, int(round(total_chunks_gerados * (trim_percent / 100))))
        # Safety rule so the trim never consumes the entire video.
        if trim_chunks >= total_chunks_gerados:
            trim_chunks = total_chunks_gerados - 1
            logger.warning(f"The requested trim ({trim_percent}%) was too large. Adjusted to {trim_chunks} chunks to leave 1 chunk of video.")
        # 3. Derive slices and frame targets from the computed values.
        VIDEO_CHUNK_COUNT = total_chunks_gerados - trim_chunks
        HANDLER_CHUNK_INDICES = slice(total_chunks_gerados - 2, total_chunks_gerados)
        ECO_CHUNK_INDICES = slice(total_chunks_gerados - 4, total_chunks_gerados - 2)
        HANDLER_FRAME_TARGET = (trim_chunks - 2) * 8
        FRAMES_TO_GENERATE = (total_chunks_gerados - 1) * 8 + 1
        DESTINATION_FRAME_TARGET = FRAMES_TO_GENERATE - 1
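        # Worked example (hypothetical slider values): seconds_per_fragment=4.0
        # and trim_percent=50 give total_chunks_gerados = max(5, round(4.0*24/8)) = 12,
        # trim_chunks = max(4, round(12*0.5)) = 6, VIDEO_CHUNK_COUNT = 6,
        # ECO_CHUNK_INDICES = slice(8, 10), HANDLER_CHUNK_INDICES = slice(10, 12),
        # HANDLER_FRAME_TARGET = (6-2)*8 = 32, FRAMES_TO_GENERATE = 11*8+1 = 89,
        # DESTINATION_FRAME_TARGET = 88.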
logger.info("="*60)
logger.info("MODO DE GERAÇÃO: Estratégia de Cauda Longa Dinâmica")
logger.info(f" - Duração Solicitada: {seconds_per_fragment}s -> Geração Bruta: {total_chunks_gerados} chunks")
logger.info(f" - Poda Solicitada: {trim_percent}% -> Chunks de Poda (Cauda): {trim_chunks}")
logger.info(f" - Clipe Final por Fragmento: {VIDEO_CHUNK_COUNT} chunks")
logger.info(f" - Guia de Eco (Memória): Chunks {ECO_CHUNK_INDICES.start}-{ECO_CHUNK_INDICES.stop-1}")
logger.info(f" - Guia de Handler (Evolução): Chunks {HANDLER_CHUNK_INDICES.start}-{HANDLER_CHUNK_INDICES.stop-1}")
logger.info(f" - PONTO DE APLICAÇÃO DO HANDLER (DINÂMICO): Frame {HANDLER_FRAME_TARGET}")
logger.info("="*60)
base_ltx_params = {"guidance_scale": 1.0, "stg_scale": 0.0, "rescaling_scale": 0.15, "num_inference_steps": 20}
keyframe_paths = [item[0] if isinstance(item, tuple) else item for item in keyframes]
video_clips_paths, story_history = [], ""
target_resolution_tuple = (video_resolution, video_resolution)
eco_latent_for_next_loop = None
handler_latent_for_next_loop = None
if len(keyframe_paths) < 3:
raise gr.Error(f"O modelo de geração requer no mínimo 3 keyframes (Passado, Presente, Futuro). Você forneceu {len(keyframe_paths)}.")
num_transitions_to_generate = len(keyframe_paths) - 2
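        # Each transition consumes a sliding window of three keyframes:
        # past (i), present (i+1), and future destination (i+2).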
for i in range(num_transitions_to_generate):
start_keyframe_index = i + 1
logger.info(f"--- INICIANDO FRAGMENTO {i+1}/{num_transitions_to_generate} ---")
progress((i + 1) / num_transitions_to_generate, desc=f"Produzindo Transição {i+1}/{num_transitions_to_generate}")
past_keyframe_path = keyframe_paths[start_keyframe_index - 1]
start_keyframe_path = keyframe_paths[start_keyframe_index]
destination_keyframe_path = keyframe_paths[start_keyframe_index + 1]
            future_story_prompt = storyboard[start_keyframe_index + 1] if (start_keyframe_index + 1) < len(storyboard) else "The final scene."
decision = gemini_singleton.get_cinematic_decision(
global_prompt, story_history, past_keyframe_path, start_keyframe_path, destination_keyframe_path,
storyboard[start_keyframe_index - 1], storyboard[start_keyframe_index], future_story_prompt
)
_, motion_prompt = decision["transition_type"], decision["motion_prompt"]
            story_history += f"\n- Act {i+1}: {motion_prompt}"
conditioning_items = []
logger.info(" [0. PREPARAÇÃO] Montando itens de condicionamento...")
if i == 0:
img_start = self._preprocess_image_for_latent_conversion(Image.open(start_keyframe_path).convert("RGB"), target_resolution_tuple)
conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_start), 0, 1.0))
else:
conditioning_items.append(LatentConditioningItem(eco_latent_for_next_loop, 0, 1.0))
conditioning_items.append(LatentConditioningItem(handler_latent_for_next_loop, HANDLER_FRAME_TARGET, handler_strength))
img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple)
conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET, destination_convergence_strength))
current_ltx_params = {**base_ltx_params, "motion_prompt": motion_prompt}
new_full_latents = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, FRAMES_TO_GENERATE)
logger.info(f" [1. GERAÇÃO] Tensor latente bruto gerado com shape: {new_full_latents.shape}.")
eco_latent_for_next_loop = new_full_latents[:, :, ECO_CHUNK_INDICES, :, :].clone()
handler_latent_for_next_loop = new_full_latents[:, :, HANDLER_CHUNK_INDICES, :, :].clone()
logger.info(f" [GUIAS] Guias para a próxima iteração extraídas. Eco shape: {eco_latent_for_next_loop.shape}, Handler shape: {handler_latent_for_next_loop.shape}.")
latents_for_video = new_full_latents[:, :, :VIDEO_CHUNK_COUNT, :, :]
logger.info(f" [2. EDIÇÃO] Tensor final para vídeo extraído com {latents_for_video.shape[2]} chunks.")
base_name = f"fragment_{i}_{int(time.time())}"
video_path = self._generate_video_from_latents(latents_for_video, base_name)
video_clips_paths.append(video_path)
yield {"fragment_path": video_path}
final_movie_path = os.path.join(self.workspace_dir, f"final_movie_silent_{int(time.time())}.mp4")
self.concatenate_videos_ffmpeg(video_clips_paths, final_movie_path)
logger.info(f"Filme completo salvo em: {final_movie_path}")
yield {"final_path": final_movie_path}
    def _quantize_to_multiple(self, n, m):
        # Round n to the nearest multiple of m, never collapsing a positive n
        # to zero (the smallest positive multiple is returned instead).
        if m == 0:
            return n
        quantized = int(round(n / m) * m)
        return m if n > 0 and quantized == 0 else quantized
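# Minimal usage sketch (hypothetical wiring; the keyframe paths and storyboard
# entries are placeholders, and the imported singletons are assumed to have
# been configured by the ADUC orchestrator):
#   engine = Deformes4DEngine(ltx_manager_singleton)
#   for update in engine.generate_full_movie(
#       keyframes=["kf_0.png", "kf_1.png", "kf_2.png"],
#       global_prompt="A storm rolls over the coast",
#       storyboard=["calm sea", "gathering clouds", "the final scene"],
#       seconds_per_fragment=4.0, trim_percent=50,
#       handler_strength=0.5, destination_convergence_strength=0.75,
#       video_resolution=512, use_continuity_director=True,
#   ):
#       if "final_path" in update:
#           print("Movie saved at:", update["final_path"])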