import os
import time
import imageio
import numpy as np
import torch
import logging
from PIL import Image, ImageOps
from dataclasses import dataclass
import gradio as gr
import subprocess
import gc

from ltx_manager_helpers import ltx_manager_singleton
from gemini_helpers import gemini_singleton
from upscaler_specialist import upscaler_specialist_singleton
from hd_specialist import hd_specialist_singleton
from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
from audio_specialist import audio_specialist_singleton

logger = logging.getLogger(__name__)


@dataclass
class LatentConditioningItem:
    """Represents a conditioning anchor in latent space for the Camera (Ψ)."""
    latent_tensor: torch.Tensor
    media_frame_number: int
    conditioning_strength: float


class Deformes4DEngine:
    """
    Implements the Camera (Ψ) and the Distiller (Δ) of the ADUC-SDR architecture.
    Orchestrates the generation, latent post-production, and final rendering of the video fragments.
    """

    def __init__(self, ltx_manager, workspace_dir="deformes_workspace"):
        self.ltx_manager = ltx_manager
        self.workspace_dir = workspace_dir
        self._vae = None
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        logger.info("Deformes4D specialist (ADUC-SDR executor) initialized.")

    @property
    def vae(self):
        # Lazily reuse the VAE from the first LTX worker; loaded once, then kept in eval mode on this device.
        if self._vae is None:
            self._vae = self.ltx_manager.workers[0].pipeline.vae
            self._vae.to(self.device)
            self._vae.eval()
        return self._vae

    @torch.no_grad()
    def pixels_to_latents(self, tensor: torch.Tensor) -> torch.Tensor:
        tensor = tensor.to(self.device, dtype=self.vae.dtype)
        return vae_encode(tensor, self.vae, vae_per_channel_normalize=True)

    @torch.no_grad()
    def latents_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
        latent_tensor = latent_tensor.to(self.device, dtype=self.vae.dtype)
        timestep_tensor = torch.tensor([decode_timestep] * latent_tensor.shape[0], device=self.device, dtype=latent_tensor.dtype)
        return vae_decode(latent_tensor, self.vae, is_video=True, timestep=timestep_tensor, vae_per_channel_normalize=True)

    def save_video_from_tensor(self, video_tensor: torch.Tensor, path: str, fps: int = 24):
        if video_tensor is None or video_tensor.ndim != 5 or video_tensor.shape[2] == 0:
            return
        # (B, C, F, H, W) -> (F, H, W, C), rescaled from [-1, 1] to uint8 [0, 255].
        video_tensor = video_tensor.squeeze(0).permute(1, 2, 3, 0)
        video_tensor = (video_tensor.clamp(-1, 1) + 1) / 2.0
        video_np = (video_tensor.detach().cpu().float().numpy() * 255).astype(np.uint8)
        with imageio.get_writer(path, fps=fps, codec='libx264', quality=8, output_params=['-pix_fmt', 'yuv420p']) as writer:
            for frame in video_np:
                writer.append_data(frame)

    def _preprocess_image_for_latent_conversion(self, image: Image.Image, target_resolution: tuple) -> Image.Image:
        if image.size != target_resolution:
            return ImageOps.fit(image, target_resolution, Image.Resampling.LANCZOS)
        return image

    def pil_to_latent(self, pil_image: Image.Image) -> torch.Tensor:
        image_np = np.array(pil_image).astype(np.float32) / 255.0
        # (H, W, C) in [0, 1] -> (1, C, 1, H, W) in [-1, 1]: a single-frame video clip.
        tensor = torch.from_numpy(image_np).permute(2, 0, 1).unsqueeze(0).unsqueeze(2)
        tensor = (tensor * 2.0) - 1.0
        return self.pixels_to_latents(tensor)

    def generate_full_movie(self, keyframes: list, global_prompt: str, storyboard: list,
                            seconds_per_fragment: float, trim_percent: int,
                            handler_strength: float, destination_convergence_strength: float,
                            use_upscaler: bool, use_refiner: bool, use_hd: bool, use_audio: bool,
                            video_resolution: int, use_continuity_director: bool,
                            progress: gr.Progress = gr.Progress()):

        FPS = 24
        FRAMES_PER_LATENT_CHUNK = 8
        ECO_LATENT_CHUNKS = 2

        total_frames_brutos = self._quantize_to_multiple(int(seconds_per_fragment * FPS), FRAMES_PER_LATENT_CHUNK)
        frames_a_podar = self._quantize_to_multiple(int(total_frames_brutos * (trim_percent / 100)), FRAMES_PER_LATENT_CHUNK)
        latents_a_podar = frames_a_podar // FRAMES_PER_LATENT_CHUNK
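        # Worked example (illustrative values, not defaults): with seconds_per_fragment=4.0 and
        # trim_percent=25, int(4.0 * 24) = 96 raw frames (already a multiple of 8),
        # int(96 * 0.25) = 24 frames to trim (also a multiple of 8), and
        # latents_a_podar = 24 // 8 = 3 latent chunks.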

        DEJAVU_FRAME_TARGET = frames_a_podar - 1 if frames_a_podar > 0 else 0
        DESTINATION_FRAME_TARGET = total_frames_brutos - 1

        base_ltx_params = {"guidance_scale": 2.0, "stg_scale": 0.025, "rescaling_scale": 0.15, "num_inference_steps": 20, "image_cond_noise_scale": 0.00}
        keyframe_paths = [item[0] if isinstance(item, tuple) else item for item in keyframes]
        story_history = ""
        target_resolution_tuple = (video_resolution, video_resolution)

        eco_latent_for_next_loop = None
        dejavu_latent_for_next_loop = None

        latent_fragments = []

        if len(keyframe_paths) < 2:
            raise gr.Error(f"Generation requires at least 2 keyframes. You provided {len(keyframe_paths)}.")

        num_transitions_to_generate = len(keyframe_paths) - 1

        for i in range(num_transitions_to_generate):
            fragment_index = i + 1
            progress(i / num_transitions_to_generate, desc=f"Producing transition {fragment_index}/{num_transitions_to_generate}")

            past_keyframe_path = keyframe_paths[i - 1] if i > 0 else keyframe_paths[i]
            start_keyframe_path = keyframe_paths[i]
            destination_keyframe_path = keyframe_paths[i + 1]
            future_story_prompt = storyboard[i + 1] if (i + 1) < len(storyboard) else "The final scene."

            decision = gemini_singleton.get_cinematic_decision(
                global_prompt, story_history, past_keyframe_path, start_keyframe_path, destination_keyframe_path,
                storyboard[i - 1] if i > 0 else "The beginning.", storyboard[i], future_story_prompt
            )
            transition_type, motion_prompt = decision["transition_type"], decision["motion_prompt"]
            story_history += f"\n- Act {fragment_index}: {motion_prompt}"

            # Conditioning anchors: either the start keyframe (first fragment) or the eco/dejavu
            # latents carried over from the previous fragment, plus the destination keyframe.
            conditioning_items = []
            if eco_latent_for_next_loop is None:
                img_start = self._preprocess_image_for_latent_conversion(Image.open(start_keyframe_path).convert("RGB"), target_resolution_tuple)
                conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_start), 0, 1.0))
            else:
                conditioning_items.append(LatentConditioningItem(eco_latent_for_next_loop, 0, 1.0))
                conditioning_items.append(LatentConditioningItem(dejavu_latent_for_next_loop, DEJAVU_FRAME_TARGET, handler_strength))

            img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple)
            conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET, destination_convergence_strength))

            current_ltx_params = {**base_ltx_params, "motion_prompt": motion_prompt}
            latents_brutos = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, total_frames_brutos)

            # Keep the trimmed tail as memory for the next fragment: "eco" = the first latent chunks
            # of the tail, "dejavu" = its last chunk.
            last_trim = latents_brutos[:, :, -(latents_a_podar + 1):, :, :].clone()
            eco_latent_for_next_loop = last_trim[:, :, :ECO_LATENT_CHUNKS, :, :].clone()
            dejavu_latent_for_next_loop = last_trim[:, :, -1:, :, :].clone()

            latents_video = latents_brutos[:, :, :-(latents_a_podar - 1), :, :].clone()
            latents_video = latents_video[:, :, 1:, :, :]

            if transition_type == "cut":
                # A hard cut breaks continuity, so the memory anchors are discarded.
                eco_latent_for_next_loop = None
                dejavu_latent_for_next_loop = None

            if use_upscaler:
                latents_video = self.upscale_latents(latents_video)

            latent_fragments.append(latents_video)

        logger.info("--- CONCATENATING ALL LATENT FRAGMENTS ---")
        tensors_para_concatenar = []
        for idx, tensor_frag in enumerate(latent_fragments):
            target_device = self.device
            tensor_on_target_device = tensor_frag.to(target_device)
            if idx < len(latent_fragments) - 1:
                # Drop the last latent chunk of every fragment except the final one before concatenation.
                tensors_para_concatenar.append(tensor_on_target_device[:, :, :-1, :, :])
            else:
                tensors_para_concatenar.append(tensor_on_target_device)

        processed_latents = torch.cat(tensors_para_concatenar, dim=2)
        logger.info(f"Concatenation finished. Final latent tensor shape: {processed_latents.shape}")

        if use_refiner:
            processed_latents = self.refine_latents(
                processed_latents,
                motion_prompt="",
                guidance_scale=1.0
            )

        base_name = f"movie_{int(time.time())}"

        intermediate_video_path = os.path.join(self.workspace_dir, f"{base_name}_intermediate.mp4")

        if use_audio:
            intermediate_video_path = self._generate_video_and_audio_from_latents(processed_latents, global_prompt, base_name)
        else:
            logger.info("Sound design stage disabled. Rendering silent video.")
            pixel_tensor = self.latents_to_pixels(processed_latents)
            self.save_video_from_tensor(pixel_tensor, intermediate_video_path, fps=24)
            del pixel_tensor

        del processed_latents
        gc.collect()
        torch.cuda.empty_cache()

        final_video_path = os.path.join(self.workspace_dir, f"{base_name}_FINAL.mp4")

        if use_hd:
            progress(0.9, desc="Final mastering (HD)...")
            try:
                hd_specialist_singleton.process_video(
                    input_video_path=intermediate_video_path,
                    output_video_path=final_video_path,
                    prompt=" "
                )
            except Exception as e:
                logger.error(f"HD mastering failed: {e}. Using standard-quality video.")
                os.rename(intermediate_video_path, final_video_path)
        else:
            logger.info("HD editing stage disabled.")
            os.rename(intermediate_video_path, final_video_path)

        logger.info(f"Process finished! Final video saved at: {final_video_path}")
        yield {"final_path": final_video_path}

    def _generate_video_and_audio_from_latents(self, latent_tensor, audio_prompt, base_name):
        silent_video_path = os.path.join(self.workspace_dir, f"{base_name}_silent_for_audio.mp4")
        pixel_tensor = self.latents_to_pixels(latent_tensor)
        self.save_video_from_tensor(pixel_tensor, silent_video_path, fps=24)
        del pixel_tensor
        gc.collect()

        try:
            result = subprocess.run(
                ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", silent_video_path],
                capture_output=True, text=True, check=True)
            frag_duration = float(result.stdout.strip())
        except (subprocess.CalledProcessError, ValueError, FileNotFoundError):
            logger.warning("ffprobe failed. Computing duration manually.")
            num_pixel_frames = latent_tensor.shape[2] * 8
            frag_duration = num_pixel_frames / 24.0

        video_with_audio_path = audio_specialist_singleton.generate_audio_for_video(
            video_path=silent_video_path, prompt=audio_prompt,
            duration_seconds=frag_duration)

        if os.path.exists(silent_video_path):
            os.remove(silent_video_path)
        return video_with_audio_path

    def refine_latents(self, latents: torch.Tensor, fps: int = 24, denoise_strength: float = 0.35, refine_steps: int = 12, motion_prompt: str = "...", **kwargs) -> torch.Tensor:
        logger.info(f"Refining latent tensor with shape {latents.shape}.")
        _, _, num_latent_frames, latent_h, latent_w = latents.shape
        video_scale_factor = getattr(self.vae.config, 'temporal_scale_factor', 8)
        vae_scale_factor = getattr(self.vae.config, 'spatial_downscale_factor', 8)

        pixel_height = latent_h * vae_scale_factor
        pixel_width = latent_w * vae_scale_factor
        pixel_frames = (num_latent_frames - 1) * video_scale_factor
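        # For example (illustrative, assuming both scale factors fall back to 8): latents of shape
        # (1, C, 25, 64, 64) map to pixel_height = pixel_width = 64 * 8 = 512 and
        # pixel_frames = (25 - 1) * 8 = 192.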

        final_ltx_params = {
            "height": pixel_height, "width": pixel_width, "video_total_frames": pixel_frames,
            "video_fps": fps, "motion_prompt": motion_prompt, "current_fragment_index": int(time.time()),
            "denoise_strength": denoise_strength, "refine_steps": refine_steps,
            "guidance_scale": kwargs.get('guidance_scale', 2.0)
        }

        refined_latents_tensor, _ = self.ltx_manager.refine_latents(latents, **final_ltx_params)

        logger.info(f"Returning refined latent tensor with shape: {refined_latents_tensor.shape}")
        return refined_latents_tensor

    def upscale_latents(self, latents: torch.Tensor) -> torch.Tensor:
        logger.info(f"Upscaling latent tensor with shape {latents.shape}.")
        return upscaler_specialist_singleton.upscale(latents)

    def _generate_latent_tensor_internal(self, conditioning_items, ltx_params, target_resolution, total_frames_to_generate):
        final_ltx_params = {
            **ltx_params, 'width': target_resolution[0], 'height': target_resolution[1],
            'video_total_frames': total_frames_to_generate, 'video_fps': 24,
            'current_fragment_index': int(time.time()), 'conditioning_items_data': conditioning_items
        }
        new_full_latents, _ = self.ltx_manager.generate_latent_fragment(**final_ltx_params)
        gc.collect()
        torch.cuda.empty_cache()
        return new_full_latents

    def _quantize_to_multiple(self, n, m):
        if m == 0:
            return n
        quantized = int(round(n / m) * m)
        return m if n > 0 and quantized == 0 else quantized
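

# A minimal usage sketch (not invoked by the Gradio app). It assumes the keyframe image paths
# exist on disk, that the specialist singletons (LTX, Gemini, audio, etc.) are configured for this
# environment, and that ltx_manager_singleton is the manager instance the engine expects; all
# parameter values below are purely illustrative.
if __name__ == "__main__":
    engine = Deformes4DEngine(ltx_manager_singleton)
    os.makedirs(engine.workspace_dir, exist_ok=True)
    for update in engine.generate_full_movie(
        keyframes=["keyframe_0.png", "keyframe_1.png"],  # hypothetical paths
        global_prompt="A slow dolly shot through a foggy forest.",
        storyboard=["Opening shot inside the forest.", "The camera reaches a clearing."],
        seconds_per_fragment=4.0,
        trim_percent=25,
        handler_strength=0.5,
        destination_convergence_strength=0.75,
        use_upscaler=False,
        use_refiner=False,
        use_hd=False,
        use_audio=False,
        video_resolution=512,
        use_continuity_director=False,
        progress=lambda *args, **kwargs: None,  # stand-in for the Gradio progress callback
    ):
        print(update)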