# deformes4D_engine.py
# Copyright (C) August 4, 2025  Carlos Rodrigues dos Santos
#
# MODIFICATIONS FOR ADUC-SDR:
# Copyright (C) 2025 Carlos Rodrigues dos Santos. All rights reserved.
#
# This file is part of the ADUC-SDR project. It contains the core logic for
# video fragment generation, latent manipulation, and dynamic editing, 
# governed by the ADUC orchestrator.
# This component is licensed under the GNU Affero General Public License v3.0.

import os
import time
import imageio
import numpy as np
import torch
import logging
from PIL import Image, ImageOps
from dataclasses import dataclass
import gradio as gr
import subprocess
import gc

from ltx_manager_helpers import ltx_manager_singleton
from gemini_helpers import gemini_singleton 
from upscaler_specialist import upscaler_specialist_singleton
from hd_specialist import hd_specialist_singleton
from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
from audio_specialist import audio_specialist_singleton

logger = logging.getLogger(__name__)

@dataclass
class LatentConditioningItem:
    """Representa uma âncora de condicionamento no espaço latente para a Câmera (Ψ)."""
    latent_tensor: torch.Tensor
    media_frame_number: int
    conditioning_strength: float
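
# Sketch (illustrative only): how anchors are assembled in generate_full_movie
# below. The names and strengths here are example values, not prescribed defaults:
#   items = [
#       LatentConditioningItem(start_latent, 0, 1.0),                # pin the start frame
#       LatentConditioningItem(dest_latent, last_frame_index, 0.75)  # pull toward the ending
#   ]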

class Deformes4DEngine:
    """
    Implements the Camera (Ψ) and the Distiller (Δ) of the ADUC-SDR architecture.
    Orchestrates the generation, latent post-production, and final rendering of video fragments.
    """
    def __init__(self, ltx_manager, workspace_dir="deformes_workspace"):
        self.ltx_manager = ltx_manager
        self.workspace_dir = workspace_dir
        self._vae = None
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        logger.info("Especialista Deformes4D (Executor ADUC-SDR) inicializado.")

    @property
    def vae(self):
        # Lazily borrow the VAE from the first LTX worker; keep it on-device and in eval mode.
        if self._vae is None:
            self._vae = self.ltx_manager.workers[0].pipeline.vae
        self._vae.to(self.device)
        self._vae.eval()
        return self._vae

    # --- HELPER METHODS ---
    @torch.no_grad()
    def pixels_to_latents(self, tensor: torch.Tensor) -> torch.Tensor:
        tensor = tensor.to(self.device, dtype=self.vae.dtype)
        return vae_encode(tensor, self.vae, vae_per_channel_normalize=True)

    @torch.no_grad()
    def latents_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
        latent_tensor = latent_tensor.to(self.device, dtype=self.vae.dtype)
        timestep_tensor = torch.tensor([decode_timestep] * latent_tensor.shape[0], device=self.device, dtype=latent_tensor.dtype)
        return vae_decode(latent_tensor, self.vae, is_video=True, timestep=timestep_tensor, vae_per_channel_normalize=True)
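
    # Round-trip sketch (illustrative; exact latent shapes depend on the loaded VAE config):
    #   pixels  = torch.randn(1, 3, 9, 512, 512)     # (B, C, F, H, W) in [-1, 1]
    #   latents = engine.pixels_to_latents(pixels)
    #   decoded = engine.latents_to_pixels(latents)  # back to pixel space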

    def save_video_from_tensor(self, video_tensor: torch.Tensor, path: str, fps: int = 24):
        if video_tensor is None or video_tensor.ndim != 5 or video_tensor.shape[2] == 0:
            return
        # (B, C, F, H, W) -> (F, H, W, C), then map [-1, 1] to [0, 255].
        video_tensor = video_tensor.squeeze(0).permute(1, 2, 3, 0)
        video_tensor = (video_tensor.clamp(-1, 1) + 1) / 2.0
        video_np = (video_tensor.detach().cpu().float().numpy() * 255).astype(np.uint8)
        with imageio.get_writer(path, fps=fps, codec='libx264', quality=8, output_params=['-pix_fmt', 'yuv420p']) as writer:
            for frame in video_np:
                writer.append_data(frame)

    def _preprocess_image_for_latent_conversion(self, image: Image.Image, target_resolution: tuple) -> Image.Image:
        if image.size != target_resolution:
            return ImageOps.fit(image, target_resolution, Image.Resampling.LANCZOS)
        return image

    def pil_to_latent(self, pil_image: Image.Image) -> torch.Tensor:
        # (H, W, C) uint8 image -> (1, C, 1, H, W) float tensor in [-1, 1], then VAE-encode.
        image_np = np.array(pil_image).astype(np.float32) / 255.0
        tensor = torch.from_numpy(image_np).permute(2, 0, 1).unsqueeze(0).unsqueeze(2)
        tensor = (tensor * 2.0) - 1.0
        return self.pixels_to_latents(tensor)
        
    # --- CORE ADUC-SDR LOGIC ---
    def generate_full_movie(self, keyframes: list, global_prompt: str, storyboard: list, 
                            seconds_per_fragment: float, trim_percent: int,
                            handler_strength: float, destination_convergence_strength: float, 
                            use_upscaler: bool, use_refiner: bool, use_hd: bool, use_audio: bool,
                            video_resolution: int, use_continuity_director: bool, 
                            progress: gr.Progress = gr.Progress()):
        
        FPS = 24
        FRAMES_PER_LATENT_CHUNK = 8
        ECO_LATENT_CHUNKS = 2
        
        total_frames_brutos = self._quantize_to_multiple(int(seconds_per_fragment * FPS), FRAMES_PER_LATENT_CHUNK)
        frames_a_podar = self._quantize_to_multiple(int(total_frames_brutos * (trim_percent / 100)), FRAMES_PER_LATENT_CHUNK)
        latents_a_podar = frames_a_podar // FRAMES_PER_LATENT_CHUNK

        DEJAVU_FRAME_TARGET = frames_a_podar - 1 if frames_a_podar > 0 else 0
        DESTINATION_FRAME_TARGET = total_frames_brutos - 1
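
        # Worked example (illustrative): seconds_per_fragment=4.0 and trim_percent=50
        # give total_frames_brutos = 96, frames_a_podar = 48, latents_a_podar = 6,
        # DEJAVU_FRAME_TARGET = 47 and DESTINATION_FRAME_TARGET = 95.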
        
        base_ltx_params = {"guidance_scale": 2.0, "stg_scale": 0.025, "rescaling_scale": 0.15, "num_inference_steps": 20, "image_cond_noise_scale": 0.00}
        keyframe_paths = [item[0] if isinstance(item, tuple) else item for item in keyframes]
        story_history = ""
        target_resolution_tuple = (video_resolution, video_resolution) 
        
        eco_latent_for_next_loop = None
        dejavu_latent_for_next_loop = None
        
        # Accumulates each transition's trimmed latent fragment.
        latent_fragments = []
        
        if len(keyframe_paths) < 2:
            raise gr.Error(f"A geração requer no mínimo 2 keyframes. Você forneceu {len(keyframe_paths)}.")
        
        num_transitions_to_generate = len(keyframe_paths) - 1
        
        for i in range(num_transitions_to_generate):
            fragment_index = i + 1
            progress(i / num_transitions_to_generate, desc=f"Producing transition {fragment_index}/{num_transitions_to_generate}")
            
            past_keyframe_path = keyframe_paths[i - 1] if i > 0 else keyframe_paths[i]
            start_keyframe_path = keyframe_paths[i]
            destination_keyframe_path = keyframe_paths[i + 1]
            future_story_prompt = storyboard[i + 1] if (i + 1) < len(storyboard) else "The final scene."
            
            decision = gemini_singleton.get_cinematic_decision(
                global_prompt, story_history, past_keyframe_path, start_keyframe_path, destination_keyframe_path,
                storyboard[i - 1] if i > 0 else "The beginning.", storyboard[i], future_story_prompt
            )
            transition_type, motion_prompt = decision["transition_type"], decision["motion_prompt"]
            story_history += f"\n- Ato {fragment_index}: {motion_prompt}"

            conditioning_items = []
            if eco_latent_for_next_loop is None:
                logger.info("    - First fragment: using the initial keyframe as the starting anchor.")
                img_start = self._preprocess_image_for_latent_conversion(Image.open(start_keyframe_path).convert("RGB"), target_resolution_tuple)
                conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_start), 0, 1.0))
            else:
                logger.info("    - Anchor 1: Causal Echo (C), inherited from the past.")
                conditioning_items.append(LatentConditioningItem(eco_latent_for_next_loop, 0, 1.0))
                logger.info("    - Anchor 2: Déjà-Vu (D), memory of an idealized future.")
                conditioning_items.append(LatentConditioningItem(dejavu_latent_for_next_loop, DEJAVU_FRAME_TARGET, handler_strength))
            
            img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple)
            conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET, destination_convergence_strength))
            
            current_ltx_params = {**base_ltx_params, "motion_prompt": motion_prompt}
            latents_brutos = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, total_frames_brutos)

            # Keep the tail of the raw generation as memory for the next loop: the first
            # ECO_LATENT_CHUNKS latent frames of the tail become the Causal Echo, and the
            # very last latent frame becomes the Déjà-Vu anchor.
            last_trim = latents_brutos[:, :, -(latents_a_podar + 1):, :, :].clone()
            eco_latent_for_next_loop = last_trim[:, :, :ECO_LATENT_CHUNKS, :, :].clone()
            dejavu_latent_for_next_loop = last_trim[:, :, -1:, :, :].clone()

            # Trim the tail from the fragment, then drop the duplicated first latent frame.
            latents_video = latents_brutos[:, :, :-(latents_a_podar - 1), :, :].clone()
            latents_video = latents_video[:, :, 1:, :, :]
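
            # Worked example (illustrative): with 13 latent frames and latents_a_podar = 6,
            # last_trim holds the final 7 latent frames, the echo keeps the first 2 of those,
            # the déjà-vu keeps the very last one, and latents_video retains latent frames
            # 1..7 of the original tensor.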

            if transition_type == "cut":
                eco_latent_for_next_loop = None
                dejavu_latent_for_next_loop = None

            if use_upscaler:
                latents_video = self.upscale_latents(latents_video)
            
            latent_fragments.append(latents_video)
        
    
        logger.info("--- CONCATENANDO nem TODOS OS FRAGMENTOS LATENTES ---")
        tensors_para_concatenar = []
        for idx, tensor_frag in enumerate(latent_fragments):
            # Move each tensor to the target device before appending it to the list.
            tensor_on_target_device = tensor_frag.to(self.device)
            if idx < len(latent_fragments) - 1:
                # Drop the final latent frame of all but the last fragment so the
                # shared boundary frame is not duplicated at each seam.
                tensors_para_concatenar.append(tensor_on_target_device[:, :, :-1, :, :])
            else:
                tensors_para_concatenar.append(tensor_on_target_device)
        processed_latents = torch.cat(tensors_para_concatenar, dim=2)
        logger.info(f"Concatenação concluída. Shape final do tensor latente: {final_concatenated_latents.shape}")
        
        
        
        
        # [CORREÇÃO 2] Referência correta da variável no log
        logger.info(f"Concatenação concluída. Shape final do tensor latente: {processed_latents.shape}")
        
        if use_refiner:
            processed_latents = self.refine_latents(
                processed_latents, 
                motion_prompt="", 
                guidance_scale=1.0
            )
        
        # --- Final rendering, sound design, and HD mastering ---
        base_name = f"movie_{int(time.time())}"
        # Unique path for the video produced by this stage, before HD mastering.
        intermediate_video_path = os.path.join(self.workspace_dir, f"{base_name}_intermediate.mp4")
        
        if use_audio:
            # The audio specialist saves the video with audio to the intermediate path.
            intermediate_video_path = self._generate_video_and_audio_from_latents(processed_latents, global_prompt, base_name)
        else:
            logger.info("Sound design stage disabled. Rendering silent video.")
            pixel_tensor = self.latents_to_pixels(processed_latents)
            self.save_video_from_tensor(pixel_tensor, intermediate_video_path, fps=24)
            del pixel_tensor
        
        del processed_latents; gc.collect(); torch.cuda.empty_cache()
        
        # Final output path.
        final_video_path = os.path.join(self.workspace_dir, f"{base_name}_FINAL.mp4")
        
        if use_hd:
            progress(0.9, desc="Final HD mastering...")
            try:
                # The HD specialist reads intermediate_video_path and writes final_video_path.
                hd_specialist_singleton.process_video(
                    input_video_path=intermediate_video_path,
                    output_video_path=final_video_path,
                    prompt=" "
                )
            except Exception as e:
                logger.error(f"HD mastering failed: {e}. Falling back to the standard-quality video.")
                os.rename(intermediate_video_path, final_video_path)
        else:
            logger.info("HD editing stage disabled.")
            # Without HD, the intermediate video becomes the final one.
            os.rename(intermediate_video_path, final_video_path)
        
        logger.info(f"Processo concluído! Vídeo final salvo em: {final_video_path}")
        yield {"final_path": final_video_path}

    def _generate_video_and_audio_from_latents(self, latent_tensor, audio_prompt, base_name):
        """Renders latents to a silent video, delegates sound design, and returns the path of the audio-muxed video."""
        silent_video_path = os.path.join(self.workspace_dir, f"{base_name}_silent_for_audio.mp4")
        pixel_tensor = self.latents_to_pixels(latent_tensor)
        self.save_video_from_tensor(pixel_tensor, silent_video_path, fps=24)
        del pixel_tensor; gc.collect()
        
        try:
            result = subprocess.run(
                ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", silent_video_path],
                capture_output=True, text=True, check=True)
            frag_duration = float(result.stdout.strip())
        except (subprocess.CalledProcessError, ValueError, FileNotFoundError):
            logger.warning("ffprobe failed. Computing duration manually.")
            # 8 pixel frames per latent chunk at 24 fps (matches FRAMES_PER_LATENT_CHUNK).
            num_pixel_frames = latent_tensor.shape[2] * 8
            frag_duration = num_pixel_frames / 24.0

        video_with_audio_path = audio_specialist_singleton.generate_audio_for_video(
            video_path=silent_video_path, prompt=audio_prompt,
            duration_seconds=frag_duration)
        
        if os.path.exists(silent_video_path):
             os.remove(silent_video_path)
        return video_with_audio_path
        
    def refine_latents(self, latents: torch.Tensor, fps: int = 24, denoise_strength: float = 0.35, refine_steps: int = 12, motion_prompt: str = "...", **kwargs) -> torch.Tensor:
        logger.info(f"Refinando tensor latente com shape {latents.shape}.")
        _, _, num_latent_frames, latent_h, latent_w = latents.shape
        video_scale_factor = getattr(self.vae.config, 'temporal_scale_factor', 8)
        vae_scale_factor = getattr(self.vae.config, 'spatial_downscale_factor', 8)
        
        pixel_height = latent_h * vae_scale_factor
        pixel_width = latent_w * vae_scale_factor
        pixel_frames = (num_latent_frames - 1) * video_scale_factor
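
        # Example (illustrative): 64x64 latents over 13 latent frames with both
        # scale factors at 8 map back to 512x512 pixels and 96 pixel frames.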
        
        final_ltx_params = {
            "height": pixel_height, "width": pixel_width, "video_total_frames": pixel_frames,
            "video_fps": fps, "motion_prompt": motion_prompt, "current_fragment_index": int(time.time()),
            "denoise_strength": denoise_strength, "refine_steps": refine_steps,
            "guidance_scale": kwargs.get('guidance_scale', 2.0)
        }
        
        refined_latents_tensor, _ = self.ltx_manager.refine_latents(latents, **final_ltx_params)
        
        logger.info(f"Retornando tensor latente refinado com shape: {refined_latents_tensor.shape}")
        return refined_latents_tensor
        
    def upscale_latents(self, latents: torch.Tensor) -> torch.Tensor:
        logger.info(f"Realizando upscale em tensor latente com shape {latents.shape}.")
        return upscaler_specialist_singleton.upscale(latents)
        
    def _generate_latent_tensor_internal(self, conditioning_items, ltx_params, target_resolution, total_frames_to_generate):
        final_ltx_params = {
            **ltx_params, 'width': target_resolution[0], 'height': target_resolution[1],
            'video_total_frames': total_frames_to_generate, 'video_fps': 24,
            'current_fragment_index': int(time.time()), 'conditioning_items_data': conditioning_items
        }
        new_full_latents, _ = self.ltx_manager.generate_latent_fragment(**final_ltx_params)
        gc.collect()
        torch.cuda.empty_cache()
        return new_full_latents
        
    def _quantize_to_multiple(self, n, m):
        """Rounds n to the nearest multiple of m; a positive n never quantizes to 0."""
        if m == 0:
            return n
        quantized = int(round(n / m) * m)
        return m if n > 0 and quantized == 0 else quantized
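

# --- Minimal usage sketch (illustrative, not part of the engine) ---
# Assumes ltx_manager_singleton is fully configured and that the keyframe image
# paths below exist; both are assumptions made for illustration only.
if __name__ == "__main__":
    engine = Deformes4DEngine(ltx_manager_singleton)
    for update in engine.generate_full_movie(
        keyframes=["kf_000.png", "kf_001.png"],  # hypothetical keyframe paths
        global_prompt="A slow dolly shot through a misty forest",
        storyboard=["The forest at dawn.", "A clearing bathed in light."],
        seconds_per_fragment=4.0, trim_percent=50,
        handler_strength=0.5, destination_convergence_strength=0.75,
        use_upscaler=False, use_refiner=False, use_hd=False, use_audio=False,
        video_resolution=512, use_continuity_director=False,
    ):
        print("Final video:", update["final_path"])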