euiia committed
Commit 2db91ab · verified · 1 Parent(s): 48d9f58

Delete app.py

Files changed (1)
  1. app.py +0 -337
app.py DELETED
@@ -1,337 +0,0 @@
- # deformes4D_engine.py
- # Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
- #
- # MODIFICATIONS FOR ADUC-SDR:
- # Copyright (C) 2025 Carlos Rodrigues dos Santos. All rights reserved.
- #
- # This file is part of the ADUC-SDR project. It contains the core logic for
- # video fragment generation, latent manipulation, and dynamic editing,
- # governed by the ADUC orchestrator.
- # This component is licensed under the GNU Affero General Public License v3.0.
-
- import os
- import time
- import imageio
- import numpy as np
- import torch
- import logging
- from PIL import Image, ImageOps
- from dataclasses import dataclass
- import gradio as gr
- import subprocess
- import gc
-
- from ltx_manager_helpers import ltx_manager_singleton
- from gemini_helpers import gemini_singleton
- from upscaler_specialist import upscaler_specialist_singleton
- from hd_specialist import hd_specialist_singleton
- from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
- from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
- from audio_specialist import audio_specialist_singleton
-
- logger = logging.getLogger(__name__)
-
- @dataclass
- class LatentConditioningItem:
-     """Represents a conditioning anchor in latent space for the Camera (Ψ)."""
-     latent_tensor: torch.Tensor
-     media_frame_number: int
-     conditioning_strength: float
-
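- # Illustrative example: anchoring pixel frame 0 to a keyframe latent at full strength
- # (keyframe_latent is a placeholder name) would be:
- #   LatentConditioningItem(latent_tensor=keyframe_latent, media_frame_number=0, conditioning_strength=1.0)
-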
- class Deformes4DEngine:
-     """
-     Implements the Camera (Ψ) and the Distiller (Δ) of the ADUC-SDR architecture.
-     Orchestrates the generation, latent post-production, and final rendering of the video fragments.
-     """
-     def __init__(self, ltx_manager, workspace_dir="deformes_workspace"):
-         self.ltx_manager = ltx_manager
-         self.workspace_dir = workspace_dir
-         self._vae = None
-         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-         logger.info("Deformes4D specialist (ADUC-SDR executor) initialized.")
-
-     @property
-     def vae(self):
-         # Lazily borrow the VAE from the first LTX worker so the engine never loads its own weights.
-         if self._vae is None:
-             self._vae = self.ltx_manager.workers[0].pipeline.vae
-             self._vae.to(self.device); self._vae.eval()
-         return self._vae
-
-     # --- HELPER METHODS ---
-     @torch.no_grad()
-     def pixels_to_latents(self, tensor: torch.Tensor) -> torch.Tensor:
-         tensor = tensor.to(self.device, dtype=self.vae.dtype)
-         return vae_encode(tensor, self.vae, vae_per_channel_normalize=True)
-
-     @torch.no_grad()
-     def latents_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
-         latent_tensor = latent_tensor.to(self.device, dtype=self.vae.dtype)
-         timestep_tensor = torch.tensor([decode_timestep] * latent_tensor.shape[0], device=self.device, dtype=latent_tensor.dtype)
-         return vae_decode(latent_tensor, self.vae, is_video=True, timestep=timestep_tensor, vae_per_channel_normalize=True)
-
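-     # Note: both helpers operate on pixel tensors shaped (B, C, F, H, W) in [-1, 1]. With the
-     # default VAE scale factors assumed elsewhere in this file, one latent frame covers 8 pixel
-     # frames and one latent pixel covers an 8x8 spatial patch.
-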
-     def save_video_from_tensor(self, video_tensor: torch.Tensor, path: str, fps: int = 24):
-         if video_tensor is None or video_tensor.ndim != 5 or video_tensor.shape[2] == 0: return
-         # (B, C, F, H, W) in [-1, 1] -> (F, H, W, C) uint8 in [0, 255]
-         video_tensor = video_tensor.squeeze(0).permute(1, 2, 3, 0)
-         video_tensor = (video_tensor.clamp(-1, 1) + 1) / 2.0
-         video_np = (video_tensor.detach().cpu().float().numpy() * 255).astype(np.uint8)
-         with imageio.get_writer(path, fps=fps, codec='libx264', quality=8, output_params=['-pix_fmt', 'yuv420p']) as writer:
-             for frame in video_np: writer.append_data(frame)
-
-     def _preprocess_image_for_latent_conversion(self, image: Image.Image, target_resolution: tuple) -> Image.Image:
-         if image.size != target_resolution:
-             return ImageOps.fit(image, target_resolution, Image.Resampling.LANCZOS)
-         return image
-
-     def pil_to_latent(self, pil_image: Image.Image) -> torch.Tensor:
-         image_np = np.array(pil_image).astype(np.float32) / 255.0
-         # (H, W, C) -> (1, C, 1, H, W): a one-frame video batch in [-1, 1]
-         tensor = torch.from_numpy(image_np).permute(2, 0, 1).unsqueeze(0).unsqueeze(2)
-         tensor = (tensor * 2.0) - 1.0
-         return self.pixels_to_latents(tensor)
-
-     def concatenate_videos_ffmpeg(self, video_paths: list[str], output_path: str):
-         if not video_paths: raise gr.Error("No video fragments to assemble.")
-         list_file_path = os.path.join(self.workspace_dir, "concat_list.txt")
-         with open(list_file_path, 'w', encoding='utf-8') as f:
-             for path in video_paths: f.write(f"file '{os.path.abspath(path)}'\n")
-         cmd_list = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', list_file_path, '-c', 'copy', output_path]
-         logger.info(f"Concatenating {len(video_paths)} video clips into {output_path}...")
-         try:
-             subprocess.run(cmd_list, check=True, capture_output=True, text=True)
-         except subprocess.CalledProcessError as e:
-             logger.error(f"FFmpeg error: {e.stderr}")
-             raise gr.Error(f"Final video assembly failed. Details: {e.stderr}")
-
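-     # Note: ffmpeg's concat demuxer with '-c copy' joins the clips without re-encoding; this
-     # assumes every fragment shares the same codec, pixel format, resolution, and frame rate.
-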
-     # --- ADUC-SDR CORE LOGIC ---
-     def generate_full_movie(self, keyframes: list, global_prompt: str, storyboard: list,
-                             seconds_per_fragment: float, trim_percent: int,
-                             handler_strength: float, destination_convergence_strength: float,
-                             use_upscaler: bool, use_refiner: bool, use_hd: bool, use_audio: bool,
-                             video_resolution: int, use_continuity_director: bool,
-                             progress: gr.Progress = gr.Progress()):
-
-         FPS = 24
-         FRAMES_PER_LATENT_CHUNK = 8
-         ECO_LATENT_CHUNKS = 2
-
-         # Quantize the raw fragment length and the tail-trim window to whole latent chunks.
-         total_frames_brutos = self._quantize_to_multiple(int(seconds_per_fragment * FPS), FRAMES_PER_LATENT_CHUNK)
-         frames_a_podar = self._quantize_to_multiple(int(total_frames_brutos * (trim_percent / 100)), FRAMES_PER_LATENT_CHUNK)
-         latents_a_podar = frames_a_podar // FRAMES_PER_LATENT_CHUNK
-
-         DEJAVU_FRAME_TARGET = frames_a_podar - 1 if frames_a_podar > 0 else 0
-         DESTINATION_FRAME_TARGET = total_frames_brutos - 1
-
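-         # Worked example (illustrative): seconds_per_fragment=6.0 and trim_percent=50 at 24 fps
-         # give total_frames_brutos = 144, frames_a_podar = 72, latents_a_podar = 9,
-         # DEJAVU_FRAME_TARGET = 71, and DESTINATION_FRAME_TARGET = 143.
-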
-         base_ltx_params = {"guidance_scale": 2.0, "stg_scale": 0.025, "rescaling_scale": 0.15, "num_inference_steps": 20, "image_cond_noise_scale": 0.00}
-         refine_ltx_params = {"motion_prompt": "", "guidance_scale": 1.0, "denoise_strength": 0.35, "refine_steps": 12}
-
-         keyframe_paths = [item[0] if isinstance(item, tuple) else item for item in keyframes]
-         story_history = ""
-         target_resolution_tuple = (video_resolution, video_resolution)
-
-         eco_latent_for_next_loop, dejavu_latent_for_next_loop = None, None
-         latent_fragments, latent_fragment_lengths = [], []
-
-         if len(keyframe_paths) < 2: raise gr.Error(f"Generation requires at least 2 keyframes. You provided {len(keyframe_paths)}.")
-
-         num_transitions_to_generate = len(keyframe_paths) - 1
-
-         for i in range(num_transitions_to_generate):
-             fragment_index = i + 1
-             progress(i / num_transitions_to_generate, desc=f"Generating latents {fragment_index}/{num_transitions_to_generate}")
-
-             past_keyframe_path = keyframe_paths[i - 1] if i > 0 else keyframe_paths[i]
-             start_keyframe_path = keyframe_paths[i]
-             destination_keyframe_path = keyframe_paths[i + 1]
-             future_story_prompt = storyboard[i + 1] if (i + 1) < len(storyboard) else "The final scene."
-
-             decision = gemini_singleton.get_cinematic_decision(
-                 global_prompt, story_history, past_keyframe_path, start_keyframe_path, destination_keyframe_path,
-                 storyboard[i - 1] if i > 0 else "The beginning.", storyboard[i], future_story_prompt
-             )
-             transition_type, motion_prompt = decision["transition_type"], decision["motion_prompt"]
-             story_history += f"\n- Act {fragment_index}: {motion_prompt}"
-
-             conditioning_items = []
-             if eco_latent_for_next_loop is None:
-                 # First fragment: anchor frame 0 on the starting keyframe at full strength.
-                 img_start = self._preprocess_image_for_latent_conversion(Image.open(start_keyframe_path).convert("RGB"), target_resolution_tuple)
-                 conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_start), 0, 1.0))
-             else:
-                 # Later fragments: reuse the echo and déjà-vu latents handed off by the previous iteration.
-                 conditioning_items.append(LatentConditioningItem(eco_latent_for_next_loop, 0, 1.0))
-                 conditioning_items.append(LatentConditioningItem(dejavu_latent_for_next_loop, DEJAVU_FRAME_TARGET, handler_strength))
-
-             img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple)
-             conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET, destination_convergence_strength))
-
-             current_ltx_params = {**base_ltx_params, "motion_prompt": motion_prompt}
-             latents_brutos = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, total_frames_brutos)
-
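-             # Handoff for the next fragment: the tail trim window yields an "echo" (its first
-             # ECO_LATENT_CHUNKS latents, re-pinned at frame 0) and a "déjà-vu" (its last latent,
-             # pinned at DEJAVU_FRAME_TARGET), so consecutive fragments overlap coherently.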
-             last_trim = latents_brutos[:, :, -(latents_a_podar + 1):, :, :].clone()
-             eco_latent_for_next_loop = last_trim[:, :, :ECO_LATENT_CHUNKS, :, :].clone()
-             dejavu_latent_for_next_loop = last_trim[:, :, -1:, :, :].clone()
-
-             # Keep the fragment body: drop the trimmed tail and the leading latent frame.
-             # (Note: this slice assumes latents_a_podar >= 2; a smaller trim window makes it degenerate.)
-             latents_video = latents_brutos[:, :, :-(latents_a_podar - 1), :, :].clone()
-             latents_video = latents_video[:, :, 1:, :, :]
-
-             # [MEMORY CLEANUP]
-             del last_trim, latents_brutos
-             gc.collect(); torch.cuda.empty_cache()
-
-             if transition_type == "cut":
-                 # A hard cut breaks continuity: the next fragment re-anchors on its own keyframe.
-                 eco_latent_for_next_loop, dejavu_latent_for_next_loop = None, None
-
-             if use_upscaler:
-                 latents_video = self.upscale_latents(latents_video)
-
-             latent_fragments.append(latents_video)
-             latent_fragment_lengths.append(latents_video.shape[2])
-
-         # [MEMORY CLEANUP]
-         del eco_latent_for_next_loop, dejavu_latent_for_next_loop
-         gc.collect(); torch.cuda.empty_cache()
-
-         logger.info("--- CONCATENATING AND REFINING SUPER-LATENT ---")
-         tensors_para_concatenar = []
-         for idx, tensor_frag in enumerate(latent_fragments):
-             tensor_on_target_device = tensor_frag.to(self.device)
-             if idx < len(latent_fragments) - 1:
-                 # Drop the last latent of every fragment but the final one to avoid duplicated boundary frames.
-                 tensors_para_concatenar.append(tensor_on_target_device[:, :, :-1, :, :])
-             else:
-                 tensors_para_concatenar.append(tensor_on_target_device)
-
-         # [MEMORY CLEANUP]
-         del latent_fragments
-         gc.collect(); torch.cuda.empty_cache()
-
-         processed_latents = torch.cat(tensors_para_concatenar, dim=2)
-
-         # [MEMORY CLEANUP]
-         del tensors_para_concatenar
-         gc.collect(); torch.cuda.empty_cache()
-
-         logger.info(f"Concatenation complete. Super-latent shape: {processed_latents.shape}")
-
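-         # A single low-strength denoising pass (denoise_strength=0.35, 12 steps) over the whole
-         # super-latent smooths the seams between fragments.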
-         if use_refiner:
-             progress(0.8, desc="Refining visual continuity...")
-             processed_latents = self.refine_latents(processed_latents, **refine_ltx_params)
-
-         logger.info("--- SPLITTING SUPER-LATENT AND PROCESSING FRAGMENTS INDIVIDUALLY ---")
-
-         # Mirror the concatenation trim: every fragment except the last gave up one latent frame.
-         adjusted_lengths = [l - 1 if i < len(latent_fragment_lengths) - 1 else l for i, l in enumerate(latent_fragment_lengths)]
-
-         refined_fragments = torch.split(processed_latents, adjusted_lengths, dim=2)
-         del processed_latents; gc.collect(); torch.cuda.empty_cache()
-
-         final_video_paths = []
-         num_final_fragments = len(refined_fragments)
-
-         for i, fragment_latent in enumerate(refined_fragments):
-             progress(0.85 + (0.1 * (i / num_final_fragments)), desc=f"Finalizing clip {i+1}/{num_final_fragments}")
-
-             base_name = f"fragment_{i}_{int(time.time())}"
-             current_path = os.path.join(self.workspace_dir, f"{base_name}_temp.mp4")
-
-             if use_audio:
-                 current_path = self._generate_video_and_audio_from_latents(fragment_latent, global_prompt, base_name)
-             else:
-                 pixel_tensor = self.latents_to_pixels(fragment_latent)
-                 self.save_video_from_tensor(pixel_tensor, current_path, fps=24)
-                 del pixel_tensor
-
-             gc.collect(); torch.cuda.empty_cache()
-
-             if use_hd:
-                 hd_output_path = os.path.join(self.workspace_dir, f"{base_name}_hd.mp4")
-                 try:
-                     hd_specialist_singleton.process_video(input_video_path=current_path, output_video_path=hd_output_path, prompt=" ")
-                     os.remove(current_path)
-                     final_video_paths.append(hd_output_path)
-                 except Exception as e:
-                     logger.error(f"HD mastering of fragment {i+1} failed: {e}. Using the standard version.")
-                     os.rename(current_path, hd_output_path)
-                     final_video_paths.append(hd_output_path)
-             else:
-                 final_video_paths.append(current_path)
-
-         # [MEMORY CLEANUP]
-         del refined_fragments
-         gc.collect(); torch.cuda.empty_cache()
-
-         progress(0.98, desc="Final assembly...")
-         final_movie_path = os.path.join(self.workspace_dir, f"movie_{int(time.time())}_FINAL.mp4")
-         self.concatenate_videos_ffmpeg(final_video_paths, final_movie_path)
-
-         for path in final_video_paths:
-             if os.path.exists(path):
-                 os.remove(path)
-
-         logger.info(f"Done! Final video saved at: {final_movie_path}")
-         yield {"final_path": final_movie_path}
-
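-     # Illustrative usage sketch (assumes the imported singletons are configured):
-     #   engine = Deformes4DEngine(ltx_manager_singleton)
-     #   for update in engine.generate_full_movie(keyframes, global_prompt, storyboard,
-     #                                            seconds_per_fragment=6.0, trim_percent=50,
-     #                                            handler_strength=0.5, destination_convergence_strength=0.9,
-     #                                            use_upscaler=False, use_refiner=True, use_hd=False,
-     #                                            use_audio=False, video_resolution=512,
-     #                                            use_continuity_director=True):
-     #       print(update["final_path"])
-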
-     def _generate_video_and_audio_from_latents(self, latent_tensor, audio_prompt, base_name):
-         silent_video_path = os.path.join(self.workspace_dir, f"{base_name}_silent_for_audio.mp4")
-         pixel_tensor = self.latents_to_pixels(latent_tensor)
-         self.save_video_from_tensor(pixel_tensor, silent_video_path, fps=24)
-         del pixel_tensor; gc.collect(); torch.cuda.empty_cache()
-
-         try:
-             result = subprocess.run(
-                 ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", silent_video_path],
-                 capture_output=True, text=True, check=True)
-             frag_duration = float(result.stdout.strip())
-         except (subprocess.CalledProcessError, ValueError, FileNotFoundError):
-             logger.warning("ffprobe failed. Computing the duration manually.")
-             # Each latent frame decodes to 8 pixel frames; playback is 24 fps.
-             num_pixel_frames = latent_tensor.shape[2] * 8
-             frag_duration = num_pixel_frames / 24.0
-
-         video_with_audio_path = audio_specialist_singleton.generate_audio_for_video(
-             video_path=silent_video_path, prompt=audio_prompt,
-             duration_seconds=frag_duration)
-
-         if os.path.exists(silent_video_path):
-             os.remove(silent_video_path)
-         return video_with_audio_path
-
-     def refine_latents(self, latents: torch.Tensor, fps: int = 24, **kwargs) -> torch.Tensor:
-         """Invokes the LTX pool manager to refine an existing latent tensor."""
-         logger.info(f"Refining latent tensor with shape {latents.shape}.")
-         _, _, num_latent_frames, latent_h, latent_w = latents.shape
-         video_scale_factor = getattr(self.vae.config, 'temporal_scale_factor', 8)
-         vae_scale_factor = getattr(self.vae.config, 'spatial_downscale_factor', 8)
-
-         pixel_height = latent_h * vae_scale_factor
-         pixel_width = latent_w * vae_scale_factor
-         pixel_frames = num_latent_frames * video_scale_factor
-
-         final_ltx_params = {
-             "height": pixel_height, "width": pixel_width, "video_total_frames": pixel_frames,
-             "video_fps": fps, "current_fragment_index": int(time.time()),
-             **kwargs
-         }
-
-         refined_latents_tensor, _ = self.ltx_manager.refine_latents(latents, **final_ltx_params)
-
-         if refined_latents_tensor is None:
-             logger.warning("Refinement failed (most likely out of memory). Returning the original, unrefined tensor.")
-             return latents
-
-         logger.info(f"Returning refined latent tensor with shape: {refined_latents_tensor.shape}")
-         return refined_latents_tensor
-
-     def upscale_latents(self, latents: torch.Tensor) -> torch.Tensor:
-         logger.info(f"Upscaling latent tensor with shape {latents.shape}.")
-         return upscaler_specialist_singleton.upscale(latents)
-
-     def _generate_latent_tensor_internal(self, conditioning_items, ltx_params, target_resolution, total_frames_to_generate):
-         final_ltx_params = {
-             **ltx_params, 'width': target_resolution[0], 'height': target_resolution[1],
-             'video_total_frames': total_frames_to_generate, 'video_fps': 24,
-             'current_fragment_index': int(time.time()), 'conditioning_items_data': conditioning_items
-         }
-         new_full_latents, _ = self.ltx_manager.generate_latent_fragment(**final_ltx_params)
-         gc.collect()
-         torch.cuda.empty_cache()
-         return new_full_latents
-
-     def _quantize_to_multiple(self, n, m):
-         """Rounds n to the nearest multiple of m, never returning 0 for positive n."""
-         if m == 0: return n
-         quantized = int(round(n / m) * m)
-         return m if n > 0 and quantized == 0 else quantized
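-         # e.g. _quantize_to_multiple(144, 8) -> 144; _quantize_to_multiple(3, 8) -> 8
-         # (a positive n is never rounded down to 0).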