Spaces:
Paused
Paused
| import gradio as gr | |
| import torch | |
| import numpy as np | |
| import tempfile | |
| import os | |
| import yaml | |
| import json | |
| from pathlib import Path | |
| import random | |
| # Importações de Hugging Face | |
| from huggingface_hub import snapshot_download, HfFolder | |
| from transformers import T5EncoderModel, T5TokenizerFast | |
| from diffusers import LTXLatentUpsamplePipeline | |
| from diffusers.models import AutoencoderKLLTXVideo, LTXVideoTransformer3DModel | |
| from diffusers.schedulers import FlowMatchEulerDiscreteScheduler | |
| # Nossa pipeline customizada e utilitários | |
| from pipeline_ltx_condition_control import LTXConditionPipeline, LTXVideoCondition | |
| from diffusers.utils import export_to_video | |
| from PIL import Image, ImageOps | |
| import imageio | |
| # --- Configuração de Logging e Avisos --- | |
| import warnings | |
| warnings.filterwarnings("ignore", category=UserWarning) # Correto: UserWarning é uma classe | |
| warnings.filterwarnings("ignore", category=FutureWarning) # Correto: FutureWarning é uma classe | |
| warnings.filterwarnings("ignore", message=".*") | |
| # --- CARREGAMENTO DIRETO DOS MODELOS (SEM CLASSE) --- | |
| print("=== [Inicialização da Aplicação] ===") | |
| # 1. Carregar Configuração do Arquivo YAML | |
| CONFIG_PATH = Path("ltxv-13b-0.9.8-distilled.yaml") | |
| if not CONFIG_PATH.exists(): | |
| raise FileNotFoundError(f"Arquivo de configuração '{CONFIG_PATH}' não encontrado.") | |
| with open(CONFIG_PATH, "r") as f: | |
| config = yaml.safe_load(f) | |
| print(f"Configuração carregada de: {CONFIG_PATH}") | |
| print(json.dumps(config, indent=2)) | |
| # Parâmetros Globais | |
| device = "cuda" if torch.cuda.is_available() else "cpu" | |
| torch_dtype = torch.bfloat16 | |
| base_repo="Lightricks/LTX-Video" | |
| checkpoint_path="ltxv-13b-0.9.8-distilled.safetensors" | |
| upscaler_repo="Lightricks/ltxv-spatial-upscaler-0.9.7" | |
| FPS = 24 | |
| # 2. Baixar os arquivos do modelo base | |
| print(f"=== Baixando snapshot do repositório base: {base_repo} ===") | |
| local_repo_path = snapshot_download( | |
| repo_id=base_repo, | |
| token=os.getenv("HF_TOKEN") or HfFolder.get_token(), | |
| resume_download=True | |
| ) | |
| # 3. Carregar cada componente da pipeline explicitamente | |
| print("=== Carregando componentes da pipeline... ===") | |
| vae = AutoencoderKLLTXVideo.from_pretrained(local_repo_path, subfolder="vae", torch_dtype=torch_dtype) | |
| text_encoder = T5EncoderModel.from_pretrained(local_repo_path, subfolder="text_encoder", torch_dtype=torch_dtype) | |
| tokenizer = T5TokenizerFast.from_pretrained(local_repo_path, subfolder="tokenizer") | |
| scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(local_repo_path, subfolder="scheduler") | |
| # Correção para o erro 'mu': desativar explicitamente o dynamic shifting | |
| if hasattr(scheduler.config, 'use_dynamic_shifting') and scheduler.config.use_dynamic_shifting: | |
| print("[Config] Desativando 'use_dynamic_shifting' no scheduler.") | |
| scheduler.config.use_dynamic_shifting = False | |
| print(f"Carregando pesos do Transformer de: {checkpoint_path}") | |
| transformer = LTXVideoTransformer3DModel.from_pretrained( | |
| local_repo_path, subfolder="transformer", weight_name=checkpoint_path, torch_dtype=torch_dtype | |
| ) | |
| # 4. Montar a pipeline principal | |
| print("Montando a LTXConditionPipeline...") | |
| pipeline = LTXConditionPipeline( | |
| vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, | |
| scheduler=scheduler, transformer=transformer | |
| ) | |
| pipeline.to(device) | |
| pipeline.vae.enable_tiling() | |
| # 5. Carregar a pipeline de upscale | |
| print(f"Carregando o upsampler espacial de: {upscaler_repo}") | |
| pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained( | |
| upscaler_repo, vae=vae, torch_dtype=torch_dtype | |
| ) | |
| pipe_upsample.to(device) | |
| print("=== [Inicialização Concluída] Aplicação pronta. ===") | |
| # --- Lógica Principal da Geração de Vídeo --- | |
| def round_to_nearest_resolution_acceptable_by_vae(height, width, vae_temporal_compression_ratio): | |
| height = height - (height % vae_temporal_compression_ratio) | |
| width = width - (width % vae_temporal_compression_ratio) | |
| return height, width | |
| def prepare_and_generate_video( | |
| condition_image_1, condition_strength_1, condition_frame_index_1, | |
| condition_image_2, condition_strength_2, condition_frame_index_2, | |
| prompt, duration, negative_prompt, | |
| height, width, guidance_scale, seed, randomize_seed, | |
| progress=gr.Progress(track_tqdm=True) | |
| ): | |
| try: | |
| # Lógica para agrupar as condições *dentro* da função | |
| # Cálculo de frames e resolução | |
| num_frames = int(duration * FPS) + 1 | |
| temporal_compression = pipeline.vae_temporal_compression_ratio | |
| num_frames = ((num_frames - 1) // temporal_compression) * temporal_compression + 1 | |
| downscale_factor = 2 / 3 | |
| downscaled_height = int(height * downscale_factor) | |
| downscaled_width = int(width * downscale_factor) | |
| downscaled_height, downscaled_width = round_to_nearest_resolution_acceptable_by_vae( | |
| downscaled_height, downscaled_width, pipeline.vae_temporal_compression_ratio | |
| ) | |
| conditions = [] | |
| if condition_image_1 is not None: | |
| condition_image_1 = ImageOps.fit(condition_image_1, (downscaled_width, downscaled_height), Image.LANCZOS) | |
| conditions.append(LTXVideoCondition( | |
| image=condition_image_1, | |
| strength=condition_strength_1, | |
| frame_index=int(condition_frame_index_1) | |
| )) | |
| if condition_image_2 is not None: | |
| condition_image_2 = ImageOps.fit(condition_image_2, (downscaled_width, downscaled_height), Image.LANCZOS) | |
| conditions.append(LTXVideoCondition( | |
| image=condition_image_2, | |
| strength=condition_strength_2, | |
| frame_index=int(condition_frame_index_2) | |
| )) | |
| pipeline_args = {} | |
| if conditions: | |
| pipeline_args["conditions"] = conditions | |
| # Manipulação da seed | |
| if randomize_seed: | |
| seed = random.randint(0, 2**32 - 1) | |
| # ETAPA 1: Geração do vídeo em baixa resolução | |
| latents = pipeline( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=downscaled_width, | |
| height=downscaled_height, | |
| num_frames=num_frames, | |
| timesteps=[1000, 993, 987, 981, 975, 909, 725, 0.03], | |
| decode_timestep=0.05, | |
| decode_noise_scale=0.025, | |
| image_cond_noise_scale=0.0, | |
| guidance_scale=guidance_scale, | |
| guidance_rescale=0.7, | |
| generator=torch.Generator().manual_seed(seed), | |
| output_type="latent", | |
| **pipeline_args | |
| ).frames | |
| # ETAPA 2: Upscale dos latentes | |
| upscaled_height, upscaled_width = downscaled_height * 2, downscaled_width * 2 | |
| upscaled_latents = pipe_upsample( | |
| latents=latents, | |
| output_type="latent" | |
| ).frames | |
| conditions = [] | |
| if condition_image_1 is not None: | |
| condition_image_1 = ImageOps.fit(condition_image_1, (upscaled_width, upscaled_height), Image.LANCZOS) | |
| conditions.append(LTXVideoCondition( | |
| image=condition_image_1, | |
| strength=condition_strength_1, | |
| frame_index=int(condition_frame_index_1) | |
| )) | |
| if condition_image_2 is not None: | |
| condition_image_2 = ImageOps.fit(condition_image_2, (upscaled_width, upscaled_height), Image.LANCZOS) | |
| conditions.append(LTXVideoCondition( | |
| image=condition_image_2, | |
| strength=condition_strength_2, | |
| frame_index=int(condition_frame_index_2) | |
| )) | |
| pipeline_args = {} | |
| if conditions: | |
| pipeline_args["conditions"] = conditions | |
| # ETAPA 3: Denoise final em alta resolução | |
| final_video_frames_np = pipeline( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=upscaled_width, | |
| height=upscaled_height, | |
| num_frames=num_frames, | |
| denoise_strength=0.999, | |
| timesteps=[1000, 909, 725, 421, 0], | |
| latents=upscaled_latents, | |
| decode_timestep=0.05, | |
| decode_noise_scale=0.025, | |
| image_cond_noise_scale=0.0, | |
| guidance_scale=guidance_scale, | |
| guidance_rescale=0.7, | |
| generator=torch.Generator(device="cuda").manual_seed(seed), | |
| output_type="np", | |
| **pipeline_args | |
| ).frames[0] | |
| # Exportação para arquivo MP4 | |
| video_uint8_frames = [(frame * 255).astype(np.uint8) for frame in final_video_frames_np] | |
| output_filename = "output.mp4" | |
| with imageio.get_writer(output_filename, fps=FPS, quality=8, macro_block_size=1) as writer: | |
| for frame_idx, frame_data in enumerate(video_uint8_frames): | |
| progress((frame_idx + 1) / len(video_uint8_frames), desc="Codificando frames do vídeo...") | |
| writer.append_data(frame_data) | |
| return output_filename, seed | |
| except Exception as e: | |
| print(f"Ocorreu um erro: {e}") | |
| return None, seed | |
| # --- Interface Gráfica com Gradio --- | |
| with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Lexend Deca"), "sans-serif"]), delete_cache=(60, 900)) as demo: | |
| gr.Markdown("# Geração de Vídeo com LTX\n**Crie vídeos a partir de texto e imagens de condição.**") | |
| with gr.Row(): | |
| with gr.Column(scale=1): | |
| prompt = gr.Textbox(label="Prompt", placeholder="Descreva o vídeo que você quer gerar...", lines=3, value="O Coringa dançando em um quarto escuro, iluminação dramática.") | |
| with gr.Accordion("Imagem de Condição 1", open=True): | |
| condition_image_1 = gr.Image(label="Imagem 1", type="pil") | |
| with gr.Row(): | |
| condition_strength_1 = gr.Slider(label="Peso", minimum=0.0, maximum=1.0, step=0.05, value=1.0) | |
| condition_frame_index_1 = gr.Number(label="Frame", value=0, precision=0) | |
| with gr.Accordion("Imagem de Condição 2", open=False): | |
| condition_image_2 = gr.Image(label="Imagem 2", type="pil") | |
| with gr.Row(): | |
| condition_strength_2 = gr.Slider(label="Peso", minimum=0.0, maximum=1.0, step=0.05, value=1.0) | |
| condition_frame_index_2 = gr.Number(label="Frame", value=0, precision=0) | |
| duration = gr.Slider(label="Duração (s)", minimum=1.0, maximum=10.0, step=0.5, value=2) | |
| with gr.Accordion("Configurações Avançadas", open=False): | |
| negative_prompt = gr.Textbox(label="Prompt Negativo", lines=2, value="pior qualidade, embaçado, tremido, distorcido") | |
| with gr.Row(): | |
| height = gr.Slider(label="Altura", minimum=256, maximum=1536, step=32, value=768) | |
| width = gr.Slider(label="Largura", minimum=256, maximum=1536, step=32, value=1152) | |
| with gr.Row(): | |
| guidance_scale = gr.Slider(label="Guidance", minimum=1.0, maximum=5.0, step=0.1, value=1.0) | |
| randomize_seed = gr.Checkbox(label="Seed Aleatória", value=True) | |
| seed = gr.Number(label="Seed", value=0, precision=0) | |
| generate_btn = gr.Button("Gerar Vídeo", variant="primary", size="lg") | |
| with gr.Column(scale=1): | |
| output_video = gr.Video(label="Vídeo Gerado", height=400) | |
| generated_seed = gr.Number(label="Seed Utilizada", interactive=False) | |
| generate_btn.click( | |
| fn=prepare_and_generate_video, | |
| inputs=[ | |
| condition_image_1, condition_strength_1, condition_frame_index_1, | |
| condition_image_2, condition_strength_2, condition_frame_index_2, | |
| prompt, duration, negative_prompt, | |
| height, width, guidance_scale, seed, randomize_seed, | |
| ], | |
| outputs=[output_video, generated_seed] | |
| ) | |
| if __name__ == "__main__": | |
| demo.queue().launch(server_name="0.0.0.0", server_port=7860, debug=True, show_error=True) |