# Hugging Face Spaces page residue (status: Paused) — not part of the application code.
# FILE: app.py
# DESCRIPTION: Final Gradio web interface for the ADUC-SDR Video Suite.
# This version is updated to import from the new modular file structure,
# including the renamed ADUC pipelines and moved managers.
import gradio as gr
import traceback
import sys
import os
import logging
from typing import List
from PIL import Image as PILImage
import warnings

# Silence noisy third-party warnings before the heavy backend imports run.
# NOTE(review): message=".*" suppresses ALL warnings regardless of category;
# kept for behavior parity, but consider narrowing it.
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", message=".*")

from huggingface_hub import logging as ll

# The original called set_verbosity_error/warning/info/debug back to back;
# each call overwrites the previous one, so only the last (debug) ever took
# effect. Keep just the effective call.
ll.set_verbosity_debug()

logger = logging.getLogger("AducDebug")
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)

# ==============================================================================
# --- BACKEND SERVICES AND UTILS IMPORTS (UPDATED PATHS) ---
# ==============================================================================
from api.ltx.ltx_aduc_pipeline import ltx_aduc_pipeline
from utils.debug_utils import log_function_io
from api.seedvr.seedvr_aduc_pipeline import seed_aduc_pipeline

logging.info("All backend services and utils imported successfully from new paths.")

# ==============================================================================
# --- WRAPPER FUNCTIONS (BRIDGE BETWEEN UI AND BACKEND) ---
# ==============================================================================
def run_generate_base_video(
    prompt: str, neg_prompt: str, start_img: PILImage.Image,
    height: int, width: int, duration: float,
    fp_num_inference_steps: int, fp_skip_initial_steps: int, fp_skip_final_steps: int,
    progress=gr.Progress(track_tqdm=True)
) -> tuple:
    """
    Prepare UI inputs and call the backend's simplified generation API.

    Args:
        prompt: One scene prompt per line; blank lines are ignored.
        neg_prompt: Negative prompt forwarded verbatim to the backend.
        start_img: Optional starting image; when provided it is anchored at
            frame 0 with weight 1.0.
        height, width: Target resolution of the base video.
        duration: Total clip duration in seconds.
        fp_num_inference_steps, fp_skip_initial_steps, fp_skip_final_steps:
            LTX inference-step overrides (0 falls back to config.yaml).
        progress: Gradio progress tracker (tqdm hooks handled by Gradio).

    Returns:
        (video_path, new_state, update): generated video path, refreshed
        session-state dict, and a gr.update revealing the post-prod group.

    Raises:
        gr.Error: On an empty prompt or any backend failure.
    """
    try:
        prompt_list = [p.strip() for p in prompt.splitlines() if p.strip()]
        if not prompt_list:
            raise gr.Error("O campo de prompt não pode estar vazio.")
        logging.info(f"[UI] Request received with {len(prompt_list)} scene(s).")

        initial_media_list = []
        # Explicit None check: truth-testing a PIL Image is ambiguous and
        # discouraged; only a missing upload should be skipped.
        if start_img is not None:
            initial_media_list.append((start_img, 0, 1.0))

        ltx_configs = {
            "num_inference_steps": fp_num_inference_steps,
            "skip_initial_inference_steps": fp_skip_initial_steps,
            "skip_final_inference_steps": fp_skip_final_steps,
        }
        video_path, tensor_path, final_seed = ltx_aduc_pipeline.generate_low_resolution(
            prompt_list=prompt_list,
            negative_prompt=neg_prompt,
            height=height, width=width, duration=duration,
            initial_media_items=initial_media_list,
            ltx_configs_override=ltx_configs
        )
        if not video_path:
            raise RuntimeError("Backend failed to return a valid video path.")

        new_state = {"low_res_video": video_path, "low_res_latents": tensor_path, "used_seed": final_seed}
        logging.info(f"[UI] Base video generation successful. Seed used: {final_seed}, Path: {video_path}")
        return video_path, new_state, gr.update(visible=True)
    except gr.Error:
        # Deliberate UI errors (e.g. empty prompt) must reach Gradio as-is,
        # not re-wrapped in a generic "error occurred" message.
        raise
    except Exception as e:
        error_message = f"❌ An error occurred during base generation:\n{e}"
        logging.error(f"{error_message}\nDetails: {traceback.format_exc()}", exc_info=True)
        raise gr.Error(error_message)
def run_ltx_refinement(state: dict, prompt: str, neg_prompt: str, progress=gr.Progress(track_tqdm=True)) -> tuple:
    """Wrapper for the LTX texture-refinement step.

    Args:
        state: Session state produced by Step 1; must hold
            "low_res_latents" and "used_seed".
        prompt: Prompt reused for refinement.
        neg_prompt: Negative prompt reused for refinement.
        progress: Gradio progress tracker.

    Returns:
        (video_path, state): refined video path and the updated state dict.

    Raises:
        gr.Error: When Step 1 has not run yet or the backend fails.
    """
    if not state or not state.get("low_res_latents"):
        raise gr.Error("Error: Please generate a base video in Step 1 before refining.")
    # Fail fast with a clear message instead of surfacing a wrapped KeyError
    # from state["used_seed"] below.
    if state.get("used_seed") is None:
        raise gr.Error("Error: Missing seed in session state. Please regenerate the base video.")
    try:
        logging.info(f"[UI] Requesting LTX refinement for latents: {state.get('low_res_latents')}")
        video_path, tensor_path = ltx_aduc_pipeline.generate_upscale_denoise(
            latents_path=state["low_res_latents"],
            prompt=prompt, negative_prompt=neg_prompt,
            seed=state["used_seed"]
        )
        state["refined_video_ltx"] = video_path
        state["refined_latents_ltx"] = tensor_path
        logging.info(f"[UI] LTX refinement successful. Path: {video_path}")
        return video_path, state
    except Exception as e:
        error_message = f"❌ An error occurred during LTX Refinement:\n{e}"
        logging.error(f"{error_message}\nDetails: {traceback.format_exc()}", exc_info=True)
        raise gr.Error(error_message)
def run_seedvr_upscaling(state: dict, seed: int, resolution: int, batch_size: int, fps: int, progress=gr.Progress(track_tqdm=True)) -> tuple:
    """Wrapper for the SeedVR resolution-upscaling step.

    Validates the session state and server availability, then delegates to
    the SeedVR pipeline. On backend failure this wrapper does NOT raise —
    it returns (None, status update) so the error shows in the status box.
    """
    if not state or not state.get("low_res_video"):
        raise gr.Error("Error: Please generate a base video in Step 1 before upscaling.")
    if not seed_aduc_pipeline:
        raise gr.Error("Error: The SeedVR upscaling server is not available.")
    try:
        logging.info(f"[UI] Requesting SeedVR upscaling for video: {state.get('low_res_video')}")

        def forward_progress(fraction, desc=""):
            # Bridge the backend's callback onto Gradio's progress widget.
            progress(fraction, desc=desc)

        result_path = seed_aduc_pipeline.run_inference(
            file_path=state["low_res_video"],
            seed=int(seed),
            resolution=int(resolution),
            batch_size=int(batch_size),
            fps=float(fps),
            progress=forward_progress,
        )
        done_message = f"✅ Upscaling complete!\nSaved to: {result_path}"
        logging.info(f"[UI] SeedVR upscaling successful. Path: {result_path}")
        return gr.update(value=result_path), gr.update(value=done_message)
    except Exception as exc:
        failure_message = f"❌ An error occurred during SeedVR Upscaling:\n{exc}"
        logging.error(f"{failure_message}\nDetails: {traceback.format_exc()}", exc_info=True)
        return None, gr.update(value=failure_message)
| # ============================================================================== | |
| # --- CONSTRUÇÃO DA INTERFACE GRADIO --- | |
| # ============================================================================== | |
def build_ui():
    """Assemble the complete Gradio interface and return the Blocks app."""
    with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo")) as demo:
        # Per-session workflow state shared by every step of the pipeline.
        session_state = gr.State(value={"low_res_video": None, "low_res_latents": None, "used_seed": None})
        widgets = {}
        gr.Markdown("🚀 ADUC-SDR Video Suite - Infinite LTX & SeedVR Workflow", elem_id="main-title")
        with gr.Row():
            # Left column: generation controls; right column: base-video output.
            with gr.Column(scale=1):
                _build_generation_controls(widgets)
            with gr.Column(scale=1):
                gr.Markdown("### Etapa 1: Vídeo Base Gerado")
                widgets['low_res_video_output'] = gr.Video(label="O resultado aparecerá aqui", interactive=True)
                widgets['used_seed_display'] = gr.Textbox(label="Seed Utilizada", visible=False, interactive=False)
        _build_postprod_controls(widgets)
        _register_event_handlers(session_state, widgets)
    return demo
def _build_generation_controls(ui: dict):
    """Builds the Step 1 generation controls (no mode selection).

    All created components are stored into *ui* under stable keys so the
    event-handler registration can find them.
    """
    gr.Markdown("### Configurações de Geração")
    # Typo fix in the user-facing hint: "um linha" -> "uma linha".
    ui['prompt'] = gr.Textbox(label="Prompt(s)", info="Para múltiplas cenas escreva uma linha por prompt.", value="", lines=6)
    ui['neg_prompt'] = gr.Textbox(label="Negative Prompt", visible=False, value="blurry, low quality, bad anatomy, deformed", lines=2)
    ui['start_image'] = gr.Image(label="Imagem de Início (Opcional)", type="pil", sources=["upload"])
    with gr.Accordion("Parâmetros Principais", open=True):
        ui['duration'] = gr.Slider(label="Duração Total (s)", value=4, step=1, minimum=1, maximum=30)
        with gr.Row():
            ui['height'] = gr.Slider(label="Height", value=432, step=128, minimum=256, maximum=1024)
            ui['width'] = gr.Slider(label="Width", value=768, step=128, minimum=256, maximum=1024)
    with gr.Accordion("Opções Avançadas LTX", open=False):
        gr.Markdown("#### Configurações de Passos de Inferência")
        gr.Markdown("*Deixe o valor padrão (ex: 20) ou 0 para usar a configuração do `config.yaml`.*")
        ui['fp_num_inference_steps'] = gr.Slider(label="Número de Passos", minimum=0, maximum=100, step=1, value=20, info="Padrão LTX: 20.")
        ui['fp_skip_initial_steps'] = gr.Slider(label="Pular Passos Iniciais", minimum=0, maximum=100, step=1, value=0)
        ui['fp_skip_final_steps'] = gr.Slider(label="Pular Passos Finais", minimum=0, maximum=100, step=1, value=0)
    ui['generate_low_btn'] = gr.Button("1. Gerar Vídeo Base", variant="primary")
def _build_postprod_controls(ui: dict):
    """Builds the UI components for Step 2: Post-Production.

    The whole group starts hidden; run_generate_base_video reveals it via
    gr.update(visible=True) once a base video exists. Created components
    are stored into *ui* for event registration.
    """
    with gr.Group(visible=False) as ui['post_prod_group']:
        gr.Markdown("--- \n## Etapa 2: Pós-Produção")
        with gr.Tabs():
            # Tab 1: LTX texture refinement — reuses the original prompt/seed.
            with gr.TabItem("🚀 Upscaler de Textura (LTX)"):
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Markdown("Usa o prompt e a semente originais para refinar o vídeo, adicionando detalhes e texturas de alta qualidade.")
                        ui['ltx_refine_btn'] = gr.Button("2. Aplicar Refinamento LTX", variant="primary")
                    with gr.Column(scale=1):
                        ui['ltx_refined_video_output'] = gr.Video(label="Vídeo com Textura Refinada", interactive=False)
            # Tab 2: SeedVR resolution upscaling — button disabled (and the
            # handler never wired) when the SeedVR service is unavailable.
            with gr.TabItem("✨ Upscaler de Resolução (SeedVR)"):
                is_seedvr_available = seed_aduc_pipeline is not None
                if not is_seedvr_available:
                    gr.Markdown("🔴 **AVISO: O serviço SeedVR não está disponível.**")
                with gr.Row():
                    with gr.Column(scale=1):
                        ui['seedvr_seed'] = gr.Slider(minimum=0, maximum=999999, value=42, step=1, label="Seed")
                        ui['seedvr_resolution'] = gr.Slider(minimum=720, maximum=2160, value=1080, step=8, label="Resolução Vertical Alvo")
                        ui['seedvr_batch_size'] = gr.Slider(minimum=1, maximum=16, value=4, step=1, label="Batch Size por GPU")
                        ui['seedvr_fps'] = gr.Number(label="FPS de Saída (0 = original)", value=0)
                        ui['run_seedvr_btn'] = gr.Button("2. Iniciar Upscaling SeedVR", variant="primary", interactive=is_seedvr_available)
                    with gr.Column(scale=1):
                        ui['seedvr_video_output'] = gr.Video(label="Vídeo com Upscale SeedVR", interactive=False)
                        ui['seedvr_status_box'] = gr.Textbox(label="Status do SeedVR", value="Aguardando...", lines=3, interactive=False)
def _register_event_handlers(app_state: gr.State, ui: dict):
    """Wires every Gradio event handler to its backend wrapper."""
    def _seed_from_state(state):
        # Mirror the seed stored in session state into the display textbox.
        return state.get("used_seed", "N/A")

    base_inputs = [
        ui['prompt'], ui['neg_prompt'], ui['start_image'],
        ui['height'], ui['width'], ui['duration'],
        ui['fp_num_inference_steps'], ui['fp_skip_initial_steps'], ui['fp_skip_final_steps'],
    ]
    base_outputs = [ui['low_res_video_output'], app_state, ui['post_prod_group']]
    generation_event = ui['generate_low_btn'].click(
        fn=run_generate_base_video, inputs=base_inputs, outputs=base_outputs
    )
    generation_event.then(
        fn=_seed_from_state, inputs=[app_state], outputs=[ui['used_seed_display']]
    )

    ui['ltx_refine_btn'].click(
        fn=run_ltx_refinement,
        inputs=[app_state, ui['prompt'], ui['neg_prompt']],
        outputs=[ui['ltx_refined_video_output'], app_state],
    )

    # Only wire SeedVR when its button exists and is clickable (server available).
    if 'run_seedvr_btn' in ui and ui['run_seedvr_btn'].interactive:
        ui['run_seedvr_btn'].click(
            fn=run_seedvr_upscaling,
            inputs=[app_state, ui['seedvr_seed'], ui['seedvr_resolution'], ui['seedvr_batch_size'], ui['seedvr_fps']],
            outputs=[ui['seedvr_video_output'], ui['seedvr_status_box']],
        )
# ==============================================================================
# --- APPLICATION ENTRY POINT ---
# ==============================================================================
if __name__ == "__main__":
    # Build the UI and serve it with queuing enabled so long-running
    # generation jobs do not block concurrent sessions.
    demo = build_ui()
    demo.queue().launch(server_name="0.0.0.0", server_port=7860, debug=True, show_error=True)