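"""Gradio front-end for the Aduc Framework (ADUC-SDR).

Wires the framework's pipeline to a step-by-step web UI: pre-production
(storyboard and keyframes), production of the original movie, optional
post-production passes (latent upscaling, HD mastering, audio generation),
and a session log viewer.
"""
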
import gradio as gr
import yaml
import logging
import os
import sys
import shutil
import time
import json

import aduc_framework
from aduc_framework.types import PreProductionParams, ProductionParams

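# Dark "cinematic" theme applied to the Blocks UI defined further below.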
cinematic_theme = gr.themes.Base(
    primary_hue=gr.themes.colors.indigo,
    secondary_hue=gr.themes.colors.purple,
    neutral_hue=gr.themes.colors.slate,
    font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"),
).set(
    body_background_fill="#111827", body_text_color="#E5E7EB",
    button_primary_background_fill="linear-gradient(90deg, #4F46E5, #8B5CF6)",
    button_primary_text_color="#FFFFFF", button_secondary_background_fill="#374151",
    button_secondary_border_color="#4B5563", button_secondary_text_color="#E5E7EB",
    block_background_fill="#1F2937", block_border_width="1px", block_border_color="#374151",
    block_label_background_fill="#374151", block_label_text_color="#E5E7EB",
    block_title_text_color="#FFFFFF", input_background_fill="#374151",
    input_border_color="#4B5563", input_placeholder_color="#9CA3AF",
)

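# Logging: everything is written both to stdout and to a fresh aduc_log.txt
# per run; the "Log de Geração" accordion in the UI reads this file on demand.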
LOG_FILE_PATH = "aduc_log.txt"
if os.path.exists(LOG_FILE_PATH):
    os.remove(LOG_FILE_PATH)

log_format = '%(asctime)s - %(levelname)s - [%(name)s:%(funcName)s] - %(message)s'
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.handlers.clear()
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logging.Formatter(log_format))
root_logger.addHandler(stream_handler)
file_handler = logging.FileHandler(LOG_FILE_PATH, mode='w', encoding='utf-8')
file_handler.setFormatter(logging.Formatter(log_format))
root_logger.addHandler(file_handler)
logger = logging.getLogger(__name__)

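# Framework bootstrap. config.yaml is read only for the workspace directory.
# A minimal file matching the keys accessed below would look like this (the
# value is illustrative, not the project's actual default):
#
#   application:
#     workspace_dir: "workspace"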
try:
    with open("config.yaml", 'r') as f:
        config = yaml.safe_load(f)
    WORKSPACE_DIR = config['application']['workspace_dir']
    aduc = aduc_framework.create_aduc_instance(workspace_dir=WORKSPACE_DIR)
    logger.info("Interface Gradio inicializada e conectada ao Aduc Framework.")
except Exception as e:
    logger.critical(f"ERRO CRÍTICO durante a inicialização: {e}", exc_info=True)
    # Surface the error in a minimal UI instead of failing silently, then exit.
    with gr.Blocks() as demo:
        gr.Markdown("# ERRO CRÍTICO NA INICIALIZAÇÃO")
        gr.Markdown("Não foi possível iniciar o Aduc Framework. Verifique os logs para mais detalhes.")
        gr.Textbox(value=str(e), label="Detalhes do Erro", lines=10)
    demo.launch()
    sys.exit(1)


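# --- Gradio callbacks --------------------------------------------------------
# Thin adapters between the UI and the aduc tasks. The production and
# post-production wrappers are generators that yield dicts keyed by output
# component, so the UI can show "in progress" placeholders before the final
# file paths arrive.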
def run_pre_production_wrapper(prompt, num_keyframes, ref_files, resolution_str, duration_per_fragment, progress=gr.Progress()):
    """Step 1: generate the storyboard and keyframes from the reference images."""
    if not ref_files:
        raise gr.Error("Por favor, forneça pelo menos uma imagem de referência.")
    ref_paths = [aduc.process_image_for_story(f.name, 480, f"ref_processed_{i}.png") for i, f in enumerate(ref_files)]
    params = PreProductionParams(prompt=prompt, num_keyframes=int(num_keyframes), ref_paths=ref_paths, resolution=int(resolution_str.split('x')[0]), duration_per_fragment=duration_per_fragment)
    storyboard, final_keyframes, updated_state = aduc.task_pre_production(params, progress)
    return updated_state.model_dump(), storyboard, final_keyframes, gr.update(visible=True, open=True)


def run_original_production_wrapper(current_state_dict, trim_percent, handler_strength, dest_strength, guidance_scale, stg_scale, steps, progress=gr.Progress()):
    """Step 3: produce the original master video and expose its latents for post-production."""
    yield {original_video_output: gr.update(value=None, visible=True, label="🎬 Produzindo seu filme..."), final_video_output: gr.update(value=None, visible=True, label="🎬 Produção em progresso..."), step4_accordion: gr.update(visible=False)}
    production_params = ProductionParams(trim_percent=int(trim_percent), handler_strength=handler_strength, destination_convergence_strength=dest_strength, guidance_scale=guidance_scale, stg_scale=stg_scale, inference_steps=int(steps))
    final_video_path, latent_paths, updated_state = aduc.task_produce_original_movie(params=production_params, progress_callback=progress)
    updated_state_dict = updated_state.model_dump()
    yield {original_video_output: gr.update(value=final_video_path, label="✅ Filme Original Master"), final_video_output: gr.update(value=final_video_path), step4_accordion: gr.update(visible=True, open=True), original_latents_paths_state: latent_paths, original_video_path_state: final_video_path, current_source_video_state: final_video_path, generation_state_holder: updated_state_dict, generation_data_output: updated_state_dict}


def run_upscaler_wrapper(latent_paths, chunk_size, progress=gr.Progress()):
    """Post-production A: 2x latent upscaling of the original movie's latents."""
    if not latent_paths:
        raise gr.Error("Não é possível executar o Upscaler. Nenhum latente original encontrado.")
    yield {upscaler_video_output: gr.update(value=None, visible=True, label="Fazendo upscale dos latentes..."), final_video_output: gr.update(label="Pós-Produção: Upscaler Latente...")}
    final_path = None
    for update in aduc.task_run_latent_upscaler(latent_paths, int(chunk_size), progress):
        if "final_path" in update:
            final_path = update['final_path']
    yield {upscaler_video_output: gr.update(value=final_path, label="✅ Upscale Latente Concluído"), final_video_output: gr.update(value=final_path), upscaled_video_path_state: final_path, current_source_video_state: final_path}


def run_hd_wrapper(source_video, steps, global_prompt, progress=gr.Progress()):
    """Post-production B: HD mastering (SeedVR) of the most recent video."""
    if not source_video:
        raise gr.Error("Não é possível executar a Masterização HD.")
    yield {hd_video_output: gr.update(value=None, visible=True, label="Aplicando masterização HD..."), final_video_output: gr.update(label="Pós-Produção: Masterização HD...")}
    final_path = None
    for update in aduc.task_run_hd_mastering(source_video, int(steps), global_prompt, progress):
        if "final_path" in update:
            final_path = update['final_path']
    yield {hd_video_output: gr.update(value=final_path, label="✅ Masterização HD Concluída"), final_video_output: gr.update(value=final_path), hd_video_path_state: final_path, current_source_video_state: final_path}


def run_audio_wrapper(source_video, audio_prompt, global_prompt, progress=gr.Progress()):
    """Post-production C: generate audio and mux it into the most recent video."""
    if not source_video:
        raise gr.Error("Não é possível executar a Geração de Áudio.")
    yield {audio_video_output: gr.update(value=None, visible=True, label="Gerando áudio e unindo..."), final_video_output: gr.update(label="Pós-Produção: Geração de Áudio...")}
    # Fall back to the film's global prompt when no dedicated audio prompt is given.
    final_audio_prompt = audio_prompt if audio_prompt and audio_prompt.strip() else global_prompt
    final_path = None
    for update in aduc.task_run_audio_generation(source_video, final_audio_prompt, progress):
        if "final_path" in update:
            final_path = update['final_path']
    yield {audio_video_output: gr.update(value=final_path, label="✅ Geração de Áudio Concluída"), final_video_output: gr.update(value=final_path)}


def get_log_content():
    """Return the current contents of the session log file."""
    try:
        with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        return "Arquivo de log ainda não criado."


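# --- UI layout ----------------------------------------------------------------
# Hidden gr.State components carry file paths and the generation state between
# steps; each pipeline stage lives in its own accordion.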
with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
    generation_state_holder = gr.State(value={})
    original_latents_paths_state = gr.State(value=[])
    original_video_path_state = gr.State(value=None)
    current_source_video_state = gr.State(value=None)
    upscaled_video_path_state = gr.State(value=None)
    hd_video_path_state = gr.State(value=None)

    gr.Markdown("<h1>ADUC-SDR 🎬 - O Diretor de Cinema IA</h1>")
    gr.Markdown("<p>Crie um filme completo com vídeo e áudio, orquestrado por uma equipe de IAs especialistas.</p>")

    with gr.Row():
        lang_selector = gr.Radio(["🇧🇷", "🇺🇸", "🇨🇳"], value="🇧🇷", label="Idioma / Language")
        resolution_selector = gr.Radio(["480x480", "720x720", "960x960"], value="480x480", label="Resolução Base")

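    # Step accordions: later stages start hidden and are revealed by the
    # callbacks once the previous stage has produced its outputs.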
with gr.Accordion("Etapa 1: Roteiro e Cenas-Chave (Pré-Produção)", open=True) as step1_accordion: |
|
|
prompt_input = gr.Textbox(label="Ideia Geral do Filme", value="Um leão majestoso caminha pela savana, senta-se e ruge para o sol poente.") |
|
|
ref_image_input = gr.File(label="Imagens de Referência", file_count="multiple", file_types=["image"]) |
|
|
with gr.Row(): |
|
|
num_keyframes_slider = gr.Slider(minimum=3, maximum=42, value=5, step=1, label="Número de Cenas-Chave") |
|
|
duration_per_fragment_slider = gr.Slider(label="Duração de cada Clipe (s)", info="Duração alvo para cada fragmento de vídeo.", minimum=2.0, maximum=10.0, value=4.0, step=0.1) |
|
|
storyboard_and_keyframes_button = gr.Button("Gerar Roteiro e Keyframes", variant="primary") |
|
|
storyboard_output = gr.JSON(label="Roteiro Gerado (Storyboard)") |
|
|
keyframe_gallery = gr.Gallery(label="Galeria de Cenas-Chave (Keyframes)", visible=True, object_fit="contain", height="auto", type="filepath") |
|
|
|
|
|
with gr.Accordion("Etapa 3: Produção do Vídeo Original", open=False, visible=False) as step3_accordion: |
|
|
trim_percent_slider = gr.Slider(minimum=10, maximum=90, value=50, step=5, label="Poda Causal (%)") |
|
|
handler_strength = gr.Slider(label="Força do Déjà-Vu", minimum=0.0, maximum=1.0, value=0.5, step=0.05) |
|
|
dest_strength = gr.Slider(label="Força da Âncora Final", minimum=0.0, maximum=1.0, value=0.75, step=0.05) |
|
|
guidance_scale_slider = gr.Slider(minimum=1.0, maximum=10.0, value=2.0, step=0.1, label="Escala de Orientação") |
|
|
stg_scale_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.025, step=0.005, label="Escala STG") |
|
|
inference_steps_slider = gr.Slider(minimum=10, maximum=50, value=20, step=1, label="Passos de Inferência") |
|
|
produce_original_button = gr.Button("🎬 Produzir Vídeo Original", variant="primary") |
|
|
original_video_output = gr.Video(label="Filme Original Master", visible=False, interactive=False) |
|
|
|
|
|
with gr.Accordion("Etapa 4: Pós-Produção (Opcional)", open=False, visible=False) as step4_accordion: |
|
|
gr.Markdown("Aplique efeitos de melhoria ao vídeo mais recente. Cada etapa usa o resultado da anterior como fonte.") |
|
|
with gr.Accordion("A. Upscaler Latente 2x", open=True): |
|
|
upscaler_chunk_size_slider = gr.Slider(minimum=1, maximum=10, value=2, step=1, label="Fragmentos por Lote") |
|
|
run_upscaler_button = gr.Button("Executar Upscaler Latente", variant="secondary") |
|
|
upscaler_video_output = gr.Video(label="Vídeo com Upscale", visible=False, interactive=False) |
|
|
with gr.Accordion("B. Masterização HD (SeedVR)", open=True): |
|
|
hd_steps_slider = gr.Slider(minimum=20, maximum=150, value=100, step=5, label="Passos de Inferência HD") |
|
|
run_hd_button = gr.Button("Executar Masterização HD (Modelo 3B)", variant="secondary") |
|
|
hd_video_output = gr.Video(label="Vídeo Masterizado em HD", visible=False, interactive=False) |
|
|
with gr.Accordion("C. Geração de Áudio", open=True): |
|
|
audio_prompt_input = gr.Textbox(label="Prompt de Áudio Detalhado (Opcional)", lines=3, placeholder="Descreva os sons, efeitos e música desejados. Se vazio, usará o prompt geral do filme.") |
|
|
run_audio_button = gr.Button("Gerar Áudio", variant="secondary") |
|
|
audio_video_output = gr.Video(label="Vídeo com Áudio", visible=False, interactive=False) |
|
|
|
|
|
with gr.Accordion("🧬 DNA Digital da Geração (JSON)", open=False) as data_accordion: |
|
|
generation_data_output = gr.JSON(label="Estado de Geração Completo") |
|
|
|
|
|
final_video_output = gr.Video(label="Filme Final (Resultado da Última Etapa)", visible=False, interactive=False) |
|
|
|
|
|
with gr.Accordion("📝 Log de Geração (Detalhado)", open=False) as log_accordion: |
|
|
log_display = gr.Textbox(label="Log da Sessão", lines=20, interactive=False, autoscroll=True) |
|
|
update_log_button = gr.Button("Atualizar Log") |
|
|
|
|
|
|
|
|
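    # Event wiring: pre-production fills the storyboard, gallery and state;
    # production feeds the post-production steps; each post-production step
    # updates current_source_video_state so the next one works on the latest cut.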
    storyboard_and_keyframes_button.click(fn=run_pre_production_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input, resolution_selector, duration_per_fragment_slider], outputs=[generation_state_holder, storyboard_output, keyframe_gallery, step3_accordion])
    produce_original_button.click(fn=run_original_production_wrapper, inputs=[generation_state_holder, trim_percent_slider, handler_strength, dest_strength, guidance_scale_slider, stg_scale_slider, inference_steps_slider], outputs=[original_video_output, final_video_output, step4_accordion, original_latents_paths_state, original_video_path_state, current_source_video_state, generation_state_holder, generation_data_output])

    run_upscaler_button.click(fn=run_upscaler_wrapper, inputs=[original_latents_paths_state, upscaler_chunk_size_slider], outputs=[upscaler_video_output, final_video_output, upscaled_video_path_state, current_source_video_state])
    run_hd_button.click(fn=run_hd_wrapper, inputs=[current_source_video_state, hd_steps_slider, prompt_input], outputs=[hd_video_output, final_video_output, hd_video_path_state, current_source_video_state])
    run_audio_button.click(fn=run_audio_wrapper, inputs=[current_source_video_state, audio_prompt_input, prompt_input], outputs=[audio_video_output, final_video_output])

    generation_state_holder.change(fn=lambda state: state, inputs=generation_state_holder, outputs=generation_data_output)
    update_log_button.click(fn=get_log_content, inputs=[], outputs=[log_display])


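# Entry point: start each session with a clean workspace, then launch the app.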
if __name__ == "__main__": |
|
|
if os.path.exists(WORKSPACE_DIR): |
|
|
shutil.rmtree(WORKSPACE_DIR) |
|
|
os.makedirs(WORKSPACE_DIR) |
|
|
logger.info("Aplicação Gradio iniciada. Lançando interface...") |
|
|
demo.queue().launch() |