Aduc-sdr committed on
Commit
501fd32
·
verified ·
1 Parent(s): a08cec6

Upload ai_studio_code (5).py

Browse files
Files changed (1) hide show
  1. ai_studio_code (5).py +270 -0
ai_studio_code (5).py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ #
3
+ # Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
4
+ #
5
+ # Version: 2.3.0
6
+ #
7
+ # This version introduces a new "Cinematic Dark Mode" theme and custom CSS
8
+ # to provide a more professional and visually appealing user interface that
9
+ # matches the sophistication of the underlying ADUC-SDR framework.
10
+
11
+ import gradio as gr
12
+ import yaml
13
+ import logging
14
+ import os
15
+ import sys
16
+ import shutil
17
+ import time
18
+ import json
19
+
20
+ from aduc_orchestrator import AducOrchestrator
21
+
22
# --- CUSTOM UI THEME DEFINITION ---
# This theme provides a professional, dark-mode look and feel, suitable for creative tools.
# Built on gr.themes.Base; .set() overrides individual CSS-variable-backed knobs.
cinematic_theme = gr.themes.Base(
    primary_hue=gr.themes.colors.indigo,
    secondary_hue=gr.themes.colors.purple,
    neutral_hue=gr.themes.colors.slate,
    font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"),
).set(
    # -- Colors --
    body_background_fill="#111827",  # Slate 900
    body_text_color="#E5E7EB",  # Slate 200

    # -- Buttons --
    button_primary_background_fill="linear-gradient(90deg, #4F46E5, #8B5CF6)",  # Gradient Indigo -> Purple
    button_primary_text_color="#FFFFFF",
    button_secondary_background_fill="#374151",  # Slate 700
    button_secondary_border_color="#4B5563",
    button_secondary_text_color="#E5E7EB",

    # -- Blocks and Containers --
    block_background_fill="#1F2937",  # Slate 800
    block_border_width="1px",
    block_border_color="#374151",  # Slate 700
    block_label_background_fill="#374151",
    block_label_text_color="#E5E7EB",
    block_title_text_color="#FFFFFF",

    # -- Input Fields --
    input_background_fill="#374151",
    input_border_color="#4B5563",
    input_placeholder_color="#9CA3AF",

    # -- Spacing and Radius --
    radius_size="lg",
    spacing_size="lg",
    layout_gap="lg",
)
59
+
60
# --- 1. CONFIGURATION AND INITIALIZATION ---
# Log destination shared by the file handler below and the UI log viewer.
LOG_FILE_PATH = "aduc_log.txt"

# Start every run with a fresh log file.
if os.path.exists(LOG_FILE_PATH):
    os.remove(LOG_FILE_PATH)

log_format = '%(asctime)s - %(levelname)s - [%(name)s:%(funcName)s] - %(message)s'

# Route INFO-level records to both stdout and the log file via the root logger,
# replacing any handlers installed by earlier configuration.
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.handlers.clear()

stream_handler = logging.StreamHandler(sys.stdout)
file_handler = logging.FileHandler(LOG_FILE_PATH, mode='w', encoding='utf-8')
for handler in (stream_handler, file_handler):
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(log_format))
    root_logger.addHandler(handler)

# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)
78
+
79
# Load the translation tables; fall back to empty tables if the file is broken.
i18n = {}
try:
    with open("i18n.json", "r", encoding="utf-8") as f:
        i18n = json.load(f)
except Exception as e:
    logger.error(f"Error loading i18n.json: {e}")
    i18n = {"pt": {}, "en": {}, "zh": {}}

# Backfill any missing locale tables; pt/zh reuse the English table when present.
i18n.setdefault('pt', i18n.get('en', {}))
i18n.setdefault('en', {})
i18n.setdefault('zh', i18n.get('en', {}))
88
+
89
# Load application config and build the orchestrator; the app cannot run without them.
try:
    with open("config.yaml", 'r') as f:
        config = yaml.safe_load(f)
    WORKSPACE_DIR = config['application']['workspace_dir']
    aduc = AducOrchestrator(workspace_dir=WORKSPACE_DIR)
    logger.info("ADUC Orchestrator and Specialists initialized successfully.")
except Exception as e:
    logger.error(f"CRITICAL ERROR during initialization: {e}", exc_info=True)
    # Fix: `exit()` is the interactive-shell helper and exits with status 0,
    # which would make a failed startup look successful to a supervisor.
    # sys.exit(1) reports the failure explicitly.
    sys.exit(1)
97
+
98
# --- 2. UI WRAPPER FUNCTIONS ---
def run_pre_production_wrapper(prompt, num_keyframes, ref_files, resolution_str, duration_per_fragment, progress=gr.Progress()):
    """Step 1 (generate mode): build the storyboard, then generate its keyframes.

    Returns three gr.update objects: the storyboard JSON, the keyframe gallery,
    and the reveal of the Step 3 accordion.

    NOTE(review): `duration_per_fragment` is wired from the UI but never used
    in this function — confirm whether it should be forwarded downstream.
    """
    if not ref_files: raise gr.Error("Please provide at least one reference image.")
    # Normalize every uploaded reference into a 480px working copy.
    ref_paths = [aduc.process_image_for_story(f.name, 480, f"ref_processed_{i}.png") for i, f in enumerate(ref_files)]
    progress(0.1, desc="Generating storyboard...")
    storyboard, initial_ref_path, _ = aduc.task_generate_storyboard(prompt, num_keyframes, ref_paths, progress)
    # "480x480" -> 480; the keyframe generator takes a single side length.
    resolution = int(resolution_str.split('x')[0])
    def cb_factory(scene_index, total_scenes):
        # Build a per-scene diffusion callback that reports step-level progress.
        start_time = time.time()
        # assumes the keyframe pipeline runs 12 denoising steps — TODO confirm
        total_steps = 12
        def callback(pipe_self, step, timestep, callback_kwargs):
            elapsed, current_step = time.time() - start_time, step + 1
            if current_step > 0:
                it_per_sec = current_step / elapsed
                eta = (total_steps - current_step) / it_per_sec if it_per_sec > 0 else 0
                desc = f"Keyframe {scene_index}/{total_scenes}: {int((current_step/total_steps)*100)}% | {current_step}/{total_steps} [{elapsed:.0f}s<{eta:.0f}s, {it_per_sec:.2f}it/s]"
                # Scenes share the 0.2..1.0 band of the progress bar equally;
                # the first 0.2 is consumed by the storyboard phase above.
                base_progress = 0.2 + (scene_index - 1) * (0.8 / total_scenes)
                step_progress = (current_step / total_steps) * (0.8 / total_scenes)
                progress(base_progress + step_progress, desc=desc)
            return {}
        return callback
    final_keyframes = aduc.task_generate_keyframes(storyboard, initial_ref_path, prompt, resolution, cb_factory)
    return gr.update(value=storyboard), gr.update(value=final_keyframes), gr.update(visible=True, open=True)
121
+
122
def run_pre_production_photo_wrapper(prompt, num_keyframes, ref_files, progress=gr.Progress()):
    """Step 1 (photographer mode): storyboard from the base image, then let the
    AI photographer pick keyframes out of the remaining image pool."""
    if not ref_files or len(ref_files) < 2: raise gr.Error("Photographer Mode requires at least 2 images: one base and one for the scene pool.")
    base_image, *pool_images = ref_files
    # The first upload anchors the story; the rest form the selection pool.
    base_ref_paths = [aduc.process_image_for_story(base_image.name, 480, "base_ref_processed_0.png")]
    pool_ref_paths = []
    for idx, image_file in enumerate(pool_images, start=1):
        pool_ref_paths.append(aduc.process_image_for_story(image_file.name, 480, f"pool_ref_{idx}.png"))
    progress(0.1, desc="Generating storyboard...")
    storyboard, _, _ = aduc.task_generate_storyboard(prompt, num_keyframes, base_ref_paths, progress)
    progress(0.5, desc="AI Photographer is selecting the best scenes...")
    selected_keyframes = aduc.task_select_keyframes(storyboard, base_ref_paths, pool_ref_paths)
    return gr.update(value=storyboard), gr.update(value=selected_keyframes), gr.update(visible=True, open=True)
131
+
132
def run_original_production_wrapper(keyframes, prompt, duration, trim_percent, handler_strength, dest_strength, guidance_scale, stg_scale, steps, resolution, progress=gr.Progress()):
    """Step 3: render the original master video from the approved keyframes.

    Generator used as a Gradio handler: yields once to put the players into a
    'production in progress' state, then once more with the finished video and
    the pipeline state (latents + paths) that Step 4 consumes.
    """
    # Placeholder state while the movie renders; keep Step 4 hidden until done.
    yield {original_video_output: gr.update(value=None, visible=True, label="🎬 Producing your original master video... Please wait."), final_video_output: gr.update(value=None, visible=True, label="🎬 Production in progress..."), step4_accordion: gr.update(visible=False)}
    # "480x480" -> 480; the pipeline takes a single side length.
    res = int(resolution.split('x')[0])
    result = aduc.task_produce_original_movie(keyframes, prompt, duration, int(trim_percent), handler_strength, dest_strength, guidance_scale, stg_scale, int(steps), res, use_continuity_director=True, progress=progress)
    # Publish the master video and stash its path/latents for post-production.
    yield {original_video_output: gr.update(value=result["final_path"], label="✅ Original Master Video"), final_video_output: gr.update(value=result["final_path"], label="Final Film (Result of the Last Step)"), step4_accordion: gr.update(visible=True, open=True), original_latents_paths_state: result["latent_paths"], original_video_path_state: result["final_path"], current_source_video_state: result["final_path"]}
137
+
138
def run_upscaler_wrapper(latent_paths, chunk_size, progress=gr.Progress()):
    """Step 4a: run the latent upscaler over the Step 3 latents and publish the result."""
    if not latent_paths: raise gr.Error("Cannot run Upscaler. No original latents found. Please complete Step 3 first.")
    # Reset the player and announce that upscaling has started.
    yield {
        upscaler_video_output: gr.update(value=None, visible=True, label="Upscaling latents and decoding video..."),
        final_video_output: gr.update(label="Post-Production in progress: Latent Upscaling..."),
    }
    final_path = None
    # Drain the task generator; the last reported path is the finished file.
    for task_update in aduc.task_run_latent_upscaler(latent_paths, int(chunk_size), progress=progress):
        final_path = task_update['final_path']
    yield {
        upscaler_video_output: gr.update(value=final_path, label="✅ Latent Upscale Complete"),
        final_video_output: gr.update(value=final_path),
        upscaled_video_path_state: final_path,
        current_source_video_state: final_path,
    }
144
+
145
def run_hd_wrapper(source_video, model_version, steps, global_prompt, progress=gr.Progress()):
    """Step 4b: apply HD mastering to the current source video and publish the result."""
    if not source_video: raise gr.Error("Cannot run HD Mastering. No source video found. Please complete a previous step first.")
    # Reset the player and announce that mastering has started.
    yield {
        hd_video_output: gr.update(value=None, visible=True, label="Applying HD mastering... This may take a while."),
        final_video_output: gr.update(label="Post-Production in progress: HD Mastering..."),
    }
    final_path = None
    # Drain the task generator; the last reported path is the finished file.
    for task_update in aduc.task_run_hd_mastering(source_video, model_version, int(steps), global_prompt, progress=progress):
        final_path = task_update['final_path']
    yield {
        hd_video_output: gr.update(value=final_path, label="✅ HD Mastering Complete"),
        final_video_output: gr.update(value=final_path),
        hd_video_path_state: final_path,
        current_source_video_state: final_path,
    }
151
+
152
def run_audio_wrapper(source_video, audio_prompt, global_prompt, progress=gr.Progress()):
    """Step 4c: generate a soundtrack for the current source video and mux it in."""
    if not source_video: raise gr.Error("Cannot run Audio Generation. No source video found. Please complete a previous step first.")
    # Reset the player and announce that audio generation has started.
    yield {
        audio_video_output: gr.update(value=None, visible=True, label="Generating audio and muxing..."),
        final_video_output: gr.update(label="Post-Production in progress: Audio Generation..."),
    }
    # Prefer a non-blank dedicated audio prompt; otherwise reuse the story prompt.
    if audio_prompt and audio_prompt.strip():
        final_audio_prompt = audio_prompt
    else:
        final_audio_prompt = global_prompt
    final_path = None
    # Drain the task generator; the last reported path is the finished file.
    for task_update in aduc.task_run_audio_generation(source_video, final_audio_prompt, progress=progress):
        final_path = task_update['final_path']
    yield {
        audio_video_output: gr.update(value=final_path, label="✅ Audio Generation Complete"),
        final_video_output: gr.update(value=final_path),
    }
159
+
160
def get_log_content(log_path=None):
    """Return the contents of the run log for display in the UI.

    Args:
        log_path: Optional override of the log file location. Defaults to the
            module-level LOG_FILE_PATH, preserving the original no-argument
            call used by the 'update log' button.

    Returns:
        The log text, or a placeholder message if the file does not exist yet.
    """
    path = log_path if log_path is not None else LOG_FILE_PATH
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        # Expected before the first generation has started.
        return "Log file not yet created. Start a generation."
165
+
166
def update_ui_language(lang_emoji):
    """Resolve the i18n table for the selected flag emoji.

    NOTE(review): this function looks like an unused stub — the live language
    switch is wired through `create_lang_update_fn` inside the UI block, and
    this function builds `lang_map` but implicitly returns None.
    """
    lang_code_map = {"🇧🇷": "pt", "🇺🇸": "en", "🇨🇳": "zh"}
    lang_code = lang_code_map.get(lang_emoji, "en")
    # Fall back to English when the selected locale has no table.
    lang_map = i18n.get(lang_code, i18n.get('en', {}))
    # ... This dictionary mapping will be long, so it's defined once in the main block
171
+
172
# --- 3. GRADIO UI DEFINITION ---
with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
    # Portuguese is the default UI language; the selector below switches it live.
    default_lang = i18n.get('pt', {})

    # Cross-step pipeline state: artifact paths produced by earlier steps and
    # consumed by later post-production steps.
    original_latents_paths_state = gr.State(value=None)
    original_video_path_state = gr.State(value=None)
    upscaled_video_path_state = gr.State(value=None)
    hd_video_path_state = gr.State(value=None)
    # Always points at the newest video in the chain (original -> upscaled -> HD).
    current_source_video_state = gr.State(value=None)

    title_md = gr.Markdown(f"<h1>{default_lang.get('app_title')}</h1>")
    subtitle_md = gr.Markdown(f"<p>{default_lang.get('app_subtitle')}</p>")
    with gr.Row():
        lang_selector = gr.Radio(["🇧🇷", "🇺🇸", "🇨🇳"], value="🇧🇷", label=default_lang.get('lang_selector_label'))
        resolution_selector = gr.Radio(["480x480", "720x720", "960x960"], value="480x480", label="Base Resolution")

    # Step 1: pre-production — storyboard plus keyframes (generated or selected).
    with gr.Accordion(default_lang.get('step1_accordion'), open=True) as step1_accordion:
        prompt_input = gr.Textbox(label=default_lang.get('prompt_label'), value="A majestic lion walks across the savanna, sits down, and then roars at the setting sun.")
        ref_image_input = gr.File(label=default_lang.get('ref_images_label'), file_count="multiple", file_types=["image"])
        with gr.Row():
            num_keyframes_slider = gr.Slider(minimum=3, maximum=42, value=5, step=1, label=default_lang.get('keyframes_label'))
            duration_per_fragment_slider = gr.Slider(label=default_lang.get('duration_label'), info=default_lang.get('duration_info'), minimum=2.0, maximum=10.0, value=4.0, step=0.1)
        with gr.Row():
            storyboard_and_keyframes_button = gr.Button(default_lang.get('storyboard_and_keyframes_button'), variant="primary")
            storyboard_from_photos_button = gr.Button(default_lang.get('storyboard_from_photos_button'), variant="secondary")
        step1_mode_b_info_md = gr.Markdown(f"*{default_lang.get('step1_mode_b_info')}*")
        storyboard_output = gr.JSON(label=default_lang.get('storyboard_output_label'))
        keyframe_gallery = gr.Gallery(label=default_lang.get('keyframes_gallery_label'), visible=True, object_fit="contain", height="auto", type="filepath")

    # Step 3: original movie production (revealed once Step 1 completes).
    with gr.Accordion(default_lang.get('step3_accordion'), open=False, visible=False) as step3_accordion:
        step3_description_md = gr.Markdown(default_lang.get('step3_description'))
        with gr.Accordion(default_lang.get('ltx_advanced_options'), open=False) as ltx_advanced_options_accordion:
            with gr.Accordion(default_lang.get('causality_controls_title'), open=True) as causality_accordion:
                trim_percent_slider = gr.Slider(minimum=10, maximum=90, value=50, step=5, label=default_lang.get('trim_percent_label'), info=default_lang.get('trim_percent_info'))
                with gr.Row():
                    forca_guia_slider = gr.Slider(label=default_lang.get('forca_guia_label'), minimum=0.0, maximum=1.0, value=0.5, step=0.05, info=default_lang.get('forca_guia_info'))
                    convergencia_destino_slider = gr.Slider(label=default_lang.get('convergencia_final_label'), minimum=0.0, maximum=1.0, value=0.75, step=0.05, info=default_lang.get('convergencia_final_info'))
            with gr.Accordion(default_lang.get('ltx_pipeline_options'), open=True) as ltx_pipeline_accordion:
                with gr.Row():
                    guidance_scale_slider = gr.Slider(minimum=1.0, maximum=10.0, value=2.0, step=0.1, label=default_lang.get('guidance_scale_label'), info=default_lang.get('guidance_scale_info'))
                    stg_scale_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.025, step=0.005, label=default_lang.get('stg_scale_label'), info=default_lang.get('stg_scale_info'))
                    inference_steps_slider = gr.Slider(minimum=10, maximum=50, value=20, step=1, label=default_lang.get('steps_label'), info=default_lang.get('steps_info'))
        produce_original_button = gr.Button(default_lang.get('produce_original_button'), variant="primary")
        original_video_output = gr.Video(label="Original Master Video", visible=False, interactive=False)

    # Step 4: optional post-production chain (upscale -> HD master -> audio).
    with gr.Accordion(default_lang.get('step4_accordion'), open=False, visible=False) as step4_accordion:
        step4_description_md = gr.Markdown(default_lang.get('step4_description'))
        with gr.Accordion(default_lang.get('sub_step_a_upscaler'), open=True) as sub_step_a_accordion:
            upscaler_description_md = gr.Markdown(default_lang.get('upscaler_description'))
            with gr.Accordion(default_lang.get('upscaler_options'), open=False) as upscaler_options_accordion:
                upscaler_chunk_size_slider = gr.Slider(minimum=1, maximum=10, value=2, step=1, label=default_lang.get('upscaler_chunk_size_label'), info=default_lang.get('upscaler_chunk_size_info'))
            run_upscaler_button = gr.Button(default_lang.get('run_upscaler_button'), variant="secondary")
            upscaler_video_output = gr.Video(label="Upscaled Video", visible=False, interactive=False)
        with gr.Accordion(default_lang.get('sub_step_b_hd'), open=True) as sub_step_b_accordion:
            hd_description_md = gr.Markdown(default_lang.get('hd_description'))
            with gr.Accordion(default_lang.get('hd_options'), open=False) as hd_options_accordion:
                hd_model_radio = gr.Radio(["3B", "7B"], value="7B", label=default_lang.get('hd_model_label'))
                hd_steps_slider = gr.Slider(minimum=20, maximum=150, value=100, step=5, label=default_lang.get('hd_steps_label'), info=default_lang.get('hd_steps_info'))
            run_hd_button = gr.Button(default_lang.get('run_hd_button'), variant="secondary")
            hd_video_output = gr.Video(label="HD Mastered Video", visible=False, interactive=False)
        with gr.Accordion(default_lang.get('sub_step_c_audio'), open=True) as sub_step_c_accordion:
            audio_description_md = gr.Markdown(default_lang.get('audio_description'))
            with gr.Accordion(default_lang.get('audio_options'), open=False) as audio_options_accordion:
                audio_prompt_input = gr.Textbox(label=default_lang.get('audio_prompt_label'), info=default_lang.get('audio_prompt_info'), lines=3)
            run_audio_button = gr.Button(default_lang.get('run_audio_button'), variant="secondary")
            audio_video_output = gr.Video(label="Video with Audio", visible=False, interactive=False)

    # Shows the newest result of whichever step ran last.
    final_video_output = gr.Video(label=default_lang.get('final_video_label'), visible=False, interactive=False)
    with gr.Accordion(default_lang.get('log_accordion_label'), open=False) as log_accordion:
        log_display = gr.Textbox(label=default_lang.get('log_display_label'), lines=20, interactive=False, autoscroll=True)
        update_log_button = gr.Button(default_lang.get('update_log_button'))

    # --- 4. UI EVENT CONNECTIONS ---
    # Every component whose label/value is re-rendered on a language change,
    # in the exact order of the gr.update list returned by update_lang below.
    all_ui_components = [title_md, subtitle_md, lang_selector, step1_accordion, prompt_input, ref_image_input, num_keyframes_slider, duration_per_fragment_slider, storyboard_and_keyframes_button, storyboard_from_photos_button, step1_mode_b_info_md, storyboard_output, keyframe_gallery, step3_accordion, step3_description_md, produce_original_button, ltx_advanced_options_accordion, causality_accordion, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, ltx_pipeline_accordion, guidance_scale_slider, stg_scale_slider, inference_steps_slider, step4_accordion, step4_description_md, sub_step_a_accordion, upscaler_description_md, upscaler_options_accordion, upscaler_chunk_size_slider, run_upscaler_button, sub_step_b_accordion, hd_description_md, hd_options_accordion, hd_model_radio, hd_steps_slider, run_hd_button, sub_step_c_accordion, audio_description_md, audio_options_accordion, audio_prompt_input, run_audio_button, final_video_output, log_accordion, log_display, update_log_button]
    def create_lang_update_fn():
        # Factory so the change handler closes over the i18n tables.
        def update_lang(lang_emoji):
            lang_code_map = {"🇧🇷": "pt", "🇺🇸": "en", "🇨🇳": "zh"}
            lang_code = lang_code_map.get(lang_emoji, "en")
            # Fall back to English when the selected locale has no table.
            lang_map = i18n.get(lang_code, i18n.get('en', {}))
            # One gr.update per entry of all_ui_components, in the same order.
            return [
                gr.update(value=f"<h1>{lang_map.get('app_title')}</h1>"),
                gr.update(value=f"<p>{lang_map.get('app_subtitle')}</p>"),
                gr.update(label=lang_map.get('lang_selector_label')),
                gr.update(label=lang_map.get('step1_accordion')),
                gr.update(label=lang_map.get('prompt_label')),
                gr.update(label=lang_map.get('ref_images_label')),
                gr.update(label=lang_map.get('keyframes_label')),
                gr.update(label=lang_map.get('duration_label'), info=lang_map.get('duration_info')),
                gr.update(value=lang_map.get('storyboard_and_keyframes_button')),
                gr.update(value=lang_map.get('storyboard_from_photos_button')),
                gr.update(value=f"*{lang_map.get('step1_mode_b_info')}*"),
                gr.update(label=lang_map.get('storyboard_output_label')),
                gr.update(label=lang_map.get('keyframes_gallery_label')),
                gr.update(label=lang_map.get('step3_accordion')),
                gr.update(value=lang_map.get('step3_description')),
                gr.update(value=lang_map.get('produce_original_button')),
                gr.update(label=lang_map.get('ltx_advanced_options')),
                gr.update(label=lang_map.get('causality_controls_title')),
                gr.update(label=lang_map.get('trim_percent_label'), info=lang_map.get('trim_percent_info')),
                gr.update(label=lang_map.get('forca_guia_label'), info=lang_map.get('forca_guia_info')),
                gr.update(label=lang_map.get('convergencia_final_label'), info=lang_map.get('convergencia_final_info')),
                gr.update(label=lang_map.get('ltx_pipeline_options')),
                gr.update(label=lang_map.get('guidance_scale_label'), info=lang_map.get('guidance_scale_info')),
                gr.update(label=lang_map.get('stg_scale_label'), info=lang_map.get('stg_scale_info')),
                gr.update(label=lang_map.get('steps_label'), info=lang_map.get('steps_info')),
                gr.update(label=lang_map.get('step4_accordion')),
                gr.update(value=lang_map.get('step4_description')),
                gr.update(label=lang_map.get('sub_step_a_upscaler')),
                gr.update(value=lang_map.get('upscaler_description')),
                gr.update(label=lang_map.get('upscaler_options')),
                gr.update(label=lang_map.get('upscaler_chunk_size_label'), info=lang_map.get('upscaler_chunk_size_info')),
                gr.update(value=lang_map.get('run_upscaler_button')),
                gr.update(label=lang_map.get('sub_step_b_hd')),
                gr.update(value=lang_map.get('hd_description')),
                gr.update(label=lang_map.get('hd_options')),
                gr.update(label=lang_map.get('hd_model_label')),
                gr.update(label=lang_map.get('hd_steps_label'), info=lang_map.get('hd_steps_info')),
                gr.update(value=lang_map.get('run_hd_button')),
                gr.update(label=lang_map.get('sub_step_c_audio')),
                gr.update(value=lang_map.get('audio_description')),
                gr.update(label=lang_map.get('audio_options')),
                gr.update(label=lang_map.get('audio_prompt_label'), info=lang_map.get('audio_prompt_info')),
                gr.update(value=lang_map.get('run_audio_button')),
                gr.update(label=lang_map.get('final_video_label')),
                gr.update(label=lang_map.get('log_accordion_label')),
                gr.update(label=lang_map.get('log_display_label')),
                gr.update(value=lang_map.get('update_log_button'))]
        return update_lang
    lang_selector.change(fn=create_lang_update_fn(), inputs=lang_selector, outputs=all_ui_components)

    # Step buttons -> wrapper functions defined above.
    storyboard_and_keyframes_button.click(fn=run_pre_production_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input, resolution_selector, duration_per_fragment_slider], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
    storyboard_from_photos_button.click(fn=run_pre_production_photo_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
    produce_original_button.click(fn=run_original_production_wrapper, inputs=[keyframe_gallery, prompt_input, duration_per_fragment_slider, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, guidance_scale_slider, stg_scale_slider, inference_steps_slider, resolution_selector], outputs=[original_video_output, final_video_output, step4_accordion, original_latents_paths_state, original_video_path_state, current_source_video_state])
    run_upscaler_button.click(fn=run_upscaler_wrapper, inputs=[original_latents_paths_state, upscaler_chunk_size_slider], outputs=[upscaler_video_output, final_video_output, upscaled_video_path_state, current_source_video_state])
    run_hd_button.click(fn=run_hd_wrapper, inputs=[current_source_video_state, hd_model_radio, hd_steps_slider, prompt_input], outputs=[hd_video_output, final_video_output, hd_video_path_state, current_source_video_state])
    run_audio_button.click(fn=run_audio_wrapper, inputs=[current_source_video_state, audio_prompt_input, prompt_input], outputs=[audio_video_output, final_video_output])
    update_log_button.click(fn=get_log_content, inputs=[], outputs=[log_display])
262
+
263
# --- 5. APPLICATION LAUNCH ---
if __name__ == "__main__":
    # Start every session from a clean workspace directory.
    if os.path.exists(WORKSPACE_DIR):
        logger.info(f"Clearing previous workspace at: {WORKSPACE_DIR}")
        shutil.rmtree(WORKSPACE_DIR)
    os.makedirs(WORKSPACE_DIR)
    # Fix: this was an f-string with no placeholders (lint F541); a plain
    # literal is equivalent and clearer.
    logger.info("Application started. Launching Gradio interface...")
    demo.queue().launch()