Aduc-sdr committed on
Commit
a08cec6
·
verified ·
1 Parent(s): b1e6827

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -427
app.py DELETED
@@ -1,427 +0,0 @@
1
- # app.py
2
- #
3
- # Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
4
- #
5
- # Version: 2.0.2
6
- #
7
- # Contact:
8
- # Carlos Rodrigues dos Santos
9
- # carlex22@gmail.com
10
- #
11
- # Related Repositories and Projects:
12
- # GitHub: https://github.com/carlex22/Aduc-sdr
13
- # YouTube (Results): https://m.youtube.com/channel/UC3EgoJi_Fv7yuDpvfYNtoIQ
14
- #
15
- # This program is free software: you can redistribute it and/or modify
16
- # it under the terms of the GNU Affero General Public License as published by the
17
- # Free Software Foundation, either version 3 of the License, or
18
- # (at your option) any later version.
19
- #
20
- # This program is distributed in the hope that it will be useful,
21
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
22
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23
- # GNU Affero General Public License for more details.
24
- #
25
- # You should have received a copy of the GNU Affero General Public License
26
- # along with this program. If not, see <https://www.gnu.org/licenses/>.
27
- #
28
- # PENDING PATENT NOTICE: The ADUC method and system implemented in this
29
- # software is in the process of being patented. Please see NOTICE.md for details.
30
-
31
- """
32
- This file serves as the main entry point for the ADUC-SDR Gradio user interface.
33
- It orchestrates the multi-step workflow for AI-driven film creation, from
34
- pre-production (storyboarding, keyframing) to production (original video rendering)
35
- and post-production (upscaling, HD mastering, audio generation).
36
-
37
- The UI is structured using Accordion blocks to guide the user through a logical
38
- sequence of operations, while `gr.State` components manage the flow of data
39
- (file paths of generated artifacts) between these independent steps.
40
- """
41
-
42
- import gradio as gr
43
- import yaml
44
- import logging
45
- import os
46
- import sys
47
- import shutil
48
- import time
49
- import json
50
-
51
- from aduc_orchestrator import AducOrchestrator
52
-
53
- # --- 1. CONFIGURATION AND INITIALIZATION ---
54
- # This section sets up logging, loads internationalization strings, and initializes
55
- # the core AducOrchestrator which manages all AI specialist models.
56
-
57
- LOG_FILE_PATH = "aduc_log.txt"
58
- if os.path.exists(LOG_FILE_PATH):
59
- os.remove(LOG_FILE_PATH)
60
-
61
- log_format = '%(asctime)s - %(levelname)s - [%(name)s:%(funcName)s] - %(message)s'
62
- root_logger = logging.getLogger()
63
- root_logger.setLevel(logging.INFO)
64
- root_logger.handlers.clear()
65
-
66
- stream_handler = logging.StreamHandler(sys.stdout)
67
- stream_handler.setLevel(logging.INFO)
68
- stream_handler.setFormatter(logging.Formatter(log_format))
69
- root_logger.addHandler(stream_handler)
70
-
71
- file_handler = logging.FileHandler(LOG_FILE_PATH, mode='w', encoding='utf-8')
72
- file_handler.setLevel(logging.INFO)
73
- file_handler.setFormatter(logging.Formatter(log_format))
74
- root_logger.addHandler(file_handler)
75
-
76
- logger = logging.getLogger(__name__)
77
-
78
- # Load translation strings for the UI
79
- i18n = {}
80
- try:
81
- with open("i18n.json", "r", encoding="utf-8") as f:
82
- i18n = json.load(f)
83
- except Exception as e:
84
- logger.error(f"Error loading i18n.json: {e}")
85
- i18n = {"pt": {}, "en": {}, "zh": {}}
86
-
87
- # Fallback for missing languages
88
- if 'pt' not in i18n: i18n['pt'] = i18n.get('en', {})
89
- if 'en' not in i18n: i18n['en'] = {}
90
- if 'zh' not in i18n: i18n['zh'] = i18n.get('en', {})
91
-
92
- # Initialize the main orchestrator from the configuration file
93
- try:
94
- with open("config.yaml", 'r') as f: config = yaml.safe_load(f)
95
- WORKSPACE_DIR = config['application']['workspace_dir']
96
- aduc = AducOrchestrator(workspace_dir=WORKSPACE_DIR)
97
- logger.info("ADUC Orchestrator and Specialists initialized successfully.")
98
- except Exception as e:
99
- logger.error(f"CRITICAL ERROR during initialization: {e}", exc_info=True)
100
- exit()
101
-
102
- # --- 2. UI WRAPPER FUNCTIONS ---
103
- # These functions act as intermediaries between the Gradio UI components and the
104
- # AducOrchestrator. They handle input validation, progress tracking, and updating
105
- # the UI state after each operation.
106
-
107
- def run_pre_production_wrapper(prompt, num_keyframes, ref_files, resolution_str, duration_per_fragment, progress=gr.Progress()):
108
- if not ref_files:
109
- raise gr.Error("Please provide at least one reference image.")
110
-
111
- ref_paths = [aduc.process_image_for_story(f.name, 480, f"ref_processed_{i}.png") for i, f in enumerate(ref_files)]
112
-
113
- progress(0.1, desc="Generating storyboard...")
114
- storyboard, initial_ref_path, _ = aduc.task_generate_storyboard(prompt, num_keyframes, ref_paths, progress)
115
-
116
- resolution = int(resolution_str.split('x')[0])
117
-
118
- def cb_factory(scene_index, total_scenes):
119
- start_time = time.time()
120
- total_steps = 12
121
- def callback(pipe_self, step, timestep, callback_kwargs):
122
- elapsed = time.time() - start_time
123
- current_step = step + 1
124
- if current_step > 0:
125
- it_per_sec = current_step / elapsed
126
- eta = (total_steps - current_step) / it_per_sec if it_per_sec > 0 else 0
127
- desc = f"Keyframe {scene_index}/{total_scenes}: {int((current_step/total_steps)*100)}% | {current_step}/{total_steps} [{elapsed:.0f}s<{eta:.0f}s, {it_per_sec:.2f}it/s]"
128
- base_progress = 0.2 + (scene_index - 1) * (0.8 / total_scenes)
129
- step_progress = (current_step / total_steps) * (0.8 / total_scenes)
130
- progress(base_progress + step_progress, desc=desc)
131
- return {}
132
- return callback
133
-
134
- final_keyframes = aduc.task_generate_keyframes(storyboard, initial_ref_path, prompt, resolution, cb_factory)
135
-
136
- return gr.update(value=storyboard), gr.update(value=final_keyframes), gr.update(visible=True, open=True)
137
-
138
- def run_pre_production_photo_wrapper(prompt, num_keyframes, ref_files, progress=gr.Progress()):
139
- if not ref_files or len(ref_files) < 2:
140
- raise gr.Error("Photographer Mode requires at least 2 images: one base and one for the scene pool.")
141
-
142
- base_ref_paths = [aduc.process_image_for_story(ref_files[0].name, 480, "base_ref_processed_0.png")]
143
- pool_ref_paths = [aduc.process_image_for_story(f.name, 480, f"pool_ref_{i+1}.png") for i, f in enumerate(ref_files[1:])]
144
-
145
- progress(0.1, desc="Generating storyboard...")
146
- storyboard, _, _ = aduc.task_generate_storyboard(prompt, num_keyframes, base_ref_paths, progress)
147
-
148
- progress(0.5, desc="AI Photographer is selecting the best scenes...")
149
- selected_keyframes = aduc.task_select_keyframes(storyboard, base_ref_paths, pool_ref_paths)
150
-
151
- return gr.update(value=storyboard), gr.update(value=selected_keyframes), gr.update(visible=True, open=True)
152
-
153
- def run_original_production_wrapper(keyframes, prompt, duration,
154
- trim_percent, handler_strength, destination_convergence_strength,
155
- guidance_scale, stg_scale, inference_steps,
156
- video_resolution,
157
- progress=gr.Progress()):
158
- """Wrapper for Step 3: Production. Correctly handles the return dictionary."""
159
- yield {
160
- original_video_output: gr.update(value=None, visible=True, label="🎬 Producing your original master video... Please wait."),
161
- final_video_output: gr.update(value=None, visible=True, label="🎬 Production in progress..."),
162
- step4_accordion: gr.update(visible=False)
163
- }
164
-
165
- resolution = int(video_resolution.split('x')[0])
166
-
167
- result = aduc.task_produce_original_movie(
168
- keyframes, prompt, duration,
169
- int(trim_percent), handler_strength, destination_convergence_strength,
170
- guidance_scale, stg_scale, int(inference_steps),
171
- resolution, use_continuity_director=True, progress=progress
172
- )
173
-
174
- original_latents = result["latent_paths"]
175
- original_video = result["final_path"]
176
-
177
- yield {
178
- original_video_output: gr.update(value=original_video, label="✅ Original Master Video"),
179
- final_video_output: gr.update(value=original_video, label="Final Film (Result of the Last Step)"),
180
- step4_accordion: gr.update(visible=True, open=True),
181
- original_latents_paths_state: original_latents,
182
- original_video_path_state: original_video,
183
- current_source_video_state: original_video,
184
- }
185
-
186
- def run_upscaler_wrapper(latent_paths, chunk_size, progress=gr.Progress()):
187
- """Wrapper for Step 4A: Latent Upscaler. Correctly handles the generator."""
188
- if not latent_paths:
189
- raise gr.Error("Cannot run Upscaler. No original latents found. Please complete Step 3 first.")
190
-
191
- yield {
192
- upscaler_video_output: gr.update(value=None, visible=True, label="Upscaling latents and decoding video..."),
193
- final_video_output: gr.update(label="Post-Production in progress: Latent Upscaling...")
194
- }
195
-
196
- final_path = None
197
- for update in aduc.task_run_latent_upscaler(latent_paths, int(chunk_size), progress=progress):
198
- final_path = update['final_path']
199
-
200
- yield {
201
- upscaler_video_output: gr.update(value=final_path, label="✅ Latent Upscale Complete"),
202
- final_video_output: gr.update(value=final_path),
203
- upscaled_video_path_state: final_path,
204
- current_source_video_state: final_path,
205
- }
206
-
207
- def run_hd_wrapper(source_video, model_version, steps, global_prompt, progress=gr.Progress()):
208
- """Wrapper for Step 4B: HD Mastering. Correctly handles the generator."""
209
- if not source_video:
210
- raise gr.Error("Cannot run HD Mastering. No source video found. Please complete a previous step first.")
211
-
212
- yield {
213
- hd_video_output: gr.update(value=None, visible=True, label="Applying HD mastering... This may take a while."),
214
- final_video_output: gr.update(label="Post-Production in progress: HD Mastering...")
215
- }
216
-
217
- final_path = None
218
- for update in aduc.task_run_hd_mastering(source_video, model_version, int(steps), global_prompt, progress=progress):
219
- final_path = update['final_path']
220
-
221
- yield {
222
- hd_video_output: gr.update(value=final_path, label="✅ HD Mastering Complete"),
223
- final_video_output: gr.update(value=final_path),
224
- hd_video_path_state: final_path,
225
- current_source_video_state: final_path,
226
- }
227
-
228
- def run_audio_wrapper(source_video, audio_prompt, global_prompt, progress=gr.Progress()):
229
- """Wrapper for Step 4C: Audio Generation. Correctly handles the generator."""
230
- if not source_video:
231
- raise gr.Error("Cannot run Audio Generation. No source video found. Please complete a previous step first.")
232
-
233
- yield {
234
- audio_video_output: gr.update(value=None, visible=True, label="Generating audio and muxing..."),
235
- final_video_output: gr.update(label="Post-Production in progress: Audio Generation...")
236
- }
237
-
238
- final_audio_prompt = audio_prompt if audio_prompt and audio_prompt.strip() else global_prompt
239
-
240
- final_path = None
241
- for update in aduc.task_run_audio_generation(source_video, final_audio_prompt, progress=progress):
242
- final_path = update['final_path']
243
-
244
- yield {
245
- audio_video_output: gr.update(value=final_path, label="✅ Audio Generation Complete"),
246
- final_video_output: gr.update(value=final_path),
247
- }
248
-
249
- def get_log_content():
250
- try:
251
- with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
252
- return f.read()
253
- except FileNotFoundError:
254
- return "Log file not yet created. Start a generation."
255
-
256
- def update_ui_language(lang_emoji):
257
- lang_code_map = {"🇧🇷": "pt", "🇺🇸": "en", "🇨🇳": "zh"}
258
- lang_code = lang_code_map.get(lang_emoji, "en")
259
- lang_map = i18n.get(lang_code, i18n.get('en', {}))
260
- return {
261
- # General
262
- title_md: gr.update(value=f"# {lang_map.get('app_title')}"),
263
- subtitle_md: gr.update(value=lang_map.get('app_subtitle')),
264
- lang_selector: gr.update(label=lang_map.get('lang_selector_label')),
265
- # Step 1: Pre-Production
266
- step1_accordion: gr.update(label=lang_map.get('step1_accordion')),
267
- prompt_input: gr.update(label=lang_map.get('prompt_label')),
268
- ref_image_input: gr.update(label=lang_map.get('ref_images_label')),
269
- num_keyframes_slider: gr.update(label=lang_map.get('keyframes_label')),
270
- duration_per_fragment_slider: gr.update(label=lang_map.get('duration_label'), info=lang_map.get('duration_info')),
271
- storyboard_and_keyframes_button: gr.update(value=lang_map.get('storyboard_and_keyframes_button')),
272
- storyboard_from_photos_button: gr.update(value=lang_map.get('storyboard_from_photos_button')),
273
- step1_mode_b_info_md: gr.update(value=f"*{lang_map.get('step1_mode_b_info')}*"),
274
- storyboard_output: gr.update(label=lang_map.get('storyboard_output_label')),
275
- keyframe_gallery: gr.update(label=lang_map.get('keyframes_gallery_label')),
276
- # Step 3: Production
277
- step3_accordion: gr.update(label=lang_map.get('step3_accordion')),
278
- step3_description_md: gr.update(value=lang_map.get('step3_description')),
279
- produce_original_button: gr.update(value=lang_map.get('produce_original_button')),
280
- ltx_advanced_options_accordion: gr.update(label=lang_map.get('ltx_advanced_options')),
281
- causality_accordion: gr.update(label=lang_map.get('causality_controls_title')),
282
- trim_percent_slider: gr.update(label=lang_map.get('trim_percent_label'), info=lang_map.get('trim_percent_info')),
283
- forca_guia_slider: gr.update(label=lang_map.get('forca_guia_label'), info=lang_map.get('forca_guia_info')),
284
- convergencia_destino_slider: gr.update(label=lang_map.get('convergencia_final_label'), info=lang_map.get('convergencia_final_info')),
285
- ltx_pipeline_accordion: gr.update(label=lang_map.get('ltx_pipeline_options')),
286
- guidance_scale_slider: gr.update(label=lang_map.get('guidance_scale_label'), info=lang_map.get('guidance_scale_info')),
287
- stg_scale_slider: gr.update(label=lang_map.get('stg_scale_label'), info=lang_map.get('stg_scale_info')),
288
- inference_steps_slider: gr.update(label=lang_map.get('steps_label'), info=lang_map.get('steps_info')),
289
- # Step 4: Post-Production
290
- step4_accordion: gr.update(label=lang_map.get('step4_accordion')),
291
- step4_description_md: gr.update(value=lang_map.get('step4_description')),
292
- sub_step_a_accordion: gr.update(label=lang_map.get('sub_step_a_upscaler')),
293
- upscaler_description_md: gr.update(value=lang_map.get('upscaler_description')),
294
- upscaler_options_accordion: gr.update(label=lang_map.get('upscaler_options')),
295
- upscaler_chunk_size_slider: gr.update(label=lang_map.get('upscaler_chunk_size_label'), info=lang_map.get('upscaler_chunk_size_info')),
296
- run_upscaler_button: gr.update(value=lang_map.get('run_upscaler_button')),
297
- sub_step_b_accordion: gr.update(label=lang_map.get('sub_step_b_hd')),
298
- hd_description_md: gr.update(value=lang_map.get('hd_description')),
299
- hd_options_accordion: gr.update(label=lang_map.get('hd_options')),
300
- hd_model_radio: gr.update(label=lang_map.get('hd_model_label')),
301
- hd_steps_slider: gr.update(label=lang_map.get('hd_steps_label'), info=lang_map.get('hd_steps_info')),
302
- run_hd_button: gr.update(value=lang_map.get('run_hd_button')),
303
- sub_step_c_accordion: gr.update(label=lang_map.get('sub_step_c_audio')),
304
- audio_description_md: gr.update(value=lang_map.get('audio_description')),
305
- audio_options_accordion: gr.update(label=lang_map.get('audio_options')),
306
- audio_prompt_input: gr.update(label=lang_map.get('audio_prompt_label'), info=lang_map.get('audio_prompt_info')),
307
- run_audio_button: gr.update(value=lang_map.get('run_audio_button')),
308
- # Final Outputs & Logs
309
- final_video_output: gr.update(label=lang_map.get('final_video_label')),
310
- log_accordion: gr.update(label=lang_map.get('log_accordion_label')),
311
- log_display: gr.update(label=lang_map.get('log_display_label')),
312
- update_log_button: gr.update(value=lang_map.get('update_log_button')),
313
- }
314
-
315
- # --- 3. GRADIO UI DEFINITION ---
316
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
317
- default_lang = i18n.get('pt', {})
318
-
319
- original_latents_paths_state = gr.State(value=None)
320
- original_video_path_state = gr.State(value=None)
321
- upscaled_video_path_state = gr.State(value=None)
322
- hd_video_path_state = gr.State(value=None)
323
- current_source_video_state = gr.State(value=None)
324
-
325
- title_md = gr.Markdown(f"# {default_lang.get('app_title')}")
326
- subtitle_md = gr.Markdown(default_lang.get('app_subtitle'))
327
- with gr.Row():
328
- lang_selector = gr.Radio(["🇧🇷", "🇺🇸", "🇨🇳"], value="🇧🇷", label=default_lang.get('lang_selector_label'))
329
- resolution_selector = gr.Radio(["480x480", "720x720", "960x960"], value="480x480", label="Base Resolution")
330
-
331
- with gr.Accordion(default_lang.get('step1_accordion'), open=True) as step1_accordion:
332
- prompt_input = gr.Textbox(label=default_lang.get('prompt_label'), value="A majestic lion walks across the savanna, sits down, and then roars at the setting sun.")
333
- ref_image_input = gr.File(label=default_lang.get('ref_images_label'), file_count="multiple", file_types=["image"])
334
- with gr.Row():
335
- num_keyframes_slider = gr.Slider(minimum=3, maximum=42, value=5, step=1, label=default_lang.get('keyframes_label'))
336
- duration_per_fragment_slider = gr.Slider(label=default_lang.get('duration_label'), info=default_lang.get('duration_info'), minimum=2.0, maximum=10.0, value=4.0, step=0.1)
337
- with gr.Row():
338
- storyboard_and_keyframes_button = gr.Button(default_lang.get('storyboard_and_keyframes_button'), variant="primary")
339
- storyboard_from_photos_button = gr.Button(default_lang.get('storyboard_from_photos_button'))
340
- step1_mode_b_info_md = gr.Markdown(f"*{default_lang.get('step1_mode_b_info')}*")
341
- storyboard_output = gr.JSON(label=default_lang.get('storyboard_output_label'))
342
- keyframe_gallery = gr.Gallery(label=default_lang.get('keyframes_gallery_label'), visible=True, object_fit="contain", height="auto", type="filepath")
343
-
344
- with gr.Accordion(default_lang.get('step3_accordion'), open=False, visible=False) as step3_accordion:
345
- step3_description_md = gr.Markdown(default_lang.get('step3_description'))
346
- with gr.Accordion(default_lang.get('ltx_advanced_options'), open=False) as ltx_advanced_options_accordion:
347
- with gr.Accordion(default_lang.get('causality_controls_title'), open=True) as causality_accordion:
348
- trim_percent_slider = gr.Slider(minimum=10, maximum=90, value=50, step=5, label=default_lang.get('trim_percent_label'), info=default_lang.get('trim_percent_info'))
349
- with gr.Row():
350
- forca_guia_slider = gr.Slider(label=default_lang.get('forca_guia_label'), minimum=0.0, maximum=1.0, value=0.5, step=0.05, info=default_lang.get('forca_guia_info'))
351
- convergencia_destino_slider = gr.Slider(label=default_lang.get('convergencia_final_label'), minimum=0.0, maximum=1.0, value=0.75, step=0.05, info=default_lang.get('convergencia_final_info'))
352
- with gr.Accordion(default_lang.get('ltx_pipeline_options'), open=True) as ltx_pipeline_accordion:
353
- with gr.Row():
354
- guidance_scale_slider = gr.Slider(minimum=1.0, maximum=10.0, value=2.0, step=0.1, label=default_lang.get('guidance_scale_label'), info=default_lang.get('guidance_scale_info'))
355
- stg_scale_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.025, step=0.005, label=default_lang.get('stg_scale_label'), info=default_lang.get('stg_scale_info'))
356
- inference_steps_slider = gr.Slider(minimum=10, maximum=50, value=20, step=1, label=default_lang.get('steps_label'), info=default_lang.get('steps_info'))
357
- produce_original_button = gr.Button(default_lang.get('produce_original_button'), variant="primary")
358
- original_video_output = gr.Video(label="Original Master Video", visible=False, interactive=False)
359
-
360
- with gr.Accordion(default_lang.get('step4_accordion'), open=False, visible=False) as step4_accordion:
361
- step4_description_md = gr.Markdown(default_lang.get('step4_description'))
362
- with gr.Accordion(default_lang.get('sub_step_a_upscaler'), open=True) as sub_step_a_accordion:
363
- upscaler_description_md = gr.Markdown(default_lang.get('upscaler_description'))
364
- with gr.Accordion(default_lang.get('upscaler_options'), open=False) as upscaler_options_accordion:
365
- upscaler_chunk_size_slider = gr.Slider(minimum=1, maximum=10, value=2, step=1, label=default_lang.get('upscaler_chunk_size_label'), info=default_lang.get('upscaler_chunk_size_info'))
366
- run_upscaler_button = gr.Button(default_lang.get('run_upscaler_button'), variant="secondary")
367
- upscaler_video_output = gr.Video(label="Upscaled Video", visible=False, interactive=False)
368
- with gr.Accordion(default_lang.get('sub_step_b_hd'), open=True) as sub_step_b_accordion:
369
- hd_description_md = gr.Markdown(default_lang.get('hd_description'))
370
- with gr.Accordion(default_lang.get('hd_options'), open=False) as hd_options_accordion:
371
- hd_model_radio = gr.Radio(["3B", "7B"], value="7B", label=default_lang.get('hd_model_label'))
372
- hd_steps_slider = gr.Slider(minimum=20, maximum=150, value=100, step=5, label=default_lang.get('hd_steps_label'), info=default_lang.get('hd_steps_info'))
373
- run_hd_button = gr.Button(default_lang.get('run_hd_button'), variant="secondary")
374
- hd_video_output = gr.Video(label="HD Mastered Video", visible=False, interactive=False)
375
- with gr.Accordion(default_lang.get('sub_step_c_audio'), open=True) as sub_step_c_accordion:
376
- audio_description_md = gr.Markdown(default_lang.get('audio_description'))
377
- with gr.Accordion(default_lang.get('audio_options'), open=False) as audio_options_accordion:
378
- audio_prompt_input = gr.Textbox(label=default_lang.get('audio_prompt_label'), info=default_lang.get('audio_prompt_info'), lines=3)
379
- run_audio_button = gr.Button(default_lang.get('run_audio_button'), variant="secondary")
380
- audio_video_output = gr.Video(label="Video with Audio", visible=False, interactive=False)
381
-
382
- final_video_output = gr.Video(label=default_lang.get('final_video_label'), visible=False, interactive=False)
383
- with gr.Accordion(default_lang.get('log_accordion_label'), open=False) as log_accordion:
384
- log_display = gr.Textbox(label=default_lang.get('log_display_label'), lines=20, interactive=False, autoscroll=True)
385
- update_log_button = gr.Button(default_lang.get('update_log_button'))
386
-
387
- # --- 4. UI EVENT CONNECTIONS ---
388
- all_ui_components = list(update_ui_language('🇧🇷').keys())
389
- lang_selector.change(fn=update_ui_language, inputs=lang_selector, outputs=all_ui_components)
390
-
391
- storyboard_and_keyframes_button.click(fn=run_pre_production_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input, resolution_selector, duration_per_fragment_slider], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
392
- storyboard_from_photos_button.click(fn=run_pre_production_photo_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
393
-
394
- produce_original_button.click(
395
- fn=run_original_production_wrapper,
396
- inputs=[keyframe_gallery, prompt_input, duration_per_fragment_slider, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, guidance_scale_slider, stg_scale_slider, inference_steps_slider, resolution_selector],
397
- outputs=[original_video_output, final_video_output, step4_accordion, original_latents_paths_state, original_video_path_state, current_source_video_state]
398
- )
399
-
400
- run_upscaler_button.click(
401
- fn=run_upscaler_wrapper,
402
- inputs=[original_latents_paths_state, upscaler_chunk_size_slider],
403
- outputs=[upscaler_video_output, final_video_output, upscaled_video_path_state, current_source_video_state]
404
- )
405
-
406
- run_hd_button.click(
407
- fn=run_hd_wrapper,
408
- inputs=[current_source_video_state, hd_model_radio, hd_steps_slider, prompt_input],
409
- outputs=[hd_video_output, final_video_output, hd_video_path_state, current_source_video_state]
410
- )
411
-
412
- run_audio_button.click(
413
- fn=run_audio_wrapper,
414
- inputs=[current_source_video_state, audio_prompt_input, prompt_input],
415
- outputs=[audio_video_output, final_video_output]
416
- )
417
-
418
- update_log_button.click(fn=get_log_content, inputs=[], outputs=[log_display])
419
-
420
- # --- 5. APPLICATION LAUNCH ---
421
- if __name__ == "__main__":
422
- if os.path.exists(WORKSPACE_DIR):
423
- logger.info(f"Clearing previous workspace at: {WORKSPACE_DIR}")
424
- shutil.rmtree(WORKSPACE_DIR)
425
- os.makedirs(WORKSPACE_DIR)
426
- logger.info(f"Application started. Launching Gradio interface...")
427
- demo.queue().launch()