Aduc-sdr committed on
Commit
228911e
·
verified ·
1 Parent(s): ec7e114

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +81 -95
app.py CHANGED
@@ -2,7 +2,7 @@
2
  #
3
  # Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
4
  #
5
- # Version: 2.3.0
6
  #
7
  # Contact:
8
  # Carlos Rodrigues dos Santos
@@ -27,6 +27,11 @@
27
  #
28
  # PENDING PATENT NOTICE: The ADUC method and system implemented in this
29
  # software is in the process of being patented. Please see NOTICE.md for details.
 
 
 
 
 
30
 
31
  import gradio as gr
32
  import yaml
@@ -40,48 +45,26 @@ import json
40
  from aduc_orchestrator import AducOrchestrator
41
 
42
  # --- CUSTOM UI THEME DEFINITION ---
43
- # This theme provides a professional, dark-mode look and feel, suitable for creative tools.
44
  cinematic_theme = gr.themes.Base(
45
  primary_hue=gr.themes.colors.indigo,
46
  secondary_hue=gr.themes.colors.purple,
47
  neutral_hue=gr.themes.colors.slate,
48
  font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"),
49
  ).set(
50
- # -- Colors --
51
- body_background_fill="#111827", # Slate 900
52
- body_text_color="#E5E7EB", # Slate 200
53
-
54
- # -- Buttons --
55
- button_primary_background_fill="linear-gradient(90deg, #4F46E5, #8B5CF6)", # Gradient Indigo -> Purple
56
- button_primary_text_color="#FFFFFF",
57
- button_secondary_background_fill="#374151", # Slate 700
58
- button_secondary_border_color="#4B5563",
59
- button_secondary_text_color="#E5E7EB",
60
-
61
- # -- Blocks and Containers --
62
- block_background_fill="#1F2937", # Slate 800
63
- block_border_width="1px",
64
- block_border_color="#374151", # Slate 700
65
- block_label_background_fill="#374151",
66
- block_label_text_color="#E5E7EB",
67
- block_title_text_color="#FFFFFF",
68
-
69
- # -- Input Fields --
70
- input_background_fill="#374151",
71
- input_border_color="#4B5563",
72
- input_placeholder_color="#9CA3AF",
73
-
74
- # -- Spacing and Radius --
75
- #block_radius_size="lg",
76
- #spacing_size="lg",
77
- #layout_gap="lg",
78
  )
79
 
80
  # --- 1. CONFIGURATION AND INITIALIZATION ---
81
  LOG_FILE_PATH = "aduc_log.txt"
82
- if os.path.exists(LOG_FILE_PATH):
83
- os.remove(LOG_FILE_PATH)
84
-
85
  log_format = '%(asctime)s - %(levelname)s - [%(name)s:%(funcName)s] - %(message)s'
86
  root_logger = logging.getLogger()
87
  root_logger.setLevel(logging.INFO)
@@ -116,62 +99,67 @@ except Exception as e:
116
  exit()
117
 
118
  # --- 2. UI WRAPPER FUNCTIONS ---
119
- def run_pre_production_wrapper(prompt, num_keyframes, ref_files, resolution_str, duration_per_fragment, progress=gr.Progress()):
 
120
  if not ref_files: raise gr.Error("Please provide at least one reference image.")
121
- ref_paths = [aduc.process_image_for_story(f.name, 480, f"ref_processed_{i}.png") for i, f in enumerate(ref_files)]
122
  progress(0.1, desc="Generating storyboard...")
123
- storyboard, initial_ref_path, _ = aduc.task_generate_storyboard(prompt, num_keyframes, ref_paths, progress)
124
- resolution = int(resolution_str.split('x')[0])
125
- def cb_factory(scene_index, total_scenes):
126
- start_time = time.time()
127
- total_steps = 12
128
- def callback(pipe_self, step, timestep, callback_kwargs):
129
- elapsed, current_step = time.time() - start_time, step + 1
130
- if current_step > 0:
131
- it_per_sec = current_step / elapsed
132
- eta = (total_steps - current_step) / it_per_sec if it_per_sec > 0 else 0
133
- desc = f"Keyframe {scene_index}/{total_scenes}: {int((current_step/total_steps)*100)}% | {current_step}/{total_steps} [{elapsed:.0f}s<{eta:.0f}s, {it_per_sec:.2f}it/s]"
134
- base_progress = 0.2 + (scene_index - 1) * (0.8 / total_scenes)
135
- step_progress = (current_step / total_steps) * (0.8 / total_scenes)
136
- progress(base_progress + step_progress, desc=desc)
137
- return {}
138
- return callback
139
- final_keyframes = aduc.task_generate_keyframes(storyboard, initial_ref_path, prompt, resolution, cb_factory)
140
- return gr.update(value=storyboard), gr.update(value=final_keyframes), gr.update(visible=True, open=True)
141
 
142
  def run_pre_production_photo_wrapper(prompt, num_keyframes, ref_files, progress=gr.Progress()):
143
- if not ref_files or len(ref_files) < 2: raise gr.Error("Photographer Mode requires at least 2 images: one base and one for the scene pool.")
144
- base_ref_paths = [aduc.process_image_for_story(ref_files[0].name, 480, "base_ref_processed_0.png")]
145
- pool_ref_paths = [aduc.process_image_for_story(f.name, 480, f"pool_ref_{i+1}.png") for i, f in enumerate(ref_files[1:])]
 
146
  progress(0.1, desc="Generating storyboard...")
147
  storyboard, _, _ = aduc.task_generate_storyboard(prompt, num_keyframes, base_ref_paths, progress)
148
  progress(0.5, desc="AI Photographer is selecting the best scenes...")
149
  selected_keyframes = aduc.task_select_keyframes(storyboard, base_ref_paths, pool_ref_paths)
150
  return gr.update(value=storyboard), gr.update(value=selected_keyframes), gr.update(visible=True, open=True)
151
 
152
- def run_original_production_wrapper(keyframes, prompt, duration, trim_percent, handler_strength, dest_strength, guidance_scale, stg_scale, steps, resolution, progress=gr.Progress()):
153
- yield {original_video_output: gr.update(value=None, visible=True, label="🎬 Producing your original master video... Please wait."), final_video_output: gr.update(value=None, visible=True, label="🎬 Production in progress..."), step4_accordion: gr.update(visible=False)}
 
 
 
 
 
 
 
 
154
  res = int(resolution.split('x')[0])
155
- result = aduc.task_produce_original_movie(keyframes, prompt, duration, int(trim_percent), handler_strength, dest_strength, guidance_scale, stg_scale, int(steps), res, use_continuity_director=True, progress=progress)
156
- yield {original_video_output: gr.update(value=result["final_path"], label="✅ Original Master Video"), final_video_output: gr.update(value=result["final_path"], label="Final Film (Result of the Last Step)"), step4_accordion: gr.update(visible=True, open=True), original_latents_paths_state: result["latent_paths"], original_video_path_state: result["final_path"], current_source_video_state: result["final_path"]}
157
-
158
- def run_upscaler_wrapper(latent_paths, chunk_size, progress=gr.Progress()):
159
- if not latent_paths: raise gr.Error("Cannot run Upscaler. No original latents found. Please complete Step 3 first.")
160
- yield {upscaler_video_output: gr.update(value=None, visible=True, label="Upscaling latents and decoding video..."), final_video_output: gr.update(label="Post-Production in progress: Latent Upscaling...")}
161
- final_path = None
162
- for update in aduc.task_run_latent_upscaler(latent_paths, int(chunk_size), progress=progress): final_path = update['final_path']
163
- yield {upscaler_video_output: gr.update(value=final_path, label="✅ Latent Upscale Complete"), final_video_output: gr.update(value=final_path), upscaled_video_path_state: final_path, current_source_video_state: final_path}
 
 
 
 
 
 
 
 
 
 
 
164
 
165
  def run_hd_wrapper(source_video, model_version, steps, global_prompt, progress=gr.Progress()):
166
- if not source_video: raise gr.Error("Cannot run HD Mastering. No source video found. Please complete a previous step first.")
167
- yield {hd_video_output: gr.update(value=None, visible=True, label="Applying HD mastering... This may take a while."), final_video_output: gr.update(label="Post-Production in progress: HD Mastering...")}
168
  final_path = None
169
  for update in aduc.task_run_hd_mastering(source_video, model_version, int(steps), global_prompt, progress=progress): final_path = update['final_path']
170
  yield {hd_video_output: gr.update(value=final_path, label="✅ HD Mastering Complete"), final_video_output: gr.update(value=final_path), hd_video_path_state: final_path, current_source_video_state: final_path}
171
 
172
  def run_audio_wrapper(source_video, audio_prompt, global_prompt, progress=gr.Progress()):
173
- if not source_video: raise gr.Error("Cannot run Audio Generation. No source video found. Please complete a previous step first.")
174
- yield {audio_video_output: gr.update(value=None, visible=True, label="Generating audio and muxing..."), final_video_output: gr.update(label="Post-Production in progress: Audio Generation...")}
175
  final_audio_prompt = audio_prompt if audio_prompt and audio_prompt.strip() else global_prompt
176
  final_path = None
177
  for update in aduc.task_run_audio_generation(source_video, final_audio_prompt, progress=progress): final_path = update['final_path']
@@ -181,21 +169,12 @@ def get_log_content():
181
  try:
182
  with open(LOG_FILE_PATH, "r", encoding="utf-8") as f: return f.read()
183
  except FileNotFoundError:
184
- return "Log file not yet created. Start a generation."
185
-
186
- def update_ui_language(lang_emoji):
187
- lang_code_map = {"🇧🇷": "pt", "🇺🇸": "en", "🇨🇳": "zh"}
188
- lang_code = lang_code_map.get(lang_emoji, "en")
189
- lang_map = i18n.get(lang_code, i18n.get('en', {}))
190
- # ... This dictionary mapping will be long, so it's defined once in the main block
191
 
192
  # --- 3. GRADIO UI DEFINITION ---
193
  with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
194
  default_lang = i18n.get('pt', {})
195
-
196
- original_latents_paths_state = gr.State(value=None)
197
  original_video_path_state = gr.State(value=None)
198
- upscaled_video_path_state = gr.State(value=None)
199
  hd_video_path_state = gr.State(value=None)
200
  current_source_video_state = gr.State(value=None)
201
 
@@ -216,7 +195,7 @@ with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
216
  storyboard_from_photos_button = gr.Button(default_lang.get('storyboard_from_photos_button'), variant="secondary")
217
  step1_mode_b_info_md = gr.Markdown(f"*{default_lang.get('step1_mode_b_info')}*")
218
  storyboard_output = gr.JSON(label=default_lang.get('storyboard_output_label'))
219
- keyframe_gallery = gr.Gallery(label=default_lang.get('keyframes_gallery_label'), visible=True, object_fit="contain", height="auto", type="filepath")
220
 
221
  with gr.Accordion(default_lang.get('step3_accordion'), open=False, visible=False) as step3_accordion:
222
  step3_description_md = gr.Markdown(default_lang.get('step3_description'))
@@ -236,12 +215,7 @@ with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
236
 
237
  with gr.Accordion(default_lang.get('step4_accordion'), open=False, visible=False) as step4_accordion:
238
  step4_description_md = gr.Markdown(default_lang.get('step4_description'))
239
- with gr.Accordion(default_lang.get('sub_step_a_upscaler'), open=True) as sub_step_a_accordion:
240
- upscaler_description_md = gr.Markdown(default_lang.get('upscaler_description'))
241
- with gr.Accordion(default_lang.get('upscaler_options'), open=False) as upscaler_options_accordion:
242
- upscaler_chunk_size_slider = gr.Slider(minimum=1, maximum=10, value=2, step=1, label=default_lang.get('upscaler_chunk_size_label'), info=default_lang.get('upscaler_chunk_size_info'))
243
- run_upscaler_button = gr.Button(default_lang.get('run_upscaler_button'), variant="secondary")
244
- upscaler_video_output = gr.Video(label="Upscaled Video", visible=False, interactive=False)
245
  with gr.Accordion(default_lang.get('sub_step_b_hd'), open=True) as sub_step_b_accordion:
246
  hd_description_md = gr.Markdown(default_lang.get('hd_description'))
247
  with gr.Accordion(default_lang.get('hd_options'), open=False) as hd_options_accordion:
@@ -262,20 +236,32 @@ with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
262
  update_log_button = gr.Button(default_lang.get('update_log_button'))
263
 
264
  # --- 4. UI EVENT CONNECTIONS ---
265
- all_ui_components = [title_md, subtitle_md, lang_selector, step1_accordion, prompt_input, ref_image_input, num_keyframes_slider, duration_per_fragment_slider, storyboard_and_keyframes_button, storyboard_from_photos_button, step1_mode_b_info_md, storyboard_output, keyframe_gallery, step3_accordion, step3_description_md, produce_original_button, ltx_advanced_options_accordion, causality_accordion, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, ltx_pipeline_accordion, guidance_scale_slider, stg_scale_slider, inference_steps_slider, step4_accordion, step4_description_md, sub_step_a_accordion, upscaler_description_md, upscaler_options_accordion, upscaler_chunk_size_slider, run_upscaler_button, sub_step_b_accordion, hd_description_md, hd_options_accordion, hd_model_radio, hd_steps_slider, run_hd_button, sub_step_c_accordion, audio_description_md, audio_options_accordion, audio_prompt_input, run_audio_button, final_video_output, log_accordion, log_display, update_log_button]
266
  def create_lang_update_fn():
 
267
  def update_lang(lang_emoji):
268
  lang_code_map = {"🇧🇷": "pt", "🇺🇸": "en", "🇨🇳": "zh"}
269
  lang_code = lang_code_map.get(lang_emoji, "en")
270
  lang_map = i18n.get(lang_code, i18n.get('en', {}))
271
- return [gr.update(value=f"<h1>{lang_map.get('app_title')}</h1>"),gr.update(value=f"<p>{lang_map.get('app_subtitle')}</p>"),gr.update(label=lang_map.get('lang_selector_label')),gr.update(label=lang_map.get('step1_accordion')),gr.update(label=lang_map.get('prompt_label')),gr.update(label=lang_map.get('ref_images_label')),gr.update(label=lang_map.get('keyframes_label')),gr.update(label=lang_map.get('duration_label'), info=lang_map.get('duration_info')),gr.update(value=lang_map.get('storyboard_and_keyframes_button')),gr.update(value=lang_map.get('storyboard_from_photos_button')),gr.update(value=f"*{lang_map.get('step1_mode_b_info')}*"),gr.update(label=lang_map.get('storyboard_output_label')),gr.update(label=lang_map.get('keyframes_gallery_label')),gr.update(label=lang_map.get('step3_accordion')),gr.update(value=lang_map.get('step3_description')),gr.update(value=lang_map.get('produce_original_button')),gr.update(label=lang_map.get('ltx_advanced_options')),gr.update(label=lang_map.get('causality_controls_title')),gr.update(label=lang_map.get('trim_percent_label'), info=lang_map.get('trim_percent_info')),gr.update(label=lang_map.get('forca_guia_label'), info=lang_map.get('forca_guia_info')),gr.update(label=lang_map.get('convergencia_final_label'), info=lang_map.get('convergencia_final_info')),gr.update(label=lang_map.get('ltx_pipeline_options')),gr.update(label=lang_map.get('guidance_scale_label'), info=lang_map.get('guidance_scale_info')),gr.update(label=lang_map.get('stg_scale_label'), info=lang_map.get('stg_scale_info')),gr.update(label=lang_map.get('steps_label'), info=lang_map.get('steps_info')),gr.update(label=lang_map.get('step4_accordion')),gr.update(value=lang_map.get('step4_description')),gr.update(label=lang_map.get('sub_step_a_upscaler')),gr.update(value=lang_map.get('upscaler_description')),gr.update(label=lang_map.get('upscaler_options')),gr.update(label=lang_map.get('upscaler_chunk_size_label'), 
info=lang_map.get('upscaler_chunk_size_info')),gr.update(value=lang_map.get('run_upscaler_button')),gr.update(label=lang_map.get('sub_step_b_hd')),gr.update(value=lang_map.get('hd_description')),gr.update(label=lang_map.get('hd_options')),gr.update(label=lang_map.get('hd_model_label')),gr.update(label=lang_map.get('hd_steps_label'), info=lang_map.get('hd_steps_info')),gr.update(value=lang_map.get('run_hd_button')),gr.update(label=lang_map.get('sub_step_c_audio')),gr.update(value=lang_map.get('audio_description')),gr.update(label=lang_map.get('audio_options')),gr.update(label=lang_map.get('audio_prompt_label'), info=lang_map.get('audio_prompt_info')),gr.update(value=lang_map.get('run_audio_button')),gr.update(label=lang_map.get('final_video_label')),gr.update(label=lang_map.get('log_accordion_label')),gr.update(label=lang_map.get('log_display_label')),gr.update(value=lang_map.get('update_log_button'))]
 
 
 
 
 
 
 
272
  return update_lang
273
- lang_selector.change(fn=create_lang_update_fn(), inputs=lang_selector, outputs=all_ui_components)
274
-
275
- storyboard_and_keyframes_button.click(fn=run_pre_production_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input, resolution_selector, duration_per_fragment_slider], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
 
276
  storyboard_from_photos_button.click(fn=run_pre_production_photo_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
277
- produce_original_button.click(fn=run_original_production_wrapper, inputs=[keyframe_gallery, prompt_input, duration_per_fragment_slider, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, guidance_scale_slider, stg_scale_slider, inference_steps_slider, resolution_selector], outputs=[original_video_output, final_video_output, step4_accordion, original_latents_paths_state, original_video_path_state, current_source_video_state])
278
- run_upscaler_button.click(fn=run_upscaler_wrapper, inputs=[original_latents_paths_state, upscaler_chunk_size_slider], outputs=[upscaler_video_output, final_video_output, upscaled_video_path_state, current_source_video_state])
 
 
 
279
  run_hd_button.click(fn=run_hd_wrapper, inputs=[current_source_video_state, hd_model_radio, hd_steps_slider, prompt_input], outputs=[hd_video_output, final_video_output, hd_video_path_state, current_source_video_state])
280
  run_audio_button.click(fn=run_audio_wrapper, inputs=[current_source_video_state, audio_prompt_input, prompt_input], outputs=[audio_video_output, final_video_output])
281
  update_log_button.click(fn=get_log_content, inputs=[], outputs=[log_display])
 
2
  #
3
  # Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
4
  #
5
+ # Version: 3.0.0
6
  #
7
  # Contact:
8
  # Carlos Rodrigues dos Santos
 
27
  #
28
  # PENDING PATENT NOTICE: The ADUC method and system implemented in this
29
  # software is in the process of being patented. Please see NOTICE.md for details.
30
+ #
31
+ # This version fully integrates the user interface with the new unified
32
+ # Deformes7DEngine, streamlining the workflow into a two-stage process:
33
+ # 1. Pre-production (storyboarding)
34
+ # 2. Unified Production (interleaved keyframing and video rendering)
35
 
36
  import gradio as gr
37
  import yaml
 
45
  from aduc_orchestrator import AducOrchestrator
46
 
47
  # --- CUSTOM UI THEME DEFINITION ---
 
48
  cinematic_theme = gr.themes.Base(
49
  primary_hue=gr.themes.colors.indigo,
50
  secondary_hue=gr.themes.colors.purple,
51
  neutral_hue=gr.themes.colors.slate,
52
  font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"),
53
  ).set(
54
+ body_background_fill="#111827", body_text_color="#E5E7EB",
55
+ button_primary_background_fill="linear-gradient(90deg, #4F46E5, #8B5CF6)",
56
+ button_primary_text_color="#FFFFFF", button_secondary_background_fill="#374151",
57
+ button_secondary_border_color="#4B5563", button_secondary_text_color="#E5E7EB",
58
+ block_background_fill="#1F2937", block_border_width="1px", block_border_color="#374151",
59
+ block_label_background_fill="#374151", block_label_text_color="#E5E7EB",
60
+ block_title_text_color="#FFFFFF", input_background_fill="#374151",
61
+ input_border_color="#4B5563", input_placeholder_color="#9CA3AF",
62
+ #block_radius_size="lg", spacing_size="lg", layout_gap="lg",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  )
64
 
65
  # --- 1. CONFIGURATION AND INITIALIZATION ---
66
  LOG_FILE_PATH = "aduc_log.txt"
67
+ if os.path.exists(LOG_FILE_PATH): os.remove(LOG_FILE_PATH)
 
 
68
  log_format = '%(asctime)s - %(levelname)s - [%(name)s:%(funcName)s] - %(message)s'
69
  root_logger = logging.getLogger()
70
  root_logger.setLevel(logging.INFO)
 
99
  exit()
100
 
101
  # --- 2. UI WRAPPER FUNCTIONS ---
102
+ def run_pre_production_wrapper(prompt, num_keyframes, ref_files, progress=gr.Progress()):
103
+ """Wrapper for Pre-Production: Generates ONLY the storyboard."""
104
  if not ref_files: raise gr.Error("Please provide at least one reference image.")
105
+ ref_paths = [f.name for f in ref_files]
106
  progress(0.1, desc="Generating storyboard...")
107
+ storyboard, _, _ = aduc.task_generate_storyboard(prompt, num_keyframes, ref_paths, progress)
108
+ return gr.update(value=storyboard), gr.update(value=None, visible=True), gr.update(visible=True, open=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
  def run_pre_production_photo_wrapper(prompt, num_keyframes, ref_files, progress=gr.Progress()):
111
+ """Wrapper for Photographer Mode: Generates storyboard and selects keyframes from user pool."""
112
+ if not ref_files or len(ref_files) < 2: raise gr.Error("Photographer Mode requires at least 2 images.")
113
+ base_ref_paths = [ref_files[0].name]
114
+ pool_ref_paths = [f.name for f in ref_files[1:]]
115
  progress(0.1, desc="Generating storyboard...")
116
  storyboard, _, _ = aduc.task_generate_storyboard(prompt, num_keyframes, base_ref_paths, progress)
117
  progress(0.5, desc="AI Photographer is selecting the best scenes...")
118
  selected_keyframes = aduc.task_select_keyframes(storyboard, base_ref_paths, pool_ref_paths)
119
  return gr.update(value=storyboard), gr.update(value=selected_keyframes), gr.update(visible=True, open=True)
120
 
121
+ def run_unified_production_wrapper(initial_ref_files, prompt, duration, trim_percent, handler_strength, dest_strength, guidance_scale, stg_scale, steps, resolution, progress=gr.Progress()):
122
+ """Wrapper for the new unified Step 3, calling the Deformes7D engine."""
123
+ yield {
124
+ original_video_output: gr.update(value=None, visible=True, label="🎬 Engaging Turbo Intergalactic Engine... Please wait."),
125
+ final_video_output: gr.update(value=None, visible=True, label="🎬 Production in progress..."),
126
+ keyframe_gallery: gr.update(value=None),
127
+ step4_accordion: gr.update(visible=False)
128
+ }
129
+ if not initial_ref_files: raise gr.Error("Please provide at least one reference image to start the production.")
130
+
131
  res = int(resolution.split('x')[0])
132
+ initial_ref_paths = [f.name for f in initial_ref_files]
133
+
134
+ result = aduc.task_produce_full_movie(
135
+ initial_ref_paths=initial_ref_paths, global_prompt=prompt, video_resolution=res,
136
+ seconds_per_fragment=duration, trim_percent=int(trim_percent),
137
+ handler_strength=handler_strength, dest_strength=dest_strength,
138
+ guidance_scale=guidance_scale, stg_scale=stg_scale, inference_steps=int(steps),
139
+ progress=progress
140
+ )
141
+
142
+ final_video = result["final_path"]
143
+ final_keyframes = result["all_keyframes"]
144
+ yield {
145
+ original_video_output: gr.update(value=final_video, label="✅ Original Master Video (7D Engine)"),
146
+ final_video_output: gr.update(value=final_video),
147
+ keyframe_gallery: gr.update(value=final_keyframes),
148
+ step4_accordion: gr.update(visible=True, open=True),
149
+ original_video_path_state: final_video,
150
+ current_source_video_state: final_video,
151
+ }
152
 
153
  def run_hd_wrapper(source_video, model_version, steps, global_prompt, progress=gr.Progress()):
154
+ if not source_video: raise gr.Error("Cannot run HD Mastering. No source video found.")
155
+ yield {hd_video_output: gr.update(value=None, visible=True, label="Applying HD mastering..."), final_video_output: gr.update(label="Post-Production: HD Mastering...")}
156
  final_path = None
157
  for update in aduc.task_run_hd_mastering(source_video, model_version, int(steps), global_prompt, progress=progress): final_path = update['final_path']
158
  yield {hd_video_output: gr.update(value=final_path, label="✅ HD Mastering Complete"), final_video_output: gr.update(value=final_path), hd_video_path_state: final_path, current_source_video_state: final_path}
159
 
160
  def run_audio_wrapper(source_video, audio_prompt, global_prompt, progress=gr.Progress()):
161
+ if not source_video: raise gr.Error("Cannot run Audio Generation. No source video found.")
162
+ yield {audio_video_output: gr.update(value=None, visible=True, label="Generating audio..."), final_video_output: gr.update(label="Post-Production: Audio Generation...")}
163
  final_audio_prompt = audio_prompt if audio_prompt and audio_prompt.strip() else global_prompt
164
  final_path = None
165
  for update in aduc.task_run_audio_generation(source_video, final_audio_prompt, progress=progress): final_path = update['final_path']
 
169
  try:
170
  with open(LOG_FILE_PATH, "r", encoding="utf-8") as f: return f.read()
171
  except FileNotFoundError:
172
+ return "Log file not yet created."
 
 
 
 
 
 
173
 
174
  # --- 3. GRADIO UI DEFINITION ---
175
  with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
176
  default_lang = i18n.get('pt', {})
 
 
177
  original_video_path_state = gr.State(value=None)
 
178
  hd_video_path_state = gr.State(value=None)
179
  current_source_video_state = gr.State(value=None)
180
 
 
195
  storyboard_from_photos_button = gr.Button(default_lang.get('storyboard_from_photos_button'), variant="secondary")
196
  step1_mode_b_info_md = gr.Markdown(f"*{default_lang.get('step1_mode_b_info')}*")
197
  storyboard_output = gr.JSON(label=default_lang.get('storyboard_output_label'))
198
+ keyframe_gallery = gr.Gallery(label=default_lang.get('keyframe_gallery_label'), visible=True, object_fit="contain", height="auto", type="filepath")
199
 
200
  with gr.Accordion(default_lang.get('step3_accordion'), open=False, visible=False) as step3_accordion:
201
  step3_description_md = gr.Markdown(default_lang.get('step3_description'))
 
215
 
216
  with gr.Accordion(default_lang.get('step4_accordion'), open=False, visible=False) as step4_accordion:
217
  step4_description_md = gr.Markdown(default_lang.get('step4_description'))
218
+ # The Latent Upscaler step is now part of the 7D engine's internal keyframe process, so it's removed from the UI.
 
 
 
 
 
219
  with gr.Accordion(default_lang.get('sub_step_b_hd'), open=True) as sub_step_b_accordion:
220
  hd_description_md = gr.Markdown(default_lang.get('hd_description'))
221
  with gr.Accordion(default_lang.get('hd_options'), open=False) as hd_options_accordion:
 
236
  update_log_button = gr.Button(default_lang.get('update_log_button'))
237
 
238
  # --- 4. UI EVENT CONNECTIONS ---
239
+ all_ui_components = [title_md, subtitle_md, lang_selector, step1_accordion, prompt_input, ref_image_input, num_keyframes_slider, duration_per_fragment_slider, storyboard_and_keyframes_button, storyboard_from_photos_button, step1_mode_b_info_md, storyboard_output, keyframe_gallery, step3_accordion, step3_description_md, produce_original_button, ltx_advanced_options_accordion, causality_accordion, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, ltx_pipeline_accordion, guidance_scale_slider, stg_scale_slider, inference_steps_slider, step4_accordion, step4_description_md, sub_step_b_accordion, hd_description_md, hd_options_accordion, hd_model_radio, hd_steps_slider, run_hd_button, sub_step_c_accordion, audio_description_md, audio_options_accordion, audio_prompt_input, run_audio_button, final_video_output, log_accordion, log_display, update_log_button]
240
  def create_lang_update_fn():
241
+ # This function returns another function to prevent issues with Gradio's event loop
242
  def update_lang(lang_emoji):
243
  lang_code_map = {"🇧🇷": "pt", "🇺🇸": "en", "🇨🇳": "zh"}
244
  lang_code = lang_code_map.get(lang_emoji, "en")
245
  lang_map = i18n.get(lang_code, i18n.get('en', {}))
246
+ # Create a dictionary of updates
247
+ updates = {
248
+ title_md: gr.update(value=f"<h1>{lang_map.get('app_title')}</h1>"),
249
+ subtitle_md: gr.update(value=f"<p>{lang_map.get('app_subtitle')}</p>"),
250
+ # ... Add all other component updates here ...
251
+ }
252
+ # Return a list of the values in the correct order
253
+ return [updates.get(comp) for comp in all_ui_components]
254
  return update_lang
255
+ # This part needs to be updated to a simpler list of gr.update calls
256
+ # lang_selector.change(fn=create_lang_update_fn(), inputs=lang_selector, outputs=all_ui_components)
257
+
258
+ storyboard_and_keyframes_button.click(fn=run_pre_production_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
259
  storyboard_from_photos_button.click(fn=run_pre_production_photo_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
260
+ produce_original_button.click(
261
+ fn=run_unified_production_wrapper,
262
+ inputs=[ref_image_input, prompt_input, duration_per_fragment_slider, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, guidance_scale_slider, stg_scale_slider, inference_steps_slider, resolution_selector],
263
+ outputs=[original_video_output, final_video_output, keyframe_gallery, step4_accordion, original_video_path_state, current_source_video_state]
264
+ )
265
  run_hd_button.click(fn=run_hd_wrapper, inputs=[current_source_video_state, hd_model_radio, hd_steps_slider, prompt_input], outputs=[hd_video_output, final_video_output, hd_video_path_state, current_source_video_state])
266
  run_audio_button.click(fn=run_audio_wrapper, inputs=[current_source_video_state, audio_prompt_input, prompt_input], outputs=[audio_video_output, final_video_output])
267
  update_log_button.click(fn=get_log_content, inputs=[], outputs=[log_display])