iAkashPaul committed on
Commit 766ae19 · verified · 1 Parent(s): 4e4921b

Update app.py

Files changed (1)
  1. app.py +6 -17
app.py CHANGED
@@ -23,7 +23,7 @@ from optimization import optimize_pipeline_
 MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
 
 # --- NEW: Flexible Dimension Constants ---
-MAX_DIMENSION = 832
+MAX_DIMENSION = 720
 MIN_DIMENSION = 480
 DIMENSION_MULTIPLE = 16
 SQUARE_SIZE = 480
@@ -137,14 +137,14 @@ def resize_and_crop_to_match(target_image, reference_image):
     left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
     return resized.crop((left, top, left + ref_width, top + ref_height))
 
-@spaces.GPU(duration=30)
+@spaces.GPU(duration=40)
 def generate_video(
     start_image_pil,
     end_image_pil,
     prompt,
     negative_prompt=default_negative_prompt,
-    duration_seconds=2.1,
-    steps=8,
+    duration_seconds=2.5,
+    steps=5,
     guidance_scale=1,
     guidance_scale_2=1,
     seed=42,
@@ -219,9 +219,9 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as app:
     prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images")
 
     with gr.Accordion("Advanced Settings", open=False):
-        duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
+        duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.5, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
         negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
-        steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
+        steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=5, label="Inference Steps")
         guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
         guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - low noise")
         with gr.Row():
@@ -255,17 +255,6 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as app:
         outputs=ui_outputs
     )
 
-    gr.Examples(
-        examples=[
-            ["poli_tower.png", "tower_takes_off.png", "the man turns around"],
-            ["ugly_sonic.jpeg", "squatting_sonic.png", "the character dodges the missiles"],
-            ["capyabara_zoomed.png", "capyabara.webp", "a dramatic dolly zoom"],
-        ],
-        inputs=[start_image, end_image, prompt],
-        outputs=ui_outputs,
-        fn=generate_video,
-        cache_examples="lazy",
-    )
 
 if __name__ == "__main__":
     app.launch(share=True)
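For reference, the duration slider's info text says the requested duration is clamped to the model's frame range at a fixed FPS. A minimal sketch of that mapping under the new 2.5 s default, assuming the common round-then-clamp pattern; the actual helper in app.py is not part of this diff, and the constant values below are placeholders:

FIXED_FPS = 16            # placeholder; assumed fixed output frame rate
MIN_FRAMES_MODEL = 8      # placeholder; lower bound referenced by the slider info
MAX_FRAMES_MODEL = 81     # placeholder; upper bound referenced by the slider info

def duration_to_num_frames(duration_seconds: float) -> int:
    # Convert the requested duration to a frame count at the fixed FPS,
    # then clamp it to the range the model supports.
    requested = int(round(duration_seconds * FIXED_FPS))
    return max(MIN_FRAMES_MODEL, min(MAX_FRAMES_MODEL, requested))

# With the new 2.5 s default and these placeholder values: 2.5 * 16 = 40 frames.
print(duration_to_num_frames(2.5))  # -> 40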
 
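The MAX_DIMENSION cap drops from 832 to 720 in this commit. A hypothetical sketch of how one side length could be constrained by the dimension constants defined at the top of app.py; the resize logic that actually consumes MAX_DIMENSION lies outside the hunks shown here:

MAX_DIMENSION = 720
MIN_DIMENSION = 480
DIMENSION_MULTIPLE = 16

def snap_dimension(size: int) -> int:
    # Clamp to the allowed range, then round down to the nearest multiple
    # of DIMENSION_MULTIPLE so width/height stay pipeline-friendly.
    clamped = max(MIN_DIMENSION, min(MAX_DIMENSION, size))
    return (clamped // DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE

print(snap_dimension(832))  # -> 720 under the new cap (832 was the old maximum)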