oKen38461 committed on
Commit f25d32d · 1 Parent(s): f76034d

Fix syntax errors caused by incorrect regex replacements
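The broken lines removed below (e.g. `scale2)`, `audio_999999999)`, `guide_embedded_guidance_n_prompt=negative_prompt,`) are the typical fingerprint of a multi-line regex substitution that matched more than intended. A minimal sketch of that failure mode, using a hypothetical pattern and snippet (not the actual edit that damaged wgp.py):

```python
import re

# Hypothetical reproduction of the failure mode only; the real pattern and
# source lines that broke wgp.py are not known from this commit.
call_args = (
    "    guide_scale=guidance_scale,\n"
    "    embedded_guidance_scale=embedded_guidance_scale,\n"
    "    n_prompt=negative_prompt,\n"
)

# Intended: rewrite just the first keyword. Actual: with re.S the greedy
# ".*" runs across line breaks and swallows the two following arguments.
spliced = re.sub(r"guide_scale=.*negative_", "guide_", call_args, flags=re.S)
print(spliced)  # "    guide_prompt,\n" -- three arguments fused into one token
```

Using a non-greedy quantifier, or dropping re.S so `.` cannot cross line boundaries, avoids this class of splice.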

Files changed (1): wgp.py +12 -8
wgp.py CHANGED
@@ -2647,7 +2647,7 @@ def preprocess_video(process_type, height, width, video_in, max_frames, start_fr
     if fit_canvas :
         scale1 = min(height / frame_height, width / frame_width)
         scale2 = min(height / frame_width, width / frame_height)
-        scale2)
+        scale = max(scale1, scale2)
     else:
         scale = ((height * width ) / (frame_height * frame_width))**(1/2)
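For reference, the restored line 2650 completes the canvas-fit logic: `scale1` fits the frame in its original orientation, `scale2` fits it as if the canvas dimensions were swapped, and the larger of the two is kept. A standalone sketch with illustrative numbers (not values from the repository):

```python
# Illustrative numbers only; wgp.py derives these from the input video
# and the requested output resolution.
height, width = 720, 1280            # target canvas (example)
frame_height, frame_width = 1080, 1920

scale1 = min(height / frame_height, width / frame_width)   # fit as-is
scale2 = min(height / frame_width, width / frame_height)   # fit with axes swapped
scale = max(scale1, scale2)                                # restored line 2650
print(round(scale1, 3), round(scale2, 3), round(scale, 3))  # 0.667 0.375 0.667
```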
@@ -2973,7 +2973,7 @@ def generate_video(
     current_video_length = min(int(fps * duration // 4) * 4 + 5, current_video_length)
     if fantasy:
         audio_proj_split, audio_context_lens = parse_audio(audio_guide, num_frames= current_video_length, fps= fps, device= processing_device )
-        audio_999999999)
+        audio_scale = 1.0
 
     torch.set_grad_enabled(False)
     global save_path
@@ -3181,7 +3181,9 @@ def generate_video(
     fit_into_canvas = fit_canvas == 1,
     shift=flow_shift,
     sampling_steps=num_inference_steps,
-    guide_embedded_guidance_n_prompt=negative_prompt,
+    guide_scale=guidance_scale,
+    embedded_guidance_scale=embedded_guidance_scale,
+    n_prompt=negative_prompt,
     seed=seed,
     callback=callback,
     enable_RIFLEx = enable_RIFLEx,
@@ -3192,9 +3194,11 @@ def generate_video(
     slg_end = slg_end_perc/100,
     cfg_star_switch = cfg_star_switch,
     cfg_zero_step = cfg_zero_step,
-    audio_cfg_audio_guide=audio_guide,
+    audio_cfg_scale= audio_guidance_scale,
+    audio_guide=audio_guide,
     audio_proj= audio_proj_split,
-    audio_audio_context_lens= audio_context_lens,
+    audio_scale= audio_scale,
+    audio_context_lens= audio_context_lens,
     ar_step = model_mode, #5
     causal_block_size = 5,
     causal_attention = True,
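The fused keyword arguments removed in the two hunks above (`guide_embedded_guidance_n_prompt=...`, `audio_cfg_audio_guide=...`, `audio_audio_context_lens=...`) parse as valid Python but fail as soon as the call executes, assuming the callee declares its parameters explicitly rather than accepting arbitrary **kwargs. A minimal sketch with a hypothetical signature (not the real generation call in wgp.py):

```python
# Hypothetical stand-in for the generation call; the real callee and its
# signature are not shown in this diff.
def generate(guide_scale, embedded_guidance_scale, n_prompt):
    return guide_scale, embedded_guidance_scale, n_prompt

generate(guide_scale=5.0, embedded_guidance_scale=6.0, n_prompt="")  # restored form: ok
try:
    generate(guide_embedded_guidance_n_prompt="")  # fused form from the old code
except TypeError as exc:
    print(exc)  # TypeError: unexpected keyword argument
```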
@@ -4845,9 +4849,9 @@ def generate_video_tab(update_form = False, state_dict = None, ui_defaults = Non
         ], visible= test_class_i2v(model_filename), label= "Multiple Images as Texts Prompts"
     )
     with gr.Row(visible = not ltxv):
-        guidance_20.0, value=ui_defaults.get("guidance_scale",5), step=0.5, label="Guidance Scale", visible=not (hunyuan_t2v or hunyuan_i2v))
-        audio_guidance_20.0, value=ui_defaults.get("audio_guidance_scale",5), step=0.5, label="Audio Guidance", visible=fantasy)
-        embedded_guidance_20.0, value=6.0, step=0.5, label="Embedded Guidance Scale", visible=(hunyuan_t2v or hunyuan_i2v))
+        guidance_scale = gr.Slider(1.0, 20.0, value=ui_defaults.get("guidance_scale",5), step=0.5, label="Guidance Scale", visible=not (hunyuan_t2v or hunyuan_i2v))
+        audio_guidance_scale = gr.Slider(1.0, 20.0, value=ui_defaults.get("audio_guidance_scale",5), step=0.5, label="Audio Guidance", visible=fantasy)
+        embedded_guidance_scale = gr.Slider(1.0, 20.0, value=6.0, step=0.5, label="Embedded Guidance Scale", visible=(hunyuan_t2v or hunyuan_i2v))
     flow_shift = gr.Slider(0.0, 25.0, value=ui_defaults.get("flow_shift",3), step=0.1, label="Shift Scale")
     with gr.Row():
         negative_prompt = gr.Textbox(label="Negative Prompt", value=ui_defaults.get("negative_prompt", "") )
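The three restored sliders follow the same pattern: positional min/max bounds, a default pulled from `ui_defaults`, a step, a label, and a `visible` flag keyed to the active model. A minimal standalone sketch of that pattern (stub `ui_defaults`, single slider, not the full generate_video_tab layout):

```python
import gradio as gr

ui_defaults = {"guidance_scale": 5}   # stand-in for the real defaults dict

with gr.Blocks() as demo:
    with gr.Row():
        guidance_scale = gr.Slider(
            1.0, 20.0,                                   # min / max
            value=ui_defaults.get("guidance_scale", 5),  # default from ui_defaults
            step=0.5,
            label="Guidance Scale",
        )

# demo.launch()  # uncomment to serve the UI locally
```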
 