ovi054 committed on
Commit
d464be0
·
verified ·
1 Parent(s): f6c78a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -2
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import torch
2
  from diffusers import UniPCMultistepScheduler
3
  from diffusers import WanPipeline, AutoencoderKLWan
 
4
  from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
5
  from huggingface_hub import hf_hub_download
6
  from PIL import Image
@@ -13,7 +14,20 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
13
  # --- MODEL SETUP ---
14
  model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
15
  vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
16
- pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  flow_shift = 2.0
18
  pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
19
  pipe.to(device)
@@ -146,7 +160,8 @@ def generate(prompt, negative_prompt, width, height, num_inference_steps, option
146
  num_frames=1,
147
  num_inference_steps=num_inference_steps,
148
  guidance_scale=float(guidance_scale),
149
- # guidance_scale_2=float(guidance_scale_2),
 
150
  )
151
  image = output.frames[0][0]
152
  image = (image * 255).astype(np.uint8)
 
1
  import torch
2
  from diffusers import UniPCMultistepScheduler
3
  from diffusers import WanPipeline, AutoencoderKLWan
4
+ from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
5
  from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
6
  from huggingface_hub import hf_hub_download
7
  from PIL import Image
 
14
  # --- MODEL SETUP ---
15
  model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
16
  vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
17
+ transformer=WanTransformer3DModel.from_pretrained(model_id,
18
+ subfolder='transformer',
19
+ torch_dtype=torch.bfloat16,
20
+ device_map='cuda',
21
+ )
22
+ transformer_2=WanTransformer3DModel.from_pretrained(model_id,
23
+ subfolder='transformer_2',
24
+ torch_dtype=torch.bfloat16,
25
+ device_map='cuda',
26
+ )
27
+ pipe = WanPipeline.from_pretrained(model_id,
28
+ transformer=transformer,
29
+ transformer_2 = transformer_2,
30
+ vae=vae, torch_dtype=torch.bfloat16)
31
  flow_shift = 2.0
32
  pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
33
  pipe.to(device)
 
160
  num_frames=1,
161
  num_inference_steps=num_inference_steps,
162
  guidance_scale=float(guidance_scale),
163
+ guidance_scale_2=float(guidance_scale_2),
164
+ boundary_ratio=0.3,
165
  )
166
  image = output.frames[0][0]
167
  image = (image * 255).astype(np.uint8)