multimodalart (HF Staff) committed on
Commit 6a3ec9b · verified · 1 Parent(s): 97a289e

Update app.py

Files changed (1)
  1. app.py +5 -8
app.py CHANGED
@@ -46,14 +46,11 @@ scheduler_config = {
 
 scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
 
-pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", scheduler=scheduler, torch_dtype=dtype).to(device)
+pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", scheduler=scheduler, torch_dtype=dtype)
 
 # Load the relight LoRA
-pipe.load_lora_weights(
-    "dx8152/Qwen-Image-Edit-2509-Relight",
-    weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight"
-)
-
+pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
+                       weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight")
 pipe.load_lora_weights("lightx2v/Qwen-Image-Lightning",
                        weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors", adapter_name="lightning")
 pipe.set_adapters(["relight", "lightning"], adapter_weights=[1., 1.])
@@ -63,12 +60,12 @@ pipe.unload_lora_weights()
 pipe.transformer.__class__ = QwenImageTransformer2DModel
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 
-optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
+pipe.to(device)
 
+optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
 
 MAX_SEED = np.iinfo(np.int32).max
 
-# Initialize translation client
 translation_client = InferenceClient(
     api_key=os.environ.get("HF_TOKEN"),
 )
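For context, here is a minimal, self-contained sketch of the startup order app.py ends up with after this commit, assuming a CUDA device and bfloat16 weights and omitting the Space-specific pieces (the FlowMatchEulerDiscreteScheduler override, optimize_pipeline_, and the QwenDoubleStreamAttnProcessorFA3 swap). The pipe.fuse_lora() call is an assumption inferred from the pipe.unload_lora_weights() line visible in the second hunk header, not something shown in this diff.

# Sketch only: startup order after this commit, not the Space's full app.py.
import torch
from diffusers import QwenImageEditPlusPipeline

device = "cuda"          # assumption; app.py derives its own device/dtype
dtype = torch.bfloat16   # assumption

# Create the base pipeline on CPU first -- no .to(device) yet
# (the custom scheduler from app.py is omitted in this sketch).
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", torch_dtype=dtype
)

# Attach both LoRAs while the weights are still on CPU.
pipe.load_lora_weights(
    "dx8152/Qwen-Image-Edit-2509-Relight",
    weight_name="Qwen-Edit-Relight.safetensors",
    adapter_name="relight",
)
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
    adapter_name="lightning",
)
pipe.set_adapters(["relight", "lightning"], adapter_weights=[1.0, 1.0])

# app.py unloads the LoRA weights afterwards (see the second hunk header);
# the fuse_lora() step here is an assumption about what happens in between.
pipe.fuse_lora()
pipe.unload_lora_weights()

# Only now move the merged pipeline to the device; the commit relocates this
# step from from_pretrained(...).to(device) to after the LoRA handling.
pipe.to(device)

The visible effect of the reorder is that both LoRAs are loaded and merged before any weights reach the GPU, with pipe.to(device) deferred until just before optimize_pipeline_ runs, likely to keep the adapter handling on CPU during startup.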