HAL1993 committed
Commit 787ebcf · verified · 1 Parent(s): 44598c7

Update app.py

Files changed (1)
  1. app.py +1 -57
app.py CHANGED
@@ -15,9 +15,6 @@ from optimization import optimize_pipeline_
 from PIL import Image
 import gradio as gr
 
-# ----------------------------------------------------------------------
-# Logging configuration
-# ----------------------------------------------------------------------
 logging.basicConfig(
     level=logging.INFO,
     filename="qwen_image_editor.log",
@@ -26,9 +23,6 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-# ----------------------------------------------------------------------
-# Translation (GPU-accelerated)
-# ----------------------------------------------------------------------
 @spaces.GPU
 def translate_albanian_to_english(text: str, language: str = "en"):
     """Translate Albanian text to English using an external HF Space."""
@@ -52,13 +46,9 @@ def translate_albanian_to_english(text: str, language: str = "en"):
         raise gr.Error("Translation failed. Please try again.")
     raise gr.Error("Translation failed. Please try again.")
 
-# ----------------------------------------------------------------------
-# Model loading & preparation
-# ----------------------------------------------------------------------
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Lightning-optimized scheduler configuration
 scheduler_cfg = {
     "base_image_seq_len": 256,
     "base_shift": math.log(3),
@@ -77,25 +67,21 @@ scheduler_cfg = {
 }
 scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_cfg)
 
-# Load Qwen-Image-Edit pipeline
 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
     scheduler=scheduler,
     torch_dtype=dtype,
 ).to(device)
 
-# Load + fuse Lightning LoRA
 pipe.load_lora_weights(
     "lightx2v/Qwen-Image-Lightning",
     weight_name="Qwen-Image-Lightning-4steps-V2.0.safetensors",
 )
 pipe.fuse_lora()
 
-# Replace transformer class & set custom attention processor
 pipe.transformer.__class__ = QwenImageTransformer2DModel
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 
-# Ahead-of-time compilation / optimization
 optimize_pipeline_(
     pipe,
     image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))],
@@ -105,22 +91,15 @@ optimize_pipeline_(
 MAX_SEED = np.iinfo(np.int32).max
 QUALITY_PROMPT = ", high quality, detailed, vibrant, professional lighting"
 
-# ----------------------------------------------------------------------
-# Inference (GPU-accelerated, duration hint for Spaces)
-# ----------------------------------------------------------------------
 @spaces.GPU(duration=40)
 def infer(image, prompt):
-    """
-    Generate an edited image from the input image and prompt.
-    """
+    """Generate an edited image from the input image and prompt."""
     negative_prompt = ""
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    # Translate prompt + quality suffix
     prompt_en = translate_albanian_to_english(prompt.strip(), language="en") + QUALITY_PROMPT
 
-    # Load input image as PIL.Image (if any)
     pil_img = None
     if image is not None:
         if isinstance(image, Image.Image):
@@ -130,7 +109,6 @@ def infer(image, prompt):
         elif hasattr(image, "name"):
             pil_img = Image.open(image.name).convert("RGB")
 
-    # Run pipeline
     output = pipe(
         image=[pil_img] if pil_img is not None else None,
         prompt=prompt_en,
@@ -145,14 +123,8 @@ def infer(image, prompt):
 
     return output[0] if output else None
 
-# ----------------------------------------------------------------------
-# Gradio UI
-# ----------------------------------------------------------------------
 def create_demo():
     with gr.Blocks(css="", title="Qwen Image Editor") as demo:
-        # --------------------------------------------------------------
-        # Custom HTML + full CSS (including top gap) + JS
-        # --------------------------------------------------------------
         gr.HTML(
             """
             <style>
@@ -261,7 +233,6 @@ def create_demo():
                 box-sizing:border-box !important;
                 display:block !important;
             }
-            /* Hide upload / toolbar elements */
             .image-container[aria-label="Input Image"] .file-upload,
             .image-container[aria-label="Input Image"] .file-preview,
             .image-container[aria-label="Input Image"] .image-actions,
@@ -288,7 +259,6 @@ def create_demo():
             div[aria-label="Result Image"] .gr-button{
                 display:none !important;
             }
-            /* Processing overlay for Result Image */
             .image-container[aria-label="Result Image"].processing{
                 background:#000000 !important;
                 position:relative !important;
@@ -393,22 +363,14 @@ def create_demo():
             }
             </style>
             <script>
-            // --------------------------------------------------------------
-            // Enforce loading only under /spaceishere (or any sub-path)
-            // --------------------------------------------------------------
            const allowedPath = /^\\/spaceishere(\\/.*)?$/;
            if (!allowedPath.test(window.location.pathname)) {
                document.body.innerHTML = '<h1 style="color:#ef4444;font-family:sans-serif;text-align:center;margin-top:100px;">500 Internal Server Error</h1>';
                throw new Error('500');
            }
-
-            // --------------------------------------------------------------
-            // UI behaviour: processing overlay & hide progress elements
-            // --------------------------------------------------------------
            document.addEventListener('DOMContentLoaded', () => {
                const generateBtn = document.querySelector('.gr-button-primary');
                const resultContainer = document.querySelector('.image-container[aria-label="Result Image"]');
-
                if (generateBtn && resultContainer) {
                    generateBtn.addEventListener('click', () => {
                        resultContainer.classList.add('processing');
@@ -416,7 +378,6 @@ def create_demo():
                            if (child.tagName !== 'IMG') child.style.display = 'none';
                        });
                    });
-
                    const imgObserver = new MutationObserver(muts => {
                        muts.forEach(m => {
                            m.addedNodes.forEach(node => {
@@ -429,8 +390,6 @@ def create_demo():
                    });
                    imgObserver.observe(resultContainer, { childList: true, subtree: true });
                }
-
-                // Periodically purge any stray progress elements
                setInterval(() => {
                    document.querySelectorAll('.progress-text,.gr-progress,[class*="progress"]').forEach(el => el.remove());
                }, 500);
@@ -438,10 +397,6 @@ def create_demo():
            </script>
            """
        )
-
-        # --------------------------------------------------------------
-        # Layout
-        # --------------------------------------------------------------
        with gr.Row(elem_id="general_items"):
            gr.Markdown("# Image Edit")
            gr.Markdown("Edit your images with prompt descriptions", elem_id="subtitle")
@@ -473,18 +428,10 @@ def create_demo():
                show_share_button=False,
                elem_classes=["gradio-component", "image-container"],
            )
-
-            # --------------------------------------------------------------
-            # Event bindings
-            # --------------------------------------------------------------
            run_button.click(fn=infer, inputs=[input_image, prompt], outputs=[result_image])
            prompt.submit(fn=infer, inputs=[input_image, prompt], outputs=[result_image])
-
    return demo
 
-# ----------------------------------------------------------------------
-# FastAPI app with strict routing (only /spaceishere allowed)
-# ----------------------------------------------------------------------
 app = FastAPI()
 demo = create_demo()
 app.mount("/spaceishere", demo.app)
@@ -493,9 +440,6 @@ app.mount("/spaceishere", demo.app)
 async def catch_all(path: str):
     raise HTTPException(status_code=500, detail="Internal Server Error")
 
-# ----------------------------------------------------------------------
-# Entry point
-# ----------------------------------------------------------------------
 if __name__ == "__main__":
     logger.info(f"Gradio version: {gr.__version__}")
     demo.queue().launch(share=True)
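Review note on the hunk at old lines 52-53: translate_albanian_to_english raises the same gr.Error twice in a row, which is consistent with a retry loop that raises inside its final except branch and keeps a fallback raise after the loop. The function body (old lines 35-51) is outside the diff hunks, so the sketch below is an assumption about that structure; the Space id and retry count are placeholders, not values from the file.

import gradio as gr
from gradio_client import Client

def translate_albanian_to_english(text: str, language: str = "en"):
    """Translate Albanian text to English using an external HF Space."""
    for attempt in range(2):  # retry count is a placeholder
        try:
            # "user/translation-space" is a hypothetical Space id.
            client = Client("user/translation-space")
            return client.predict(text, language)
        except Exception:
            if attempt == 1:  # retries exhausted
                raise gr.Error("Translation failed. Please try again.")
    raise gr.Error("Translation failed. Please try again.")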
 
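Review note on the tail of app.py: the file mounts the Gradio app under /spaceishere and answers every other path with HTTP 500, mirroring the JavaScript path guard injected via gr.HTML. Below is a minimal, self-contained sketch of that strict-routing pattern. The @app.get("/{path:path}") decorator is an assumption (the hunk shows only the handler body), and the sketch uses the documented gr.mount_gradio_app helper, whereas the file itself calls app.mount("/spaceishere", demo.app).

import gradio as gr
import uvicorn
from fastapi import FastAPI, HTTPException

app = FastAPI()

with gr.Blocks(title="Qwen Image Editor") as demo:
    gr.Markdown("# Image Edit")  # placeholder UI for the sketch

# Serve the Gradio UI only under /spaceishere.
app = gr.mount_gradio_app(app, demo, path="/spaceishere")

# Any other path returns a 500, matching the in-page JS guard.
# The decorator is an assumption; the diff shows only the handler body.
@app.get("/{path:path}")
async def catch_all(path: str):
    raise HTTPException(status_code=500, detail="Internal Server Error")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)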