Spaces:
Running
Running
add qwen image edit
Browse files
app.py
CHANGED
|
@@ -1227,7 +1227,7 @@ def generate_image_with_qwen(prompt: str, image_index: int = 0) -> str:
|
|
| 1227 |
return f"Error generating image: {str(e)}"
|
| 1228 |
|
| 1229 |
def generate_image_to_image(input_image_data, prompt: str) -> str:
|
| 1230 |
-
"""Generate an image using image-to-image with
|
| 1231 |
|
| 1232 |
Returns an HTML <img> tag with optimized base64 JPEG data, similar to text-to-image output.
|
| 1233 |
"""
|
|
@@ -1270,15 +1270,20 @@ def generate_image_to_image(input_image_data, prompt: str) -> str:
|
|
| 1270 |
if pil_image.mode != 'RGB':
|
| 1271 |
pil_image = pil_image.convert('RGB')
|
| 1272 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1273 |
buf = io.BytesIO()
|
| 1274 |
-
pil_image.save(buf, format='
|
| 1275 |
input_bytes = buf.getvalue()
|
| 1276 |
|
| 1277 |
# Call image-to-image
|
| 1278 |
image = client.image_to_image(
|
| 1279 |
input_bytes,
|
| 1280 |
prompt=prompt,
|
| 1281 |
-
model="
|
| 1282 |
)
|
| 1283 |
|
| 1284 |
# Resize/optimize
|
|
@@ -4824,7 +4829,7 @@ with gr.Blocks(
|
|
| 4824 |
label="🖼️ Image to Image (uses input image)",
|
| 4825 |
value=False,
|
| 4826 |
visible=True,
|
| 4827 |
-
info="Transform your uploaded image using
|
| 4828 |
)
|
| 4829 |
image_to_video_toggle = gr.Checkbox(
|
| 4830 |
label="🎞️ Image to Video (uses input image)",
|
|
|
|
| 1227 |
return f"Error generating image: {str(e)}"
|
| 1228 |
|
| 1229 |
def generate_image_to_image(input_image_data, prompt: str) -> str:
|
| 1230 |
+
"""Generate an image using image-to-image with Qwen-Image-Edit via Hugging Face InferenceClient.
|
| 1231 |
|
| 1232 |
Returns an HTML <img> tag with optimized base64 JPEG data, similar to text-to-image output.
|
| 1233 |
"""
|
|
|
|
| 1270 |
if pil_image.mode != 'RGB':
|
| 1271 |
pil_image = pil_image.convert('RGB')
|
| 1272 |
|
| 1273 |
+
# Resize input image to avoid request body size limits
|
| 1274 |
+
max_input_size = 1024
|
| 1275 |
+
if pil_image.width > max_input_size or pil_image.height > max_input_size:
|
| 1276 |
+
pil_image.thumbnail((max_input_size, max_input_size), Image.Resampling.LANCZOS)
|
| 1277 |
+
|
| 1278 |
buf = io.BytesIO()
|
| 1279 |
+
pil_image.save(buf, format='JPEG', quality=85, optimize=True)
|
| 1280 |
input_bytes = buf.getvalue()
|
| 1281 |
|
| 1282 |
# Call image-to-image
|
| 1283 |
image = client.image_to_image(
|
| 1284 |
input_bytes,
|
| 1285 |
prompt=prompt,
|
| 1286 |
+
model="Qwen/Qwen-Image-Edit",
|
| 1287 |
)
|
| 1288 |
|
| 1289 |
# Resize/optimize
|
|
|
|
| 4829 |
label="🖼️ Image to Image (uses input image)",
|
| 4830 |
value=False,
|
| 4831 |
visible=True,
|
| 4832 |
+
info="Transform your uploaded image using Qwen-Image-Edit"
|
| 4833 |
)
|
| 4834 |
image_to_video_toggle = gr.Checkbox(
|
| 4835 |
label="🎞️ Image to Video (uses input image)",
|