Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -6,14 +6,11 @@ import gradio as gr
 import numpy as np
 import torch
 from PIL import Image
-from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL, StableDiffusionXLImg2ImgPipeline
+from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL
 from huggingface_hub import hf_hub_download, InferenceClient
 
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
-refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
-refiner.to("cuda")
-
 pipe_fast = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, vae=vae, use_safetensors=True)
 pipe_fast.to("cuda")
 
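Note: this hunk removes the locally loaded SDXL refiner, so the Space's GPU now only holds the Lightning base pipeline. A minimal sketch of the setup that remains, assuming a CUDA device; the prompt and step values are illustrative, not from the Space:

    import torch
    from diffusers import AutoencoderKL, StableDiffusionXLPipeline

    # The fp16-fix VAE avoids the NaN/black-image artifacts the stock
    # SDXL VAE can produce when decoding in float16.
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )
    pipe_fast = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning",
        torch_dtype=torch.float16, vae=vae, use_safetensors=True,
    ).to("cuda")

    # Lightning checkpoints are distilled for few-step sampling, so a
    # low step count and low guidance are typical.
    image = pipe_fast(
        "a lighthouse at dawn", num_inference_steps=8, guidance_scale=2.0
    ).images[0]
    image.save("out.png")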
@@ -56,6 +53,7 @@ def promptifier(prompt):
     return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
 client_image = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
+refiner = InferenceClient("stabilityai/stable-diffusion-xl-refiner-1.0")
 # Generator
 @spaces.GPU(duration=60, queue=False)
 def king(type ,
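Note: the refiner returns here as an InferenceClient, so refinement now runs on the Hugging Face Inference API instead of the Space's GPU. A minimal sketch of the client's image_to_image call, which accepts a file path, raw bytes, or a PIL image and returns a PIL image; the input file name is hypothetical:

    from huggingface_hub import InferenceClient

    refiner = InferenceClient("stabilityai/stable-diffusion-xl-refiner-1.0")
    refined = refiner.image_to_image(
        "draft.png",                      # hypothetical local file
        prompt="4k, hd, high quality, masterpiece",
        guidance_scale=7.5,
        num_inference_steps=10,
    )
    refined.save("refined.png")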
@@ -82,14 +80,13 @@ def king(type ,
             width = input_image.width, height = input_image.height,
             num_inference_steps=steps, generator=generator, output_type="latent",
         ).images
-        refine = refiner(
+        refine = refiner.image_to_image(
             prompt=f"{instruction}, 4k, hd, high quality, masterpiece",
             negative_prompt = negative_prompt,
             guidance_scale=7.5,
-            num_inference_steps=steps,
-            image=output_image
-
-        ).images[0]
+            num_inference_steps=int(steps/3),
+            image=output_image
+        )
         return seed, refine
     else :
         if randomize_seed:
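Note: the base call above still uses output_type="latent", so output_image is a latent tensor, while InferenceClient.image_to_image serializes its input as image bytes. If the endpoint rejects latents, decoding through the pipeline's VAE first should produce something it can accept; a sketch under that assumption, reusing the diff's variable names:

    import torch

    # Decode the fp16 latents to a PIL image before the HTTP call.
    with torch.no_grad():
        decoded = pipe_fast.vae.decode(
            output_image / pipe_fast.vae.config.scaling_factor
        ).sample
    pil_image = pipe_fast.image_processor.postprocess(decoded)[0]

    refine = refiner.image_to_image(
        pil_image,
        prompt=f"{instruction}, 4k, hd, high quality, masterpiece",
        negative_prompt=negative_prompt,
        guidance_scale=7.5,
        num_inference_steps=int(steps / 3),
    )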
@@ -113,12 +110,10 @@ def king(type ,
             guidance_scale = guidance_scale,
             num_inference_steps = steps,
             width = width, height = height )
-        refine = refiner(
-
-
-
-            image=image, generator=generator,
-        ).images[0]
+        refine = refiner.image_to_image(
+            prompt=f"{instruction}, 4k, hd, high quality, masterpiece",
+            negative_prompt = negative_prompt, guidance_scale = 7.5,
+            num_inference_steps= int(steps/3), image=image )
         return seed, refine
 
 client = InferenceClient()
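Note: both branches now give the refiner a third of the base step count (steps=9 would mean 3 refiner steps), keeping the remote round trip short; int(steps/3) can reach 0 for steps < 3, so clamping to at least 1 is safer. Since refinement is now a network call, a fallback to the unrefined image would keep the Space usable when the endpoint is cold or rate-limited; a sketch, not part of this commit:

    from huggingface_hub.utils import HfHubHTTPError

    def refine_or_fallback(refiner, image, instruction, negative_prompt, steps):
        """Try the remote refiner; return the unrefined image on failure."""
        try:
            return refiner.image_to_image(
                image,
                prompt=f"{instruction}, 4k, hd, high quality, masterpiece",
                negative_prompt=negative_prompt,
                guidance_scale=7.5,
                num_inference_steps=max(1, int(steps / 3)),
            )
        except HfHubHTTPError:
            # Endpoint unavailable or rate-limited: ship the base output.
            return image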