Update app.py

app.py CHANGED
@@ -127,82 +127,113 @@ def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height
 
     try:
         if img is not None:
-            return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator), f"Done. Seed: {seed}"
+            return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
         else:
-            return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator), f"Done. Seed: {seed}"
+            return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
     except Exception as e:
         return None, error_str(e)
 
-def …
+def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
+
+    print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
+
     global last_mode
     global pipe
     global current_model_path
-
-    if model_path != current_model_path or last_mode != mode:
+    if model_path != current_model_path or last_mode != "txt2img":
         current_model_path = model_path
-        update_state(f"Loading {current_model.name} {mode} model...")
 
-
+        update_state(f"Loading {current_model.name} text-to-image model...")
+
         if is_colab or current_model == custom_model:
-            …
+            pipe = StableDiffusionPipeline.from_pretrained(
+                current_model_path,
+                torch_dtype=torch.float16,
+                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
+                safety_checker=lambda images, clip_input: (images, False)
+            )
         else:
-            …
+            pipe = StableDiffusionPipeline.from_pretrained(
+                current_model_path,
+                torch_dtype=torch.float16,
+                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
+            )
+        # pipe = pipe.to("cpu")
+        # pipe = current_model.pipe_t2i
 
     if torch.cuda.is_available():
-        …
+        pipe = pipe.to("cuda")
+        pipe.enable_xformers_memory_efficient_attention()
+    last_mode = "txt2img"
 
-    …
+    prompt = current_model.prefix + prompt
+    result = pipe(
+        prompt,
+        negative_prompt = neg_prompt,
+        num_images_per_prompt=n_images,
+        num_inference_steps = int(steps),
+        guidance_scale = guidance,
+        width = width,
+        height = height,
+        generator = generator,
+        callback=pipe_callback)
+
+    # update_state(f"Done. Seed: {seed}")
+
+    return replace_nsfw_images(result)
 
-def process_image(img, width, height):
-    ratio = min(height / img.height, width / img.width)
-    return img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
+def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
 
+    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
 
-…
-    print(f"{datetime.datetime.now()} {mode}, model: {model_path}")
+    global last_mode
     global pipe
-    …
+    global current_model_path
+    if model_path != current_model_path or last_mode != "img2img":
+        current_model_path = model_path
+
+        update_state(f"Loading {current_model.name} image-to-image model...")
+
+        if is_colab or current_model == custom_model:
+            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+                current_model_path,
+                torch_dtype=torch.float16,
+                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
+                safety_checker=lambda images, clip_input: (images, False)
+            )
+        else:
+            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+                current_model_path,
+                torch_dtype=torch.float16,
+                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
+            )
+        # pipe = pipe.to("cpu")
+        # pipe = current_model.pipe_i2i
+
+    if torch.cuda.is_available():
+        pipe = pipe.to("cuda")
+        pipe.enable_xformers_memory_efficient_attention()
+    last_mode = "img2img"
+
     prompt = current_model.prefix + prompt
+    ratio = min(height / img.height, width / img.width)
+    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
     result = pipe(
         prompt,
-        …
-        **kwargs,
-        callback=pipe_callback
-    )
-    return replace_nsfw_images(result)
-
-def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator):
-    return inference_image(model_path, prompt, "txt2img",
-        negative_prompt=neg_prompt,
-        num_images_per_prompt=n_images,
-        num_inference_steps=int(steps),
-        guidance_scale=guidance,
-        width=width,
-        height=height,
-        generator=generator
-    )
-
-def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator):
-    img = process_image(img, width, height)
-    return inference_image(model_path, prompt, "img2img",
-        negative_prompt=neg_prompt,
+        negative_prompt = neg_prompt,
         num_images_per_prompt=n_images,
-        image=img,
-        num_inference_steps=int(steps),
-        strength=strength,
-        guidance_scale=guidance,
-        …
+        image = img,
+        num_inference_steps = int(steps),
+        strength = strength,
+        guidance_scale = guidance,
+        # width = width,
+        # height = height,
+        generator = generator,
+        callback=pipe_callback)
+
+    # update_state(f"Done. Seed: {seed}")
+
+    return replace_nsfw_images(result)
 
 def replace_nsfw_images(results):
 
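The new txt_to_img and img_to_img are near-duplicates: each reloads the global pipe only when the requested model path or the mode ("txt2img" vs. "img2img") differs from what is already loaded, and otherwise reuses the cached pipeline. A minimal sketch of that reload-on-change pattern, assuming the same torch/diffusers dependencies as app.py; get_pipe and cache_key are illustrative names, not part of this commit:

# Sketch of the caching pattern used above, not part of app.py:
# keep one global pipeline and reload it only when the model path or
# the mode changes. `get_pipe` and `cache_key` are hypothetical names.
import torch
from diffusers import (
    DPMSolverMultistepScheduler,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionPipeline,
)

pipe = None
cache_key = None  # (model_path, mode) of the currently loaded pipeline

def get_pipe(model_path: str, mode: str):
    global pipe, cache_key
    if cache_key != (model_path, mode):
        cls = StableDiffusionPipeline if mode == "txt2img" else StableDiffusionImg2ImgPipeline
        pipe = cls.from_pretrained(
            model_path,
            torch_dtype=torch.float16,
            scheduler=DPMSolverMultistepScheduler.from_pretrained(model_path, subfolder="scheduler"),
        )
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
            pipe.enable_xformers_memory_efficient_attention()  # requires xformers
        cache_key = (model_path, mode)
    return pipe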
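When running on Colab or with a custom model, both functions swap in a stub safety checker: safety_checker=lambda images, clip_input: (images, False). In the diffusers releases this app appears to target, the checker is called with the generated images and their CLIP features and must return an (images, nsfw_flags) pair, so the stub passes images through untouched and reports nothing flagged; replace_nsfw_images then sees no NSFW hits. A standalone illustration of that contract (only the lambda comes from the diff; the call below is hypothetical):

# The stub from the diff: pass images through and report no NSFW content.
# diffusers invokes the checker with `images` and `clip_input` and expects
# an (images, nsfw_flags) pair back; here the flags are simply False.
safety_checker = lambda images, clip_input: (images, False)

imgs, flagged = safety_checker(["image-0"], None)
assert imgs == ["image-0"] and flagged is False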
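img_to_img also inlines the aspect-ratio fit that the removed process_image helper used to perform: the init image is scaled so that both sides fit inside the requested width and height before being passed to the pipeline. A worked example using the same formula (the 1024x768 input is made up for illustration):

# For a 1024x768 input and width = height = 512:
#   ratio = min(512 / 768, 512 / 1024) = min(0.666..., 0.5) = 0.5
#   new size = (int(1024 * 0.5), int(768 * 0.5)) = (512, 384)
from PIL import Image

img = Image.new("RGB", (1024, 768))
width = height = 512
ratio = min(height / img.height, width / img.width)
img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
assert img.size == (512, 384)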