Update app.py

app.py CHANGED
@@ -133,106 +133,81 @@ def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height
Before (old lines 133-238; removed lines are prefixed with "-", "..." marks lines omitted in this view):

    except Exception as e:
        return None, error_str(e)

-def txt_to_img(...):
-
-    print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
-
    global last_mode
    global pipe
    global current_model_path
-    if model_path != current_model_path or last_mode != "txt2img":
-        current_model_path = model_path

-        ...

        if is_colab or current_model == custom_model:
-            ...
        else:
-            ...
-        # pipe = pipe.to("cpu")
-        # pipe = current_model.pipe_t2i

        if torch.cuda.is_available():
-            ...
-        last_mode = "txt2img"

-    ...
-    result = pipe(
-        prompt,
-        negative_prompt = neg_prompt,
-        num_images_per_prompt=n_images,
-        num_inference_steps = int(steps),
-        guidance_scale = guidance,
-        width = width,
-        height = height,
-        generator = generator,
-        callback=pipe_callback)
-
-    # update_state(f"Done. Seed: {seed}")
-
-    return replace_nsfw_images(result)

-def img_to_img(...):

-    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")

-    ...

-        ...
-            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                current_model_path,
-                torch_dtype=torch.float16,
-                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
-                safety_checker=lambda images, clip_input: (images, False)
-            )
-        else:
-            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                current_model_path,
-                torch_dtype=torch.float16,
-                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
-            )
-        # pipe = pipe.to("cpu")
-        # pipe = current_model.pipe_i2i
-
-        if torch.cuda.is_available():
-            pipe = pipe.to("cuda")
-            pipe.enable_xformers_memory_efficient_attention()
-        last_mode = "img2img"

    prompt = current_model.prefix + prompt
-
-    ...
    result = pipe(
        prompt,
-        negative_prompt = neg_prompt,
        num_images_per_prompt=n_images,
-        image = img,
-        num_inference_steps = int(steps),
-        strength = strength,
-        guidance_scale = guidance,
-        ...
-    # update_state(f"Done. Seed: {seed}")
-
    return replace_nsfw_images(result)

def replace_nsfw_images(results):
After (new lines 133-213; added lines are prefixed with "+"):

    except Exception as e:
        return None, error_str(e)

+def load_model(model_path, mode):
    global last_mode
    global pipe
    global current_model_path

+    if model_path != current_model_path or last_mode != mode:
+        current_model_path = model_path
+        update_state(f"Loading {current_model.name} {mode} model...")

+        model_class = StableDiffusionPipeline if mode == "txt2img" else StableDiffusionImg2ImgPipeline
        if is_colab or current_model == custom_model:
+            pipe = model_class.from_pretrained(
+                current_model_path,
+                torch_dtype=torch.float16,
+                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
+                safety_checker=lambda images, clip_input: (images, False)
+            )
        else:
+            pipe = model_class.from_pretrained(
+                current_model_path,
+                torch_dtype=torch.float16,
+                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
+            )

        if torch.cuda.is_available():
+            pipe = pipe.to("cuda")
+            pipe.enable_xformers_memory_efficient_attention()

+        last_mode = mode

+def process_image(img, width, height):
+    ratio = min(height / img.height, width / img.width)
+    return img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)


+def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
+    print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
+
+    load_model(model_path, "txt2img")
+    prompt = current_model.prefix + prompt

+    result = pipe(
+        prompt,
+        negative_prompt=neg_prompt,
+        num_images_per_prompt=n_images,
+        num_inference_steps=int(steps),
+        guidance_scale=guidance,
+        width=width,
+        height=height,
+        generator=generator,
+        callback=pipe_callback
+    )

+    return replace_nsfw_images(result)

+
+def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
+    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
+
+    load_model(model_path, "img2img")
    prompt = current_model.prefix + prompt
+    img = process_image(img, width, height)
+
    result = pipe(
        prompt,
+        negative_prompt=neg_prompt,
        num_images_per_prompt=n_images,
+        image=img,
+        num_inference_steps=int(steps),
+        strength=strength,
+        guidance_scale=guidance,
+        generator=generator,
+        callback=pipe_callback
+    )
+
    return replace_nsfw_images(result)

def replace_nsfw_images(results):
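As a quick sanity check on the new process_image helper, the snippet below is a self-contained sketch that mirrors the resize logic from this diff; the standalone wrapper and the input size are illustrative, and only Pillow is assumed:

# Self-contained check of the resize logic added in this commit:
# the image is scaled to fit inside (width, height) while keeping its aspect ratio.
from PIL import Image

def process_image(img, width, height):
    ratio = min(height / img.height, width / img.width)
    return img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)

img = Image.new("RGB", (1024, 768))        # illustrative input size
print(process_image(img, 512, 512).size)   # -> (512, 384): longest side capped at 512

Taking min(height / img.height, width / img.width) picks the tighter constraint, so the input fits within the requested box without distorting its aspect ratio before it is passed to the img2img pipeline.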