Spaces · Runtime error
Commit d461b4f "use cpu" · Parent: a849e1d
app.py CHANGED
@@ -58,10 +58,9 @@ current_model_path = current_model.path
 
 auth_token = os.getenv("HUGGING_FACE_HUB_TOKEN")
 
-
-pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, use_auth_token=auth_token)
+print(f"Is CUDA available: {torch.cuda.is_available()}")
 
-
+if torch.cuda.is_available():
     vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16, use_auth_token=auth_token)
     for model in models:
         try:

@@ -71,10 +70,20 @@ else: # download all models
         except:
             models.remove(model)
     pipe = models[0].pipe_t2i
-
-if torch.cuda.is_available():
     pipe = pipe.to("cuda")
 
+else:
+    vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", use_auth_token=auth_token)
+    for model in models:
+        try:
+            unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", use_auth_token=auth_token)
+            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, scheduler=scheduler, use_auth_token=auth_token)
+            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, scheduler=scheduler, use_auth_token=auth_token)
+        except:
+            models.remove(model)
+    pipe = models[0].pipe_t2i
+    pipe = pipe.to("cpu")
+
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
 def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
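Taken together, the two hunks above stop loading float16 weights unconditionally and instead branch on torch.cuda.is_available(): half-precision pipelines that move to the GPU when one exists, full-precision pipelines kept on the CPU otherwise. A minimal self-contained sketch of that pattern, assuming the documented diffusers from_pretrained API (the model_id is a placeholder, not the Space's model list):

import torch
from diffusers import AutoencoderKL, StableDiffusionPipeline

# Placeholder checkpoint for illustration; the Space iterates over its own model list.
model_id = "runwayml/stable-diffusion-v1-5"

if torch.cuda.is_available():
    # GPU path: half precision halves memory use and runs fast on CUDA.
    vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float16)
    pipe = StableDiffusionPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
else:
    # CPU path: float16 kernels are poorly supported on CPU, so keep float32.
    vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae")
    pipe = StableDiffusionPipeline.from_pretrained(model_id, vae=vae)
    pipe = pipe.to("cpu")

print("GPU 🔥" if torch.cuda.is_available() else "CPU 🥶")

Note how the CPU branch of the diff passes the same unet and vae objects into both StableDiffusionPipeline and StableDiffusionImg2ImgPipeline: the two pipelines then share one copy of those weights in memory instead of loading them twice.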
@@ -100,11 +109,8 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     if model_path != current_model_path or last_mode != "txt2img":
         current_model_path = model_path
 
-
-
-    else:
-        pipe.to("cpu")
-        pipe = current_model.pipe_t2i
+        pipe.to("cpu")
+        pipe = current_model.pipe_t2i
 
     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
@@ -131,11 +137,8 @@ def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, w
     if model_path != current_model_path or last_mode != "img2img":
         current_model_path = model_path
 
-
-
-    else:
-        pipe.to("cpu")
-        pipe = current_model.pipe_i2i
+        pipe.to("cpu")
+        pipe = current_model.pipe_i2i
 
     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
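The last two hunks move the swap logic into the model-changed branch of txt_to_img and img_to_img: when the requested model differs from the active one, the old pipeline is pushed to the CPU before the cached pipeline for the new model is promoted to the GPU, so at most one model occupies VRAM at a time. A sketch of that swap in isolation, assuming pipelines were built once at startup (pipes_by_path and switch_pipe are hypothetical names for this sketch, not the app's globals):

import torch

# Hypothetical registry of pipelines built once at startup; the Space keeps
# them on its models list as model.pipe_t2i / model.pipe_i2i.
pipes_by_path = {}        # model path -> pipeline, all resident on the CPU
current_model_path = None
pipe = None

def switch_pipe(model_path):
    """Make model_path's pipeline active, keeping at most one model in VRAM."""
    global current_model_path, pipe
    if model_path != current_model_path:
        if pipe is not None:
            pipe.to("cpu")                # evict the previous model from the GPU
        pipe = pipes_by_path[model_path]  # reuse the cached pipeline
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")        # promote the new model to the GPU
        current_model_path = model_path
    return pipe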
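One detail worth flagging in both branches of the download loop: models.remove(model) inside for model in models: mutates the list being iterated, which silently skips the element that follows each failed model, and the bare except: also swallows KeyboardInterrupt. A safer shape for the same loop, where load_model_pipelines is a hypothetical stand-in for the try-body in the diff:

# Collect the models that load successfully instead of removing failures
# in place; removal during iteration skips the next element.
loaded_models = []
for model in models:
    try:
        load_model_pipelines(model)  # hypothetical: builds model.pipe_t2i / model.pipe_i2i
        loaded_models.append(model)
    except Exception:                # narrower than a bare `except:`
        pass
models = loaded_models
pipe = models[0].pipe_t2i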