Commit cbdb77e · eliphatfs committed
1 Parent(s): e628f3f

Thread safety.
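The Space shares one large CLIP model across all Streamlit sessions and shuttles it between GPU and CPU so the Stable Diffusion pipeline can fit in VRAM. With concurrent sessions, two script threads could race on those .cpu()/.cuda() moves and leave the model half-migrated; this commit serializes the moves behind a single threading.Lock stored on the sys module. A minimal sketch of the pattern, assuming a generic shared torch model (run_heavy_job and its arguments are illustrative, not the Space's actual API):

import sys
import threading

import torch

# Process-wide lock; stashing it on `sys` keeps one instance visible to
# every Streamlit script thread (the app itself creates it inside the
# @st.cache_resource-cached loader, which runs once per process).
if not hasattr(sys, "clip_move_lock"):
    sys.clip_move_lock = threading.Lock()

def run_heavy_job(shared_model: torch.nn.Module, job):
    """Evict `shared_model` from the GPU while `job` runs, then restore it."""
    if torch.cuda.is_available():
        with sys.clip_move_lock:  # serialize device moves across threads
            shared_model.cpu()
    try:
        return job()
    finally:
        if torch.cuda.is_available():
            with sys.clip_move_lock:
                shared_model.cuda()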
app.py CHANGED

@@ -1,4 +1,5 @@
 import sys
+import threading
 import streamlit as st
 from huggingface_hub import HfFolder, snapshot_download
 
@@ -21,12 +22,16 @@ import transformers
 from PIL import Image
 
 @st.cache_resource
-def load_openshape(name):
-    return openshape.load_pc_encoder(name)
+def load_openshape(name, to_cpu=False):
+    pce = openshape.load_pc_encoder(name)
+    if to_cpu:
+        pce = pce.cpu()
+    return pce
 
 
 @st.cache_resource
 def load_openclip():
+    sys.clip_move_lock = threading.Lock()
     return transformers.CLIPModel.from_pretrained(
         "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k",
         low_cpu_mem_usage=True, torch_dtype=half,
@@ -38,7 +43,7 @@ f32 = numpy.float32
 half = torch.float16 if torch.cuda.is_available() else torch.bfloat16
 # clip_model, clip_prep = None, None
 clip_model, clip_prep = load_openclip()
-model_b32 = load_openshape('openshape-pointbert-vitb32-rgb')
+model_b32 = load_openshape('openshape-pointbert-vitb32-rgb', True)
 model_l14 = load_openshape('openshape-pointbert-vitl14-rgb')
 model_g14 = load_openshape('openshape-pointbert-vitg14-rgb')
 torch.set_grad_enabled(False)
@@ -187,17 +192,19 @@ def demo_pc2img():
     col2 = misc_utils.render_pc(pc)
     prog.progress(0.49, "Running Generation")
     if torch.cuda.is_available():
-        clip_model.cpu()
+        with sys.clip_move_lock:
+            clip_model.cpu()
     img = sd_pc2img.pc_to_image(
         model_l14, pc, prompt, noise_scale, width, height, cfg_scale, steps,
         lambda i, t, _: prog.progress(0.49 + i / (steps + 1) / 2, "Running Diffusion Step %d" % i)
     )
     if torch.cuda.is_available():
-        clip_model.cuda()
+        with sys.clip_move_lock:
+            clip_model.cuda()
     with col2:
         st.image(img)
     prog.progress(1.0, "Idle")
-    if image_examples(samples_index.sd, 3):
+    if image_examples(samples_index.sd, 3, example_text="Examples (Choose one of the following 3D shapes)"):
         queue_auto_submit("sdauto")
 
 
@@ -285,7 +292,8 @@ def demo_retrieval():
 
 try:
     if torch.cuda.is_available():
-        clip_model.cuda()
+        with sys.clip_move_lock:
+            clip_model.cuda()
     with tab_cls:
         demo_classification()
     with tab_cap:
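Two notes on the diff above. The lock is created inside load_openclip, which is wrapped in @st.cache_resource: Streamlit runs a cached loader once per server process and hands every session the same object back, so exactly one lock is created alongside the one shared CLIP model. That singleton behavior can be seen in a tiny standalone sketch (shared_lock is a hypothetical name, not from the Space):

import threading
import streamlit as st

@st.cache_resource
def shared_lock():
    # The body runs once per server process; later calls, from any
    # session or thread, return this same object rather than a copy.
    return threading.Lock()

lock_a = shared_lock()
lock_b = shared_lock()
assert lock_a is lock_b  # one lock shared by every caller

Separately, model_b32 is now loaded with to_cpu=True, keeping that encoder resident on the CPU, presumably to spare GPU memory for the larger l14/g14 encoders and the diffusion pipeline.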
|