Spaces: Running on L40S

Upload app_hg.py with huggingface_hub

app_hg.py CHANGED
@@ -39,17 +39,6 @@ import sys
 import subprocess
 from glob import glob
 
-# @spaces.GPU
-# def check_env():
-#     print(glob("/usr/local/cuda/*"))
-#     print(torch.cuda.is_available())
-#     print(os.environ.get('CUDA_HOME', None))
-#     os.environ['CUDA_HOME'] = '/usr/local/cuda'
-#     # Optionally, update PATH and LD_LIBRARY_PATH if needed
-#     os.environ['PATH'] = os.environ['CUDA_HOME'] + '/bin:' + os.environ['PATH']
-#     os.environ['LD_LIBRARY_PATH'] = os.environ['CUDA_HOME'] + '/lib64:' + os.environ.get('LD_LIBRARY_PATH', '')
-# check_env()
-
 def install_cuda_toolkit():
     # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
     CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
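Note: the surviving install_cuda_toolkit() helper (its body continues in the next hunk) fetches the CUDA 12.2 runfile at startup because the Space container does not ship a full toolkit. Most of its body lies outside this diff; the following is only a sketch of the usual pattern, with the runfile flags, temp path, and install location as assumptions rather than the file's actual code:

import os
import subprocess

def install_cuda_toolkit():
    # Hypothetical sketch: download the runfile referenced in the diff and run a
    # silent, toolkit-only install (no driver) into the default /usr/local/cuda.
    CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
    installer = "/tmp/cuda_installer.run"
    subprocess.check_call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", installer])
    subprocess.check_call(["sh", installer, "--silent", "--toolkit"])
    # Mirror the environment exports from the deleted check_env() comments above.
    os.environ["CUDA_HOME"] = "/usr/local/cuda"
    os.environ["PATH"] = os.environ["CUDA_HOME"] + "/bin:" + os.environ["PATH"]
    os.environ["LD_LIBRARY_PATH"] = os.environ["CUDA_HOME"] + "/lib64:" + os.environ.get("LD_LIBRARY_PATH", "")
    # The next hunk shows the file also pins TORCH_CUDA_ARCH_LIST to "8.0;8.6",
    # avoiding the empty arch_list IndexError noted in its comment.
    os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"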
@@ -66,18 +55,9 @@ def install_cuda_toolkit():
     )
     # Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
     os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
-
-    subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", "git+https://github.com/facebookresearch/pytorch3d@stable"])
-    import pytorch3d
-except Exception as err:
-    print(err)
-    print("new inner cuda install pytorch3d fail")
+
 install_cuda_toolkit()
 
-# def install_requirements():
-#     # Install the packages listed in requirements.txt
-#     subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
-#install_requirements()
 
 from infer import seed_everything, save_gif
 from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
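Note: the lines removed here installed pytorch3d from source at startup and belonged to a try/except whose opening try: sits above this hunk, which is why the fragment looks dangling. A self-contained sketch of that pattern (not the file's exact code) is:

import subprocess
import sys

def ensure_pytorch3d():
    # Import first; only build from source when the package is missing.
    try:
        import pytorch3d  # noqa: F401
    except ImportError:
        # A source build needs the CUDA toolkit installed above and a valid
        # TORCH_CUDA_ARCH_LIST, otherwise compilation fails on the Space.
        subprocess.check_call([
            sys.executable, "-m", "pip", "install", "-U",
            "git+https://github.com/facebookresearch/pytorch3d@stable",
        ])
        import pytorch3d  # noqa: F401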
@@ -92,7 +72,7 @@ parser.add_argument("--use_lite", default=False, action="store_true")
 parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
 parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
 parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
-parser.add_argument("--save_memory", default=
+parser.add_argument("--save_memory", default=True)
 parser.add_argument("--device", default="cuda:0", type=str)
 args = parser.parse_args()
 
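Note: the new --save_memory default is True; the previous default is cut off in this rendering of the diff. Assuming the argument really is declared without a type= or action= (as the added line suggests), a bare default=True has a quirk: any value supplied on the command line arrives as a string, so even --save_memory False stays truthy. A quick check:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--save_memory", default=True)

args = parser.parse_args([])                          # flag omitted -> bool True
print(args.save_memory, bool(args.save_memory))       # True True
args = parser.parse_args(["--save_memory", "False"])  # flag given -> string "False"
print(args.save_memory, bool(args.save_memory))       # False True (non-empty string is truthy)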
@@ -365,7 +345,7 @@ with gr.Blocks() as demo:
                 textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
                 textgen_STEP = gr.Number(value=50, label="Gen steps", precision=0,
                                          minimum=40, maximum=100, interactive=True)
-                textgen_max_faces = gr.Number(value=
+                textgen_max_faces = gr.Number(value=60000, label="Face number", precision=0,
                                               minimum=5000, maximum=1000000, interactive=True)
             with gr.Row():
                 textgen_submit = gr.Button("Generate", variant="primary")
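Note: both this hunk and the next replace the face-count control with a gr.Number defaulting to 60,000 and bounded to 5,000-1,000,000; the old default is cut off in this rendering. Standing alone (a sketch assuming a Gradio release where gr.Number accepts minimum/maximum, e.g. 4.x), the widget looks like:

import gradio as gr

with gr.Blocks() as demo:
    # precision=0 makes the field integer-valued; minimum/maximum bound the
    # accepted range, matching the values added in this commit.
    max_faces = gr.Number(value=60000, label="Face number", precision=0,
                          minimum=5000, maximum=1000000, interactive=True)

demo.launch()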
@@ -398,7 +378,7 @@ with gr.Blocks() as demo:
                 imggen_SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
                 imggen_STEP = gr.Number(value=50, label="Gen steps", precision=0,
                                         minimum=40, maximum=100, interactive=True)
-                imggen_max_faces = gr.Number(value=
+                imggen_max_faces = gr.Number(value=60000, label="Face number", precision=0,
                                              minimum=5000, maximum=1000000, interactive=True)
             with gr.Row():
                 imggen_submit = gr.Button("Generate", variant="primary")