Update app.py
Browse files
app.py
CHANGED
|
@@ -1,249 +1,232 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
|
|
|
| 3 |
from pathlib import Path
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
|
|
|
| 6 |
import gradio as gr
|
| 7 |
-
from PIL import Image
|
| 8 |
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
"""
|
| 69 |
-
|
| 70 |
-
kv_pairs: JSON or 'key=value key2=value2' → becomes '--key value --key2 value2'
|
| 71 |
"""
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
args.extend(shlex.split(base_args.strip()))
|
| 77 |
-
|
| 78 |
-
# Add key=value pairs
|
| 79 |
-
if kv_pairs and kv_pairs.strip():
|
| 80 |
-
# try JSON first
|
| 81 |
-
as_json = None
|
| 82 |
-
try:
|
| 83 |
-
as_json = json.loads(kv_pairs)
|
| 84 |
-
except Exception:
|
| 85 |
-
pass
|
| 86 |
-
if isinstance(as_json, dict):
|
| 87 |
-
for k, v in as_json.items():
|
| 88 |
-
if k.startswith("--"): args.append(k)
|
| 89 |
-
else: args.append(f"--{k}")
|
| 90 |
-
if v is not True and v is not None:
|
| 91 |
-
args.append(str(v))
|
| 92 |
-
else:
|
| 93 |
-
# fallback: split by spaces, accept k=v tokens
|
| 94 |
-
for token in shlex.split(kv_pairs.strip()):
|
| 95 |
-
if "=" in token:
|
| 96 |
-
k, v = token.split("=", 1)
|
| 97 |
-
if k.startswith("--"): args.append(k)
|
| 98 |
-
else: args.append(f"--{k}")
|
| 99 |
-
args.append(v)
|
| 100 |
-
else:
|
| 101 |
-
# allow plain flags like --use_poisson
|
| 102 |
-
args.append(token)
|
| 103 |
-
|
| 104 |
-
return args
|
| 105 |
-
|
| 106 |
-
# ---------- streaming runner ----------
|
| 107 |
-
def _run_streaming(
|
| 108 |
-
image,
|
| 109 |
-
script_path,
|
| 110 |
-
input_path,
|
| 111 |
-
workdir,
|
| 112 |
-
output_dir,
|
| 113 |
-
freeform_args, # raw CLI string
|
| 114 |
-
kv_args, # k=v pairs or JSON
|
| 115 |
-
extra_env_json # ENV as JSON (optional)
|
| 116 |
-
):
|
| 117 |
-
depth_path = None; pcd_path = None; mesh_path = None
|
| 118 |
-
viewer_path = None
|
| 119 |
-
log_buf = []
|
| 120 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
if image is None:
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
#
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
d, p, m = _scan_outputs(output_dir)
|
| 168 |
-
depth_path = depth_path or d
|
| 169 |
-
pcd_path = pcd_path or p
|
| 170 |
-
mesh_path = mesh_path or m
|
| 171 |
-
viewer_path = mesh_path or pcd_path
|
| 172 |
-
yield depth_path, viewer_path, pcd_path, mesh_path, "\n".join(log_buf[-800:])
|
| 173 |
-
last_yield = time.time()
|
| 174 |
-
|
| 175 |
-
proc.wait()
|
| 176 |
-
|
| 177 |
-
# Final scan
|
| 178 |
-
d, p, m = _scan_outputs(output_dir)
|
| 179 |
-
depth_path = depth_path or d
|
| 180 |
-
pcd_path = pcd_path or p
|
| 181 |
-
mesh_path = mesh_path or m
|
| 182 |
-
viewer_path = mesh_path or pcd_path
|
| 183 |
-
log_buf.append(f"[app] Script finished with return code {proc.returncode}")
|
| 184 |
-
|
| 185 |
-
yield depth_path, viewer_path, pcd_path, mesh_path, "\n".join(log_buf[-2000:])
|
| 186 |
-
|
| 187 |
-
# ---------- UI ----------
|
| 188 |
-
with gr.Blocks(title="Run main.py — Dynamic Inputs") as demo:
|
| 189 |
-
gr.Markdown(
|
| 190 |
-
"## Run your `main.py` with dynamic user inputs\n"
|
| 191 |
-
"- Upload an image (we’ll save it to the path your script expects)\n"
|
| 192 |
-
"- Enter **CLI arguments** and/or **key=value** pairs (auto-converted to `--key value`)\n"
|
| 193 |
-
"- We stream stdout/stderr live and show any depth/PCD/mesh files your script writes\n"
|
| 194 |
)
|
| 195 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 196 |
with gr.Row():
|
| 197 |
-
with gr.Column(
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
with gr.Accordion("Environment (optional)", open=False):
|
| 219 |
-
extra_env = gr.Textbox(
|
| 220 |
-
value="{}",
|
| 221 |
-
label="ENV as JSON",
|
| 222 |
-
placeholder='e.g., {"OMP_NUM_THREADS":"1"}'
|
| 223 |
-
)
|
| 224 |
-
|
| 225 |
-
run_btn = gr.Button("Run script", variant="primary")
|
| 226 |
-
|
| 227 |
-
with gr.Column(scale=2):
|
| 228 |
-
with gr.Tabs():
|
| 229 |
-
with gr.Tab("Depth"):
|
| 230 |
-
depth_img = gr.Image(type="filepath", label="Depth preview (detected)")
|
| 231 |
-
with gr.Tab("3D Reconstruction"):
|
| 232 |
-
model3d = gr.Model3D(label="Mesh / Point Cloud (OBJ/PLY/GLB/GLTF)")
|
| 233 |
-
with gr.Tab("Downloads"):
|
| 234 |
-
pcd_file = gr.File(label="Point cloud (PLY)")
|
| 235 |
-
mesh_file = gr.File(label="Mesh (OBJ/PLY/GLB/GLTF)")
|
| 236 |
-
with gr.Tab("Logs"):
|
| 237 |
-
logs = gr.Textbox(label="Live logs", lines=20)
|
| 238 |
-
|
| 239 |
-
run_btn.click(
|
| 240 |
-
_run_streaming,
|
| 241 |
-
inputs=[img, script_path, input_path, workdir, output_dir, freeform_args, kv_args, extra_env],
|
| 242 |
-
outputs=[depth_img, model3d, pcd_file, mesh_file, logs]
|
| 243 |
)
|
| 244 |
|
| 245 |
-
#
|
| 246 |
-
demo.queue(
|
| 247 |
-
|
| 248 |
-
if __name__ == "__main__":
|
| 249 |
-
demo.launch(show_error=True, server_keepalive_timeout=180)
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
os.environ.setdefault("OMP_NUM_THREADS", "1") # silence libgomp spam on HF
|
| 3 |
+
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
|
| 4 |
+
|
| 5 |
from pathlib import Path
|
| 6 |
+
import io
|
| 7 |
+
import numpy as np
|
| 8 |
+
from PIL import Image
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from transformers import GLPNForDepthEstimation, GLPNImageProcessor
|
| 12 |
|
| 13 |
+
import open3d as o3d
|
| 14 |
import gradio as gr
|
|
|
|
| 15 |
|
| 16 |
+
|
| 17 |
+
# ----------------------------
# Device & model (load once)
# ----------------------------
# Pick the best available accelerator: CUDA, then Apple MPS, then CPU.
# getattr() guards against torch builds that predate torch.backends.mps.
DEVICE = torch.device(
    "cuda" if torch.cuda.is_available()
    else ("mps" if getattr(torch.backends, "mps", None) and torch.backends.mps.is_available() else "cpu")
)
# Loaded once at import time so every request reuses the same weights.
# NOTE(review): from_pretrained downloads on first run — needs network access
# and blocks startup until the weights are cached.
PROCESSOR = GLPNImageProcessor.from_pretrained("vinvino02/glpn-nyu")
MODEL = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-nyu").to(DEVICE).eval()
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# ----------------------------
|
| 29 |
+
# Helpers (faithful to main.py logic)
|
| 30 |
+
# ----------------------------
|
| 31 |
+
def _resize_like_main(pil_img: Image.Image, cap_h: int = 480):
    """Mirror main.py's preprocessing: cap the height at *cap_h*, round it
    down to a multiple of 32 (GLPN wants /32-divisible inputs), and scale the
    width to preserve the aspect ratio.

    Args:
        pil_img: input RGB image.
        cap_h: maximum working height (default 480, as in main.py).

    Returns:
        (resized_image, (original_width, original_height))
    """
    new_h = min(pil_img.height, cap_h)
    new_h -= (new_h % 32)
    if new_h < 32:
        new_h = 32
    # Clamp width to >= 1: for an extremely tall/narrow image (e.g. 1x1000)
    # the proportional width rounds to 0 and PIL's resize((0, h)) raises.
    new_w = max(1, int(new_h * pil_img.width / pil_img.height))
    return pil_img.resize((new_w, new_h), Image.BILINEAR), (pil_img.width, pil_img.height)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@torch.inference_mode()
def estimate_depth_glpn(pil_img: Image.Image) -> np.ndarray:
    """
    GLPN forward pass that does NOT rely on .post_process_depth()
    (which is absent on this processor class); the predicted depth map is
    upsampled back to the original image size manually.

    Returns a float32 depth map shaped (orig_h, orig_w); larger = farther.
    """
    small, (orig_w, orig_h) = _resize_like_main(pil_img)
    batch = PROCESSOR(images=small, return_tensors="pt")
    batch = {name: tensor.to(DEVICE) for name, tensor in batch.items()}

    prediction = MODEL(**batch).predicted_depth  # [B, 1, h, w]
    depth_small = prediction[0, 0].float().detach().cpu().numpy()  # at resized size

    # Resize the depth back to the original image size so the downstream
    # Open3D steps can pair it with the full-resolution RGB.
    upsampled = Image.fromarray(depth_small).resize((orig_w, orig_h), Image.BILINEAR)
    return np.array(upsampled).astype(np.float32)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def depth_vis(depth: np.ndarray) -> Image.Image:
    """Normalize a depth map to 0..255 and wrap it as a PIL image preview
    (stand-in for the matplotlib preview in main.py)."""
    shifted = depth - np.nanmin(depth)
    span = np.nanmax(shifted)
    if span <= 0:
        # Constant (or empty-range) depth: avoid division by zero.
        span = 1.0
    scaled = (255.0 * shifted / span).astype(np.uint8)
    return Image.fromarray(scaled)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def rgbd_from_rgb_depth(rgb: Image.Image, depth_f32: np.ndarray) -> o3d.geometry.RGBDImage:
    """
    Pair the RGB image with an 8-bit *normalized* depth map as an Open3D
    RGBDImage — the same visualization-oriented normalization main.py uses
    (metric scale is intentionally not preserved).
    """
    # Match main.py: squash depth into 0..255 uint8 before handing it to
    # create_from_color_and_depth; the epsilon guards an all-zero map.
    depth_u8 = (depth_f32 * 255.0 / (depth_f32.max() + 1e-8)).astype(np.uint8)
    return o3d.geometry.RGBDImage.create_from_color_and_depth(
        o3d.geometry.Image(np.array(rgb)),
        o3d.geometry.Image(depth_u8),
        convert_rgb_to_intensity=False,
    )
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def pointcloud_from_rgbd(rgbd: o3d.geometry.RGBDImage, w: int, h: int) -> o3d.geometry.PointCloud:
    """
    Back-project an RGBD image using the same simple pinhole model as
    main.py: fx = fy = 500, principal point at the image center.
    """
    intrinsics = o3d.camera.PinholeCameraIntrinsic()
    intrinsics.set_intrinsics(w, h, 500.0, 500.0, w / 2.0, h / 2.0)
    return o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsics)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
|
| 101 |
+
|
| 102 |
+
def filter_pointcloud(pcd: o3d.geometry.PointCloud):
    """
    Drop statistical outliers (the 'noise removal' step, conservatively
    tuned) and estimate normals so Poisson reconstruction can run next.
    """
    if not len(pcd.points):
        # Nothing to filter; returning the input keeps callers simple.
        return pcd
    _, kept_indices = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
    cleaned = pcd.select_by_index(kept_indices)
    cleaned.estimate_normals(
        search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.05, max_nn=30)
    )
    return cleaned
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def poisson_mesh(pcd: o3d.geometry.PointCloud, rotate_up=True) -> o3d.geometry.TriangleMesh:
    """Poisson surface reconstruction of a (normal-equipped) point cloud,
    optionally flipped upright by a pi rotation around X, as in main.py."""
    if not len(pcd.points):
        # Empty input: hand back an empty mesh rather than crashing Poisson.
        return o3d.geometry.TriangleMesh()
    mesh, _densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
        pcd, depth=10, n_threads=1
    )
    if rotate_up:
        # Same flip main.py applies (camera convention is y-down).
        flip = mesh.get_rotation_matrix_from_xyz((np.pi, 0.0, 0.0))
        mesh.rotate(flip, center=(0, 0, 0))
    mesh.compute_vertex_normals()
    return mesh
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def o3d_to_ply_bytes(geom: o3d.geometry.Geometry) -> bytes:
    """Serialize an Open3D geometry to .ply bytes (so Gradio can offer a download).

    Writes through a uniquely-named temporary file instead of a fixed
    "tmp_out.ply" in the CWD: the fixed name would be clobbered by concurrent
    Gradio requests, and the try/finally guarantees the temp file is removed
    even if the Open3D writer or the read raises.
    """
    import tempfile

    fd, tmp_name = tempfile.mkstemp(suffix=".ply")
    os.close(fd)  # Open3D writers take a path, not an open file handle
    tmp = Path(tmp_name)
    try:
        if isinstance(geom, o3d.geometry.PointCloud):
            o3d.io.write_point_cloud(str(tmp), geom)
        else:
            o3d.io.write_triangle_mesh(str(tmp), geom)
        return tmp.read_bytes()
    finally:
        tmp.unlink(missing_ok=True)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def render_point_count(pcd: o3d.geometry.PointCloud) -> str:
    """Human-readable point count for the UI stats line."""
    count = len(pcd.points)
    return f"Points: {count:,}"
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def render_face_count(mesh: o3d.geometry.TriangleMesh) -> str:
    """Human-readable vertex/triangle counts for the UI stats line."""
    n_vertices = len(mesh.vertices)
    n_triangles = len(mesh.triangles)
    return f"Vertices: {n_vertices:,} | Triangles: {n_triangles:,}"
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
# ----------------------------
# Gradio pipeline
# ----------------------------
def pipeline(image: Image.Image):
    """Full 2D→3D pipeline: depth → RGBD → point cloud → Poisson mesh.

    Args:
        image: uploaded PIL image (Gradio passes None when no upload).

    Returns:
        (rgb_preview, depth_preview, pcd_stats, mesh_stats,
         pcd_file_path, mesh_file_path, log_text) — order must match the
        `outputs=` list wired to the Run button.

    Raises:
        gr.Error: when no image was uploaded.
    """
    logs = []
    if image is None:
        raise gr.Error("Please upload an image of a room.")

    logs.append("Step 1 — Loaded image.")
    image = image.convert("RGB")
    w, h = image.size

    # Depth
    logs.append("Step 2 — Estimating depth with GLPN (vinvino02/glpn-nyu)…")
    depth = estimate_depth_glpn(image)
    depth_preview = depth_vis(depth)

    # RGBD
    logs.append("Step 3 — Creating RGBD image…")
    rgbd = rgbd_from_rgb_depth(image, depth)

    # Point cloud
    logs.append("Step 4 — Back-projecting to point cloud…")
    pcd = pointcloud_from_rgbd(rgbd, w, h)

    logs.append("Step 5 — Filtering noise & estimating normals…")
    pcd_f = filter_pointcloud(pcd)

    # Mesh
    logs.append("Step 6 — Poisson surface reconstruction…")
    mesh = poisson_mesh(pcd_f, rotate_up=True)

    # Prepare downloads. gr.File output components expect a filepath string,
    # not a ("name.ply", bytes) tuple — returning tuples breaks the download
    # tabs in Gradio 4.x — so the serialized geometry is written to temp files.
    logs.append("Step 7 — Preparing downloads…")
    pcd_path = _bytes_to_tmpfile(o3d_to_ply_bytes(pcd_f), "point_cloud.ply")
    mesh_path = _bytes_to_tmpfile(o3d_to_ply_bytes(mesh), "mesh.ply")

    # Small text stats
    pcd_stats = render_point_count(pcd_f)
    mesh_stats = render_face_count(mesh)

    logs.append("Done.")

    return (
        image,          # RGB preview
        depth_preview,  # Depth preview
        pcd_stats,      # point cloud stats
        mesh_stats,     # mesh stats
        pcd_path,       # downloadable point cloud (.ply)
        mesh_path,      # downloadable mesh (.ply)
        "\n".join(logs),
    )


def _bytes_to_tmpfile(data: bytes, name: str) -> str:
    """Write *data* under *name* inside a fresh temp directory and return the
    path, so Gradio serves the download with a friendly filename."""
    import tempfile

    out = Path(tempfile.mkdtemp()) / name
    out.write_bytes(data)
    return str(out)
|
| 202 |
|
| 203 |
+
|
| 204 |
+
# UI layout: left column takes input + shows logs, right column shows
# previews, stats, and downloadable artifacts.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 2D → 3D (GLPN → RGBD → Point Cloud → Poisson Mesh)\nUpload a single image to reproduce your main.py workflow.")

    with gr.Row():
        # Input side: image upload, trigger button, live log textbox.
        with gr.Column():
            inp = gr.Image(type="pil", label="Input Image")
            run = gr.Button("Reconstruct 3D", variant="primary")
            log_box = gr.Textbox(label="Log", lines=14, interactive=False)

        # Output side: RGB/depth previews, geometry stats, .ply downloads.
        with gr.Column():
            rgb_out = gr.Image(label="RGB Preview", interactive=False)
            depth_out = gr.Image(label="Depth Preview (8-bit normalized)", interactive=False)

            pc_txt = gr.Markdown()
            mesh_txt = gr.Markdown()

            pc_file = gr.File(label="Download Point Cloud (.ply)")
            mesh_file = gr.File(label="Download Mesh (.ply)")

    # Wire the button to the pipeline; the outputs list order must match
    # pipeline's 7-element return tuple exactly.
    run.click(
        fn=pipeline,
        inputs=[inp],
        outputs=[rgb_out, depth_out, pc_txt, mesh_txt, pc_file, mesh_file, log_box],
        api_name="reconstruct",
    )
|
| 229 |
|
| 230 |
+
# IMPORTANT: older Spaces error came from using unsupported args like concurrency_count.
|
| 231 |
+
demo.queue() # default queue works across Gradio 4.x
|
| 232 |
+
demo.launch()
|
|
|
|
|
|