Tohru127 committed (verified)
Commit: aa1f63c
Parent(s): b0b497a

Upload 3 files

Files changed (3)
  1. app.py +546 -0
  2. packages.txt +14 -0
  3. requirements.txt +16 -0
app.py ADDED
@@ -0,0 +1,546 @@
from __future__ import annotations

import datetime as dt
import io
import json
import os
import shutil
import subprocess
import textwrap
import uuid
import zipfile
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple

import gradio as gr
from PIL import Image


def _run_command(command: List[str], cwd: Optional[Path] = None, env: Optional[Dict[str, str]] = None) -> Tuple[int, str]:
    """Execute a shell command and capture combined stdout/stderr."""
    process = subprocess.run(
        command,
        cwd=str(cwd) if cwd else None,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    return process.returncode, process.stdout


@dataclass
class Backend:
    name: str
    description: str
    runner: Callable[[Path, Path, Optional[Dict[str, Path]], int], Tuple[Path, List[str]]]


class ReconstructionRunner:
    """Coordinate preprocessing, COLMAP, and neural backends."""

    def __init__(self, output_root: Optional[Path] = None) -> None:
        root = output_root or Path(os.environ.get("HF3D_OUTPUT_ROOT", "/tmp/hf_3d_runs"))
        root.mkdir(parents=True, exist_ok=True)
        self.output_root = root
        self.backends: Dict[str, Backend] = {}
        self._register_default_backends()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------
    def available_methods(self) -> List[str]:
        return list(self.backends.keys())

    def describe_backend(self, name: str) -> str:
        backend = self.backends.get(name)
        return backend.description if backend else ""

    def run(
        self,
        uploads: Iterable[Any],
        method: str,
        max_resolution: int,
        skip_colmap: bool,
    ) -> Tuple[str, Optional[Path]]:
        logs: List[str] = []
        timestamp = dt.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        workspace = self.output_root / f"run_{timestamp}_{uuid.uuid4().hex[:8]}"
        dataset_root = workspace / "dataset"
        images_dir = dataset_root / "images"
        images_dir.mkdir(parents=True, exist_ok=True)
        logs.append(f"Workspace initialized at {workspace}")

        try:
            ingest_count = self._ingest_uploads(uploads, images_dir, max_resolution)
        except Exception as exc:  # noqa: BLE001 - top-level guard for user feedback
            logs.append(f"[ERROR] Failed to ingest inputs: {exc}")
            return "\n".join(logs), None

        if ingest_count == 0:
            logs.append("[ERROR] No images detected in upload. Provide JPG/PNG files or a ZIP archive.")
            return "\n".join(logs), None

        logs.append(f"Ingested {ingest_count} image(s). Max resolution capped at {max_resolution}px")

        colmap_outputs: Optional[Dict[str, Path]] = None
        if skip_colmap:
            logs.append("Skipping COLMAP as requested. Downstream models must rely on precomputed poses.")
        else:
            try:
                colmap_outputs, colmap_logs = self._run_colmap(images_dir, workspace / "colmap", max_resolution)
                logs.extend(colmap_logs)
            except FileNotFoundError as exc:
                logs.append(
                    textwrap.dedent(
                        f"""
                        [ERROR] Required binary `{exc}` was not found. Ensure COLMAP is installed or set
                        `skip_colmap=True` if you plan to upload precomputed camera poses.
                        """
                    ).strip()
                )
                return "\n".join(logs), None
            except RuntimeError as exc:
                logs.append(str(exc))
                return "\n".join(logs), None

        backend = self.backends.get(method)
        if not backend:
            logs.append(f"[ERROR] Unknown backend '{method}'. Available options: {', '.join(self.available_methods())}")
            return "\n".join(logs), None

        try:
            artifact_path, backend_logs = backend.runner(workspace, dataset_root, colmap_outputs, max_resolution)
            logs.extend(backend_logs)
        except Exception as exc:  # noqa: BLE001 - propagate details to UI
            logs.append(f"[ERROR] Backend '{method}' failed: {exc}")
            return "\n".join(logs), None

        logs.append(f"Artifacts packaged at {artifact_path}")
        return "\n".join(logs), artifact_path

    # ------------------------------------------------------------------
    # Backend registration
    # ------------------------------------------------------------------
    def register_backend(self, backend: Backend) -> None:
        self.backends[backend.name] = backend

    def _register_default_backends(self) -> None:
        self.register_backend(
            Backend(
                name="Nerfstudio (NeRF)",
                description=(
                    "Optimizes a NeRF with the nerfacto recipe, exports a Poisson surface mesh, and packs all outputs "
                    "(config, checkpoints, mesh, transforms.json) into a ZIP archive."
                ),
                runner=self._run_nerfstudio,
            )
        )
        self.register_backend(
            Backend(
                name="3D Gaussian Splatting",
                description=(
                    "Uses the Inria Gaussian Splatting reference implementation initialized from COLMAP cameras. "
                    "Returns the optimized Gaussian point cloud and training logs."
                ),
                runner=self._run_gaussian_splatting,
            )
        )

    # ------------------------------------------------------------------
    # Input ingestion helpers
    # ------------------------------------------------------------------
    def _ingest_uploads(self, uploads: Iterable[Any], images_dir: Path, max_resolution: int) -> int:
        metadata: List[Dict[str, object]] = []
        count = 0
        for item in uploads:
            if not item:
                continue
            src_path = Path(getattr(item, "name", getattr(item, "path", "")))
            if not src_path.exists():
                # Gradio may store temp files in `.name`; fallback to `.path` when available
                if hasattr(item, "path"):
                    src_path = Path(item.path)
                if not src_path.exists():
                    continue

            if zipfile.is_zipfile(src_path):
                with zipfile.ZipFile(src_path, "r") as archive:
                    for member in archive.namelist():
                        lower = member.lower()
                        if lower.endswith((".jpg", ".jpeg", ".png")):
                            data = archive.read(member)
                            image = Image.open(io.BytesIO(data))
                            dest = images_dir / Path(member).name
                            self._save_image(image, dest, max_resolution)
                            metadata.append(self._image_metadata(dest, source=str(member)))
                            count += 1
            else:
                image = Image.open(src_path)
                dest = images_dir / src_path.name
                self._save_image(image, dest, max_resolution)
                metadata.append(self._image_metadata(dest, source=str(src_path.name)))
                count += 1

        if metadata:
            dataset_meta = {
                "created_at": dt.datetime.utcnow().isoformat() + "Z",
                "max_resolution": max_resolution,
                "images": metadata,
            }
            meta_path = images_dir.parent / "metadata.json"
            meta_path.write_text(json.dumps(dataset_meta, indent=2))
        return count

    @staticmethod
    def _save_image(image: Image.Image, destination: Path, max_resolution: int) -> None:
        image = image.convert("RGB")
        width, height = image.size
        scale = min(1.0, max_resolution / max(width, height))
        if scale < 1.0:
            new_size = (int(width * scale), int(height * scale))
            image = image.resize(new_size, Image.LANCZOS)
        destination.parent.mkdir(parents=True, exist_ok=True)
        image.save(destination, quality=95)

    @staticmethod
    def _image_metadata(path: Path, source: str) -> Dict[str, object]:
        with Image.open(path) as image:
            width, height = image.size
        return {
            "filename": path.name,
            "width": width,
            "height": height,
            "source": source,
        }

    # ------------------------------------------------------------------
    # COLMAP integration
    # ------------------------------------------------------------------
    def _run_colmap(self, images_dir: Path, output_dir: Path, max_resolution: int) -> Tuple[Dict[str, Path], List[str]]:
        if shutil.which("colmap") is None:
            raise FileNotFoundError("colmap")

        logs: List[str] = ["Running COLMAP reconstruction…"]
        output_dir.mkdir(parents=True, exist_ok=True)
        database_path = output_dir / "database.db"
        sparse_dir = output_dir / "sparse"
        dense_dir = output_dir / "dense"
        sparse_dir.mkdir(exist_ok=True)

        commands = [
            (
                "Feature extraction",
                [
                    "colmap",
                    "feature_extractor",
                    "--database_path",
                    str(database_path),
                    "--image_path",
                    str(images_dir),
                    "--SiftExtraction.use_gpu",
                    "1",
                    "--SiftExtraction.max_image_size",
                    str(max_resolution),
                ],
            ),
            (
                "Exhaustive matcher",
                [
                    "colmap",
                    "exhaustive_matcher",
                    "--database_path",
                    str(database_path),
                    "--SiftMatching.use_gpu",
                    "1",
                ],
            ),
            (
                "Mapper",
                [
                    "colmap",
                    "mapper",
                    "--database_path",
                    str(database_path),
                    "--image_path",
                    str(images_dir),
                    "--output_path",
                    str(sparse_dir),
                ],
            ),
            (
                "Image undistorter",
                [
                    "colmap",
                    "image_undistorter",
                    "--image_path",
                    str(images_dir),
                    "--input_path",
                    str(sparse_dir / "0"),
                    "--output_path",
                    str(dense_dir),
                    "--output_type",
                    "COLMAP",
                ],
            ),
        ]

        for stage, command in commands:
            logs.append(f"\n$ {' '.join(command)}")
            code, output = _run_command(command)
            logs.append(output)
            if code != 0:
                raise RuntimeError(f"[ERROR] COLMAP stage '{stage}' failed with exit code {code}.")

        outputs = {
            "database": database_path,
            "sparse": sparse_dir / "0",
            "dense": dense_dir,
        }
        logs.append("COLMAP completed successfully.")
        return outputs, logs

    # ------------------------------------------------------------------
    # Backend implementations
    # ------------------------------------------------------------------
    def _run_nerfstudio(
        self,
        workspace: Path,
        dataset_root: Path,
        colmap_outputs: Optional[Dict[str, Path]],
        max_resolution: int,
    ) -> Tuple[Path, List[str]]:
        if shutil.which("ns-train") is None:
            raise FileNotFoundError("ns-train")

        logs: List[str] = ["Launching Nerfstudio pipeline…"]
        processed_dir = workspace / "nerfstudio" / "processed"
        runs_dir = workspace / "nerfstudio" / "runs"
        export_dir = workspace / "nerfstudio" / "export"
        processed_dir.mkdir(parents=True, exist_ok=True)
        runs_dir.mkdir(parents=True, exist_ok=True)
        export_dir.mkdir(parents=True, exist_ok=True)

        data_source = dataset_root / "images"
        process_cmd = [
            "ns-process-data",
            "images",
            "--data",
            str(data_source),
            "--output-dir",
            str(processed_dir),
            "--max-num-downscales",
            str(max(1, int(max_resolution / 512))),
        ]
        if colmap_outputs:
            process_cmd.extend(["--skip-colmap"])
            process_cmd.extend(["--colmap-model-path", str(colmap_outputs["sparse"])])

        logs.append(f"\n$ {' '.join(process_cmd)}")
        code, output = _run_command(process_cmd)
        logs.append(output)
        if code != 0:
            raise RuntimeError("ns-process-data failed. See logs above.")

        train_cmd = [
            "ns-train",
            "nerfacto",
            "--data",
            str(processed_dir),
            "--max-num-iterations",
            "3000",
            "--output-dir",
            str(runs_dir),
            "--viewer.quit-on-train-completion",
            "True",
            "--pipeline.model.depth-importance",
            "0.3",
        ]
        logs.append(f"\n$ {' '.join(train_cmd)}")
        code, output = _run_command(train_cmd)
        logs.append(output)
        if code != 0:
            raise RuntimeError("ns-train failed. Consider reducing iterations or verifying GPU availability.")

        configs = sorted(runs_dir.rglob("config.yml"))
        if not configs:
            raise RuntimeError("Unable to locate Nerfstudio config.yml after training.")
        config_path = configs[-1]

        export_cmd = [
            "ns-export",
            "poisson",
            "--load-config",
            str(config_path),
            "--output-path",
            str(export_dir),
        ]
        logs.append(f"\n$ {' '.join(export_cmd)}")
        code, output = _run_command(export_cmd)
        logs.append(output)
        if code != 0:
            raise RuntimeError("ns-export failed. Check above logs for details.")

        mesh_path = export_dir / "mesh.obj"
        artifact_path = workspace / "nerfstudio_result.zip"
        with zipfile.ZipFile(artifact_path, "w") as archive:
            for path in [mesh_path, export_dir / "mesh.mtl", config_path, processed_dir / "transforms.json"]:
                if path.exists():
                    archive.write(path, arcname=path.relative_to(workspace))
            for ckpt in runs_dir.rglob("*.ckpt"):
                archive.write(ckpt, arcname=ckpt.relative_to(workspace))
        logs.append("Nerfstudio export complete.")
        return artifact_path, logs

    def _run_gaussian_splatting(
        self,
        workspace: Path,
        dataset_root: Path,
        colmap_outputs: Optional[Dict[str, Path]],
        max_resolution: int,
    ) -> Tuple[Path, List[str]]:
        default_repo = Path(__file__).resolve().parent / "external" / "gaussian-splatting"
        repo_root = Path(os.environ.get("GAUSSIAN_SPLATTING_ROOT", default_repo))
        convert_script = repo_root / "convert.py"
        train_script = repo_root / "train.py"
        if not convert_script.exists() or not train_script.exists():
            raise FileNotFoundError(
                "Gaussian Splatting repository not found. Clone it to 'external/gaussian-splatting' "
                "or set GAUSSIAN_SPLATTING_ROOT to point at the upstream project."
            )
        if not colmap_outputs:
            raise RuntimeError("Gaussian Splatting requires COLMAP outputs. Disable 'Skip COLMAP'.")

        logs: List[str] = ["Launching 3D Gaussian Splatting pipeline…"]
        gaussian_root = workspace / "gaussian"
        data_dir = gaussian_root / "data"
        model_dir = gaussian_root / "model"
        gaussian_root.mkdir(parents=True, exist_ok=True)

        convert_cmd = [
            "python3",
            str(convert_script),
            "-s",
            str(colmap_outputs["dense"]),
            "-o",
            str(data_dir),
        ]
        logs.append(f"\n$ {' '.join(convert_cmd)}")
        code, output = _run_command(convert_cmd, cwd=repo_root)
        logs.append(output)
        if code != 0:
            raise RuntimeError("Gaussian Splatting conversion failed. Verify COLMAP dense output.")

        train_cmd = [
            "python3",
            str(train_script),
            "-s",
            str(data_dir),
            "-m",
            str(model_dir),
            "--iterations",
            "7000",
            "--resolution",
            str(max(1, max_resolution // 512)),
        ]
        logs.append(f"\n$ {' '.join(train_cmd)}")
        code, output = _run_command(train_cmd, cwd=repo_root)
        logs.append(output)
        if code != 0:
            raise RuntimeError("Gaussian Splatting training failed. See logs for CUDA-related messages.")

        ply_candidates = sorted(model_dir.rglob("*.ply"))
        if not ply_candidates:
            raise RuntimeError("No PLY point cloud found after Gaussian Splatting training.")
        ply_path = ply_candidates[-1]

        artifact_path = workspace / "gaussian_result.zip"
        with zipfile.ZipFile(artifact_path, "w") as archive:
            archive.write(ply_path, arcname=ply_path.relative_to(workspace))
            for log_file in gaussian_root.rglob("*.log"):
                archive.write(log_file, arcname=log_file.relative_to(workspace))
        logs.append("Gaussian Splatting export complete.")
        return artifact_path, logs


# ----------------------------------------------------------------------
# Gradio interface
# ----------------------------------------------------------------------

def build_interface() -> gr.Blocks:
    output_override = os.environ.get("HF3D_OUTPUT_ROOT")
    if output_override:
        output_root = Path(output_override)
    else:
        output_root = Path(__file__).resolve().parent / "runs"
    runner = ReconstructionRunner(output_root=output_root)

    with gr.Blocks(title="Sparse Images to 3D Reconstruction") as demo:
        gr.Markdown(
            textwrap.dedent(
                """
                # Sparse Images ➜ 3D Reconstruction

                Upload a folder or ZIP archive of sparse, non-overlapping photographs. The app will run COLMAP to estimate camera
                poses, then optimize either a Nerfstudio NeRF or a 3D Gaussian Splatting model and return a downloadable artifact.
                Expect several minutes of processing time for high-resolution captures.
                """
            )
        )

        with gr.Row():
            uploads = gr.Files(label="Images or ZIP archive", file_types=["image", ".zip"], file_count="multiple")
            method = gr.Dropdown(
                choices=runner.available_methods(),
                value="Nerfstudio (NeRF)",
                label="Reconstruction backend",
            )

        with gr.Row():
            max_resolution = gr.Slider(
                minimum=512,
                maximum=4096,
                step=256,
                value=2048,
                label="Max processing resolution (pixels)",
            )
            skip_colmap = gr.Checkbox(
                value=False,
                label="Skip COLMAP (use existing poses)",
            )

        default_backend = runner.available_methods()[0] if runner.available_methods() else ""
        backend_description = gr.Markdown(runner.describe_backend(default_backend))
        method.change(
            fn=lambda choice: runner.describe_backend(choice),
            inputs=method,
            outputs=backend_description,
        )
        run_button = gr.Button("Start reconstruction", variant="primary")

        logs = gr.Textbox(label="Pipeline log", lines=20)
        artifact = gr.File(label="Download results")

        def _execute(files: List[Any], backend: str, resolution: int, skip: bool) -> Tuple[str, Optional[str]]:
            log_text, artifact_path = runner.run(files, backend, resolution, skip)
            if artifact_path is None:
                return log_text, None
            return log_text, str(artifact_path)

        run_button.click(
            fn=_execute,
            inputs=[uploads, method, max_resolution, skip_colmap],
            outputs=[logs, artifact],
        )

    return demo


def main() -> None:
    demo = build_interface()
    demo.queue(concurrency_count=1).launch(server_name=os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0"))


if __name__ == "__main__":
    main()
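The ReconstructionRunner defined above can also be driven without the Gradio UI. Below is a minimal headless sketch, assuming app.py is importable, COLMAP and the requirements listed further down are installed, and "captures/photos.zip" stands in for a real upload; the _Upload wrapper is hypothetical and only mimics the `.name` attribute the ingestion code reads.

# Headless-usage sketch (assumptions: app.py on PYTHONPATH, COLMAP installed,
# placeholder archive path; _Upload is a hypothetical stand-in for a Gradio file).
from pathlib import Path

from app import ReconstructionRunner


class _Upload:
    """Any object exposing a `.name` file path is accepted by _ingest_uploads."""

    def __init__(self, name: str) -> None:
        self.name = name


runner = ReconstructionRunner(output_root=Path("./runs"))
log_text, artifact = runner.run(
    uploads=[_Upload("captures/photos.zip")],  # placeholder path
    method="Nerfstudio (NeRF)",
    max_resolution=1536,
    skip_colmap=False,
)
print(log_text)
print("Artifact:", artifact)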
packages.txt ADDED
@@ -0,0 +1,14 @@
build-essential
cmake
git
wget
ninja-build
libboost-all-dev
libeigen3-dev
libfreeimage-dev
libmetis-dev
libgoogle-glog-dev
libgflags-dev
libglew-dev
qtbase5-dev
mesa-utils
requirements.txt ADDED
@@ -0,0 +1,16 @@
gradio>=4.31.0
numpy>=1.24
opencv-python-headless>=4.8
scipy>=1.11
torch>=2.1
torchvision>=0.16
tqdm>=4.66
pyyaml>=6.0
rich>=13.7
nerfstudio>=0.3.4
open3d>=0.17
plyfile>=1.0
trimesh>=4.0
pandas>=2.1
matplotlib>=3.8
Pillow>=10.0