MogensR committed
Commit f0113d4 · 1 Parent(s): 4df47d9

Update startup.sh

Files changed (1):
  1. startup.sh +24 -6
startup.sh CHANGED
@@ -4,36 +4,54 @@
 
 set -Eeuo pipefail
 
-# ── CUDA allocator & MatAnyone scaling (fix fragmentation / VRAM pressure) ──
+# ── CUDA allocator & MatAnyone scaling (reduce fragmentation / VRAM pressure) ──
 export PYTORCH_CUDA_ALLOC_CONF="${PYTORCH_CUDA_ALLOC_CONF:-expandable_segments:True,max_split_size_mb:128}"
+export CUDA_MODULE_LOADING="${CUDA_MODULE_LOADING:-LAZY}"
+
+# MatAnyone loader/adapter hints (these are read in MatAnyoneLoader + CoreVideoProcessor)
 export MATANYONE_MAX_EDGE="${MATANYONE_MAX_EDGE:-640}"
 export MATANYONE_TARGET_PIXELS="${MATANYONE_TARGET_PIXELS:-400000}"
 
+# Windowed two-phase pipeline (SAM2 window → release → MatAnyone window)
+export MATANYONE_WINDOWED="${MATANYONE_WINDOWED:-1}"
+export MATANYONE_WINDOW="${MATANYONE_WINDOW:-8}"
+
+# Model-only downscale cap (applied in CoreVideoProcessor)
+export MAX_MODEL_SIZE="${MAX_MODEL_SIZE:-1280}"
+
 # ── Threads / general ──
 export OMP_NUM_THREADS="${OMP_NUM_THREADS:-4}"
 export MKL_NUM_THREADS="${MKL_NUM_THREADS:-4}"
 export PYTHONUNBUFFERED=1
 
+# ── Hugging Face caches (keeps downloads local to repo; avoids symlinks in some envs) ──
+export HF_HOME="${HF_HOME:-$PWD/checkpoints/hf}"
+export TRANSFORMERS_CACHE="${TRANSFORMERS_CACHE:-$HF_HOME}"
+export HF_DATASETS_CACHE="${HF_DATASETS_CACHE:-$HF_HOME}"
+export HF_HUB_DISABLE_SYMLINKS="${HF_HUB_DISABLE_SYMLINKS:-1}"
+export HF_HUB_ENABLE_HF_TRANSFER="${HF_HUB_ENABLE_HF_TRANSFER:-0}"
+
 # ── Gradio ──
 export GRADIO_SERVER_NAME="${GRADIO_SERVER_NAME:-0.0.0.0}"
 export GRADIO_SERVER_PORT="${GRADIO_SERVER_PORT:-7860}"
 
-# ── Hugging Face caches (keeps downloads local to repo) ──
-export HF_HOME="${HF_HOME:-$PWD/checkpoints/hf}"
-export TRANSFORMERS_CACHE="${TRANSFORMERS_CACHE:-$HF_HOME}"
-
 # ── Banner ──
 echo "===== BackgroundFX Pro Starting ====="
 echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
 echo "MKL_NUM_THREADS=$MKL_NUM_THREADS"
 echo "PYTORCH_CUDA_ALLOC_CONF=$PYTORCH_CUDA_ALLOC_CONF"
+echo "CUDA_MODULE_LOADING=$CUDA_MODULE_LOADING"
 echo "MATANYONE_MAX_EDGE=$MATANYONE_MAX_EDGE"
 echo "MATANYONE_TARGET_PIXELS=$MATANYONE_TARGET_PIXELS"
+echo "MATANYONE_WINDOWED=$MATANYONE_WINDOWED"
+echo "MATANYONE_WINDOW=$MATANYONE_WINDOW"
+echo "MAX_MODEL_SIZE=$MAX_MODEL_SIZE"
 echo "HF_HOME=$HF_HOME"
+echo "HF_HUB_DISABLE_SYMLINKS=$HF_HUB_DISABLE_SYMLINKS"
 echo "GRADIO_SERVER_NAME=$GRADIO_SERVER_NAME"
 echo "GRADIO_SERVER_PORT=$GRADIO_SERVER_PORT"
 echo "====================================="
 command -v nvidia-smi >/dev/null 2>&1 && nvidia-smi || true
 
 # ── Launch ──
-exec python app.py
+exec python -X faulthandler app.py
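
For context on what the new MATANYONE_WINDOWED / MATANYONE_WINDOW variables are expected to drive: the script comment describes a windowed two-phase pipeline in which SAM2 segments a window of frames, cached VRAM is released, and MatAnyone then mattes the same window. A minimal sketch of such a loop, assuming hypothetical sam2.segment and matanyone.matte interfaces; the actual consumers (MatAnyoneLoader, CoreVideoProcessor) are not part of this diff:

import os
import torch

WINDOWED = os.environ.get("MATANYONE_WINDOWED", "1") == "1"
WINDOW = int(os.environ.get("MATANYONE_WINDOW", "8"))

def process_video(frames, sam2, matanyone):
    """Run SAM2 then MatAnyone over fixed-size windows of frames."""
    results = []
    # With windowing disabled, fall back to a single window over all frames.
    step = WINDOW if WINDOWED else max(1, len(frames))
    for start in range(0, len(frames), step):
        window = frames[start:start + step]
        masks = sam2.segment(window)        # phase 1: segmentation on this window
        torch.cuda.empty_cache()            # release cached VRAM before phase 2
        results.extend(matanyone.matte(window, masks))  # phase 2: matting
        torch.cuda.empty_cache()
    return results

Keeping only one model's activations resident per window is what pairs naturally with the expandable_segments allocator setting above: both aim at the same fragmentation / VRAM-pressure problem.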
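Similarly, the "model-only downscale cap" comment suggests frames are resized for inference while the rendered output keeps the source resolution. A sketch under that assumption (the function name and the precedence between MAX_MODEL_SIZE and MATANYONE_TARGET_PIXELS are guesses; the commit only says the cap is applied in CoreVideoProcessor):

import os

MAX_MODEL_SIZE = int(os.environ.get("MAX_MODEL_SIZE", "1280"))
TARGET_PIXELS = int(os.environ.get("MATANYONE_TARGET_PIXELS", "400000"))

def model_resolution(width: int, height: int) -> tuple[int, int]:
    """Resolution fed to the model; output compositing keeps the source size."""
    scale = 1.0
    longest = max(width, height)
    if longest > MAX_MODEL_SIZE:                         # cap the longest edge
        scale = MAX_MODEL_SIZE / longest
    if width * height * scale * scale > TARGET_PIXELS:   # cap the pixel budget
        scale = min(scale, (TARGET_PIXELS / (width * height)) ** 0.5)
    return max(1, round(width * scale)), max(1, round(height * scale))

Since every export uses the ${VAR:-default} pattern, all of these knobs can be overridden per launch without editing the script, e.g. MATANYONE_WINDOW=16 MAX_MODEL_SIZE=1024 ./startup.sh.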