#!/bin/bash
set -ex

echo "--- Starting setup and server ---"

# Use the exact snapshot path resolved at build time so Python can import the model code.
# The HF cache's model root doesn't contain the files; they live under snapshots/.
HF_MODEL_PATH_FILE="/home/user/app/model_path.txt"
if [ -f "$HF_MODEL_PATH_FILE" ]; then
    export HF_MODEL_PATH="$(cat "$HF_MODEL_PATH_FILE")"
fi

# Ensure Python sees the model's modules (e.g., modeling_dots_ocr_vllm.py).
# ${PYTHONPATH:+:...} avoids a dangling colon when PYTHONPATH is unset.
export PYTHONPATH="${HF_MODEL_PATH}${PYTHONPATH:+:${PYTHONPATH}}"

# Expose the snapshot as a local "DotsOCR" package so `from DotsOCR import ...` resolves.
if [ -n "$HF_MODEL_PATH" ] && [ ! -e "DotsOCR" ]; then
    ln -s "$HF_MODEL_PATH" DotsOCR
fi

# Sanity check: with set -e, the script aborts here if the model code is not importable.
python3 - <<'PY'
import os
import sys

print("HF_MODEL_PATH:", os.environ.get("HF_MODEL_PATH"))
print("PYTHONPATH env:", os.environ.get("PYTHONPATH"))
print("sys.path:", sys.path)
from DotsOCR import modeling_dots_ocr_vllm  # noqa: F401
print("DotsOCR import OK")
PY

echo "Starting server..."
exec vllm serve "${HF_MODEL_PATH}" \
    --host 0.0.0.0 \
    --port "${PORT}" \
    --tensor-parallel-size 1 \
    --gpu-memory-utilization 0.8 \
    --chat-template-content-format string \
    --served-model-name dotsocr-model \
    --trust-remote-code
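
# --- Usage note (comments only; nothing after the exec above ever runs) ---
# A minimal smoke test from another shell, assuming the server is reachable on
# localhost at ${PORT} and that vLLM's standard OpenAI-compatible endpoints are
# enabled; "dotsocr-model" matches --served-model-name above:
#
#   curl -sf "http://localhost:${PORT}/health" && echo "server up"
#   curl -s  "http://localhost:${PORT}/v1/models"
#   curl -s  "http://localhost:${PORT}/v1/chat/completions" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "dotsocr-model", "messages": [{"role": "user", "content": "ping"}]}'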