#!/usr/bin/env bash
# Container entrypoint: launch the Ollama server in the background, wait for
# it to accept TCP connections, pull a default embedding model (best-effort),
# then exec into a FastAPI reverse proxy so it becomes PID 1's child and
# receives container signals (SIGTERM etc.) directly.
set -euo pipefail

# Start Ollama server in background
echo "🚀 Starting Ollama server..."
ollama serve &

# Wait for Ollama to be ready. Bounded so a broken server cannot hang the
# container forever; nc -z only probes the port, it sends no data.
echo "⏳ Waiting for Ollama to start..."
max_wait=60
waited=0
while ! nc -z localhost 11434; do
  sleep 1
  waited=$((waited + 1))
  if [ "$waited" -ge "$max_wait" ]; then
    echo "error: Ollama did not become ready within ${max_wait}s" >&2
    exit 1
  fi
done

# Optional: pull default model. A pull failure (e.g. no network) must not
# abort startup under set -e, so downgrade it to a warning.
echo "📥 Pulling default model..."
ollama pull all-minilm || echo "warning: failed to pull all-minilm" >&2

# Start FastAPI reverse proxy; exec replaces this shell with uvicorn.
echo "🌐 Starting FastAPI reverse proxy on port 7860..."
exec python -m uvicorn ollama:app --host 0.0.0.0 --port 7860 --proxy-headers