# 1) Python 3.11
FROM python:3.11-slim
# 2) System deps, git-lfs & hf_transfer
# (the version spec must be quoted: an unquoted ">=" is a shell redirect,
# which would silently drop the pin)
RUN pip install --no-cache-dir "hf_transfer>=0.1.6" && \
    apt-get update && apt-get install -y \
    build-essential \
    curl \
    git \
    git-lfs \
    && rm -rf /var/lib/apt/lists/*
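# (git-lfs backs the "git lfs pull" in step 5; curl is used by the Ollama
# installer in step 3 and by the healthcheck loop in step 8.)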
# 3) Install Ollama
RUN curl -fsSL https://ollama.com/install.sh | sh
# 4) Ollama storage → /tmp (writeable in Spaces)
ENV OLLAMA_HOME=/tmp/ollama
ENV OLLAMA_MODELS=/tmp/ollama
ENV OLLAMA_MODEL=gemma2:9b
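# (The model is not baked into the image: step 8 pulls it into /tmp at
# container start, so it has to fit in the Space's ephemeral disk.)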
RUN install -d -m 777 /tmp/ollama
# 5) App setup
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . /app
RUN git lfs install && git lfs pull || true
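# ("|| true" tolerates build contexts shipped without a .git directory,
# where "git lfs pull" has nothing to operate on and exits non-zero.)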
# 6) Streamlit & caches
ENV HOME=/app
ENV STREAMLIT_HOME=/app/.streamlit
RUN install -d -m 777 /app/.streamlit
ENV HF_HOME=/tmp/hf-home \
    TRANSFORMERS_CACHE=/tmp/hf-cache \
    HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
    TORCH_HOME=/tmp/torch-cache \
    XDG_CACHE_HOME=/tmp/xdg-cache
RUN install -d -m 777 /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
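# (All caches point at /tmp and are opened up with mode 777, presumably
# because Spaces runs the container as an unprivileged user: anything
# written at runtime must land in a world-writeable location.)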
# 7) Internal Ollama host (same container)
ENV OLLAMA_HOST=http://127.0.0.1:11434
ENV OLLAMA_TIMEOUT=300
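# (OLLAMA_HOST is read both by "ollama serve" for its bind address and by
# clients as the endpoint; OLLAMA_TIMEOUT is presumably an app-level knob
# read by app.py rather than by Ollama itself.)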
EXPOSE 8501
# 8) Runtime: clean /tmp/ollama → serve → healthcheck → pull gemma2:9b → Streamlit
# (the extra check after the loop fails fast with a clear message if the
# server never came up, instead of letting "ollama pull" error obscurely)
CMD bash -lc '\
    set -euo pipefail; \
    rm -rf /tmp/ollama && install -d -m 777 /tmp/ollama; \
    echo "Starting Ollama with model: ${OLLAMA_MODEL}"; \
    env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama serve & \
    for i in {1..240}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
    curl -sf http://127.0.0.1:11434/api/version >/dev/null || { echo "Ollama did not come up within 240s" >&2; exit 1; }; \
    env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama pull "${OLLAMA_MODEL}"; \
    exec streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
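
The Dockerfile only wires the container together; app.py itself is expected to
read OLLAMA_HOST, OLLAMA_MODEL and OLLAMA_TIMEOUT from the environment and talk
to the local Ollama server. A minimal sketch of such an app, assuming streamlit
and requests come in via requirements.txt (the real app is not shown here):

    import os

    import requests
    import streamlit as st

    # Same env vars the Dockerfile sets, with the same values as fallbacks.
    OLLAMA_HOST = os.environ.get("OLLAMA_HOST", "http://127.0.0.1:11434")
    OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "gemma2:9b")
    OLLAMA_TIMEOUT = int(os.environ.get("OLLAMA_TIMEOUT", "300"))

    st.title("Ollama on Spaces")
    prompt = st.text_area("Prompt")

    if st.button("Generate") and prompt:
        # Non-streaming call to Ollama's /api/generate endpoint; the JSON
        # reply carries the full completion under the "response" key.
        resp = requests.post(
            f"{OLLAMA_HOST}/api/generate",
            json={"model": OLLAMA_MODEL, "prompt": prompt, "stream": False},
            timeout=OLLAMA_TIMEOUT,
        )
        resp.raise_for_status()
        st.write(resp.json()["response"])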