code-slicer committed
Commit 63b906d · verified · 1 Parent(s): 73515ef

Delete Dockerfile

Files changed (1)
  1. Dockerfile +0 -55
Dockerfile DELETED
@@ -1,55 +0,0 @@
- # 1) Python 3.11
- FROM python:3.11-slim
-
- # 2) System deps & git-lfs
- RUN pip install --no-cache-dir hf_transfer>=0.1.6 && \
-     apt-get update && apt-get install -y \
-         build-essential \
-         curl \
-         git \
-         git-lfs \
-     && rm -rf /var/lib/apt/lists/*
-
- # 3) Install Ollama
- RUN curl -fsSL https://ollama.com/install.sh | sh
-
- # 4) Ollama storage → /tmp (writeable in Spaces)
- ENV OLLAMA_HOME=/tmp/ollama
- ENV OLLAMA_MODELS=/tmp/ollama
- ENV OLLAMA_MODEL=gemma2:9b
- RUN install -d -m 777 /tmp/ollama
-
- # 5) App setup
- WORKDIR /app
- COPY requirements.txt .
- RUN pip install --no-cache-dir -r requirements.txt
- COPY . /app
- RUN git lfs install && git lfs pull || true
-
- # 6) Streamlit & caches
- ENV HOME=/app
- ENV STREAMLIT_HOME=/app/.streamlit
- RUN install -d -m 777 /app/.streamlit
-
- ENV HF_HOME=/tmp/hf-home \
-     TRANSFORMERS_CACHE=/tmp/hf-cache \
-     HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
-     TORCH_HOME=/tmp/torch-cache \
-     XDG_CACHE_HOME=/tmp/xdg-cache
- RUN install -d -m 777 /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
-
- # 7) Internal Ollama host (same container)
- ENV OLLAMA_HOST=http://127.0.0.1:11434
- ENV OLLAMA_TIMEOUT=300
-
- EXPOSE 8501
-
- # 8) Runtime: clean /tmp/ollama → serve → healthcheck → pull gemma2:9b → Streamlit
- CMD bash -lc '\
-     set -euo pipefail; \
-     rm -rf /tmp/ollama && install -d -m 777 /tmp/ollama; \
-     echo "Starting Ollama with model: ${OLLAMA_MODEL}"; \
-     env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama serve & \
-     for i in {1..240}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
-     env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama pull "${OLLAMA_MODEL}"; \
-     exec streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
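
For reference, the image defined by the now-deleted Dockerfile could be built and exercised locally roughly as follows. This is a minimal sketch: the image tag ollama-streamlit-space is a placeholder, and it assumes app.py and requirements.txt are present in the build context, as the COPY steps above require.

    # Build from the repository root (placeholder tag).
    docker build -t ollama-streamlit-space .
    # Run it, publishing the Streamlit port declared by EXPOSE 8501; the CMD defaults to ${PORT:-8501}.
    docker run --rm -p 8501:8501 ollama-streamlit-space

On Hugging Face Spaces the build and port routing are handled by the platform, so these commands are only relevant for local testing.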