Try llava-v1.6-mistral-7b.Q5_K_M.gguf
Dockerfile CHANGED (+5 -5)
@@ -26,8 +26,8 @@ ENV PATH="/usr/local/cuda/bin:$PATH" \
 WORKDIR /app
 
 # Download ggml and mmproj models from HuggingFace
-RUN wget https://huggingface.co/
-    wget https://huggingface.co/
+RUN wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q5_K_M.gguf && \
+    wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf
 
 # Clone and build llava-server with CUDA support
 RUN git clone https://github.com/ggerganov/llama.cpp.git && \
@@ -39,10 +39,10 @@ RUN git clone https://github.com/ggerganov/llama.cpp.git && \
 # Create a non-root user for security reasons
 RUN useradd -m -u 1000 user && \
     mkdir -p /home/user/app && \
-    cp /app/
+    cp /app/llava-v1.6-mistral-7b.Q5_K_M.gguf /home/user/app && \
     cp /app/mmproj-model-f16.gguf /home/user/app
 
-RUN chown user:user /home/user/app/
+RUN chown user:user /home/user/app/llava-v1.6-mistral-7b.Q5_K_M.gguf && \
     chown user:user /home/user/app/mmproj-model-f16.gguf
 
 USER user
@@ -54,4 +54,4 @@ WORKDIR $HOME/app
 EXPOSE 8080
 
 # Start the llava-server with models
-CMD ["/app/llama.cpp/server", "--model", "
+CMD ["/app/llama.cpp/server", "--model", "llava-v1.6-mistral-7b.Q5_K_M.gguf", "--mmproj", "mmproj-model-f16.gguf", "--threads", "6", "--host", "0.0.0.0"]
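
For context on the first hunk: the image now pulls the Q5_K_M model and its multimodal projector from the cjpais/llava-1.6-mistral-7b-gguf repo with plain wget. A minimal sketch of an equivalent download using the huggingface_hub Python client follows; this is an alternative for local testing, not what this Dockerfile runs, and assumes huggingface_hub is installed (it is not part of this image).

from huggingface_hub import hf_hub_download

REPO_ID = "cjpais/llava-1.6-mistral-7b-gguf"  # repo taken from the wget URLs above

for filename in ("llava-v1.6-mistral-7b.Q5_K_M.gguf", "mmproj-model-f16.gguf"):
    # hf_hub_download caches and etag-verifies the file; local_dir="/app"
    # mirrors the Dockerfile's WORKDIR.
    path = hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir="/app")
    print(f"downloaded {filename} -> {path}")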
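
The new CMD loads both GGUF files by relative path, which resolves against the working directory ($HOME/app, where the earlier hunk copies them), and serves on port 8080 on all interfaces. A minimal sketch of a client request follows, assuming the llama.cpp server's /completion endpoint and its base64 image_data field for LLaVA-style prompts (endpoint and fields per upstream llama.cpp; photo.jpg is a hypothetical test image).

import base64
import json
import urllib.request

# Encode a local test image; [img-1] in the prompt references id=1 below.
with open("photo.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

payload = {
    "prompt": "USER: [img-1] Describe this image.\nASSISTANT:",
    "image_data": [{"data": image_b64, "id": 1}],
    "n_predict": 128,
}
req = urllib.request.Request(
    "http://localhost:8080/completion",  # EXPOSE 8080, --host 0.0.0.0 above
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read())["content"])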