# Base image
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
# Update and install necessary dependencies
RUN apt update && \
    apt install --no-install-recommends -y \
        build-essential \
        python3 \
        python3-pip \
        wget \
        curl \
        git \
        cmake \
        zlib1g-dev \
        libblas-dev && \
    apt clean && \
    rm -rf /var/lib/apt/lists/*
# Set up CUDA environment variables (likely redundant with the official nvidia/cuda image, but explicit is safer)
ENV PATH="/usr/local/cuda/bin:$PATH" \
    LD_LIBRARY_PATH="/usr/local/cuda/lib64:$LD_LIBRARY_PATH" \
    CUDA_HOME="/usr/local/cuda"
WORKDIR /app
# Download ggml and mmproj models from HuggingFace
RUN wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q4_k.gguf && \
    wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf
# Clone llama.cpp and build it (including the server) with CUDA support
RUN git clone https://github.com/ggerganov/llama.cpp.git && \
    cd llama.cpp && \
    git submodule init && \
    git submodule update && \
    make LLAMA_CUBLAS=1
# Create a non-root user for security reasons
RUN useradd -m -u 1000 user && \
    mkdir -p /home/user/app && \
    cp /app/ggml-model-q4_k.gguf /home/user/app && \
    cp /app/mmproj-model-f16.gguf /home/user/app
RUN chown user:user /home/user/app/ggml-model-q4_k.gguf && \
    chown user:user /home/user/app/mmproj-model-f16.gguf
USER user
ENV HOME=/home/user
WORKDIR $HOME/app
# Expose the port
EXPOSE 8080
# Start the server with the LLaVA model and projector, listening on all interfaces so the exposed port is reachable
CMD ["/app/llama.cpp/server", "--model", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--threads", "4", "--host", "0.0.0.0"]
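The image can also be exercised locally before pushing it to a Space. Below is a minimal smoke-test sketch, assuming Docker plus the NVIDIA Container Toolkit are installed on the host; the image tag (bakllava-server), the test image file (test.jpg), and the USER/ASSISTANT prompt template are placeholders, and the /completion request shape (an [img-1] marker in the prompt plus an image_data array) follows the llama.cpp server API of this era, so check the server README for the exact version you build.

# Build the image (the CUDA devel base plus model downloads make this a large, slow build)
docker build -t bakllava-server .
# Run it with GPU access, publishing the server port
docker run --gpus all -p 8080:8080 bakllava-server
# From another shell: send a prompt and a base64-encoded image to the server.
# Uses GNU coreutils base64 (-w 0 disables line wrapping); adjust on other platforms.
IMG_B64=$(base64 -w 0 test.jpg)
curl http://localhost:8080/completion \
  -H "Content-Type: application/json" \
  -d "{\"prompt\": \"USER:[img-1] Describe the image.\nASSISTANT:\", \"n_predict\": 128, \"image_data\": [{\"data\": \"${IMG_B64}\", \"id\": 1}]}"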