# Base image suited for llama-cpp-python

# Define the image argument and provide a default value
ARG IMAGE=python:3-slim-bullseye

# Use the image as specified
FROM ${IMAGE}
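# Example builds (a sketch; the image tags below are assumptions, not part of this file):
#   docker build -t llava-server .                                          # CPU / OpenBLAS
#   docker build --build-arg IMAGE=nvidia/cuda:12.1.1-devel-ubuntu22.04 \
#     -t llava-server-cuda .                                                # cuBLAS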
# Re-declare the ARG after FROM so it is visible in later instructions
ARG IMAGE

# Update and upgrade the existing packages
RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends \
    python3 \
    python3-pip \
    ninja-build \
    build-essential

# Install build tooling and the server's Python dependencies
RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings starlette-context
# Perform the conditional installation based on the base image:
# OpenBLAS for the default CPU image, cuBLAS otherwise (e.g. a CUDA image)
RUN echo "Image: ${IMAGE}" && \
    if [ "${IMAGE}" = "python:3-slim-bullseye" ] ; then \
        echo "OpenBLAS install:" && \
        apt-get install -y --no-install-recommends libopenblas-dev && \
        LLAMA_OPENBLAS=1 pip install llama-cpp-python --verbose; \
    else \
        echo "CuBLAS install:" && \
        LLAMA_CUBLAS=1 pip install llama-cpp-python --verbose; \
    fi
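# Quick sanity check after building (a sketch; the "llava-server" tag is an assumption):
#   docker run --rm llava-server python3 -c "import llama_cpp; print(llama_cpp.__version__)"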
VOLUME ["/models"]
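# Note: the model files are also baked into the image below. A bind mount over
# /models at runtime (e.g. -v /host/models:/models) hides the baked-in copies,
# while a named volume is populated from them on first use.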
# Environment variables for model details
ENV MODEL_NAME="llava-1.6-mistral-7b-gguf"
ENV DEFAULT_MODEL_FILE="llava-v1.6-mistral-7b.Q3_K_XS.gguf"
ENV MODEL_USER="cjpais"
ENV DEFAULT_MODEL_BRANCH="main"
ENV DEFAULT_CLIP_MODEL_FILE="mmproj-model-f16.gguf"
ENV MODEL_URL="https://huggingface.co/${MODEL_USER}/${MODEL_NAME}/resolve/${DEFAULT_MODEL_BRANCH}/${DEFAULT_MODEL_FILE}"
ENV CLIP_MODEL_URL="https://huggingface.co/${MODEL_USER}/${MODEL_NAME}/resolve/${DEFAULT_MODEL_BRANCH}/${DEFAULT_CLIP_MODEL_FILE}"
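# With the defaults above, MODEL_URL resolves to:
#   https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q3_K_XS.gguf
# and CLIP_MODEL_URL to the matching mmproj-model-f16.gguf file.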
# Set up the working directory
WORKDIR /app

# Ensure curl is available for downloading the models
RUN apt-get update && apt-get install -y curl && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
# Create a directory for the models
RUN mkdir -p /models

# Download the models at build time so the image is self-contained
RUN curl -L "${MODEL_URL}" -o "/models/${DEFAULT_MODEL_FILE}" && \
    curl -L "${CLIP_MODEL_URL}" -o "/models/${DEFAULT_CLIP_MODEL_FILE}"
ENV HOST=0.0.0.0
ENV PORT=8000

# Expose the port the server will run on
EXPOSE 8000
# Run the server; shell form is used so the model path environment variables are expanded
CMD python3 -m llama_cpp.server --model "/models/${DEFAULT_MODEL_FILE}" --clip_model_path "/models/${DEFAULT_CLIP_MODEL_FILE}" --chat_format llava-1-5
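# Example usage (a sketch; the image tags are assumptions from the build examples above):
#   docker run --rm -p 8000:8000 llava-server
#   docker run --rm --gpus all -p 8000:8000 llava-server-cuda   # for a CUDA build
#
# The server exposes an OpenAI-compatible API, e.g.:
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"messages":[{"role":"user","content":"Hello"}]}'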