File size: 2,914 Bytes
b269c5d
 
 
 
 
 
 
b9cb4a6
 
 
b269c5d
 
 
b9cb4a6
b269c5d
b9cb4a6
b269c5d
 
 
b9cb4a6
b269c5d
 
 
 
 
 
 
b9cb4a6
 
 
b269c5d
 
b9cb4a6
b269c5d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import os
from typing import Optional

class Config:
    """Application configuration for working with local GGUF models.

    Every setting is a class attribute resolved once, at import time, from an
    environment variable with a Docker-friendly default. Because resolution
    happens when the class body executes, later changes to ``os.environ``
    (including ``load_from_env_file``) do not retroactively change these
    attributes.
    """

    # --- Model settings: Hugging Face downloaded GGUF model ---
    MODEL_REPO: str = "unsloth/gemma-3-270m-it-GGUF"
    MODEL_FILENAME: str = "gemma-3-270m-it-Q8_0.gguf"
    # Models directory is overridable via MODELS_DIR; defaults to the
    # original hard-coded Docker path /app/models.
    MODEL_PATH: str = f"{os.getenv('MODELS_DIR', '/app/models')}/{MODEL_FILENAME}"
    HUGGINGFACE_TOKEN: str = os.getenv("HUGGINGFACE_TOKEN", "")

    # --- Model loading settings: conservative defaults for Docker ---
    N_CTX: int = int(os.getenv("N_CTX", "1024"))  # reduced context window for Docker
    N_GPU_LAYERS: int = int(os.getenv("N_GPU_LAYERS", "0"))  # 0 = CPU-only by default
    N_THREADS: int = int(os.getenv("N_THREADS", "2"))  # conservative thread count
    N_BATCH: int = int(os.getenv("N_BATCH", "512"))  # smaller batch size for Docker
    USE_MLOCK: bool = os.getenv("USE_MLOCK", "false").lower() == "true"  # disabled for Docker
    USE_MMAP: bool = os.getenv("USE_MMAP", "true").lower() == "true"  # keep memory mapping
    F16_KV: bool = os.getenv("F16_KV", "false").lower() == "true"  # 16-bit keys and values
    SEED: int = int(os.getenv("SEED", "42"))  # random seed for reproducibility

    # --- Server settings: Docker compatible ---
    HOST: str = os.getenv("HOST", "0.0.0.0")
    GRADIO_PORT: int = int(os.getenv("GRADIO_PORT", "7860"))  # standard HF Spaces port
    API_PORT: int = int(os.getenv("API_PORT", "8000"))

    # --- Logging settings ---
    LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")  # INFO, WARNING, ERROR, DEBUG

    # --- Generation settings ---
    MAX_NEW_TOKENS: int = int(os.getenv("MAX_NEW_TOKENS", "256"))  # reduced for faster response
    # Env-configurable for consistency with the other generation settings;
    # the previous hard-coded default of 1.0 is preserved.
    TEMPERATURE: float = float(os.getenv("TEMPERATURE", "1.0"))

    # --- File upload settings ---
    MAX_FILE_SIZE: int = int(os.getenv("MAX_FILE_SIZE", "10485760"))  # 10MB
    ALLOWED_IMAGE_EXTENSIONS: set[str] = {".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp"}

    @classmethod
    def is_model_available(cls) -> bool:
        """Return True if the local model file exists at MODEL_PATH."""
        return os.path.exists(cls.MODEL_PATH)

    @classmethod
    def get_model_path(cls) -> str:
        """Return the absolute path to the model file."""
        return os.path.abspath(cls.MODEL_PATH)

    @classmethod
    def get_models_dir(cls) -> str:
        """Return the directory containing the model file."""
        return os.path.dirname(cls.MODEL_PATH)

    @classmethod
    def load_from_env_file(cls, env_file: str = ".env") -> None:
        """Load KEY=VALUE pairs from *env_file* into ``os.environ``.

        Blank lines, ``#`` comment lines, and lines without ``=`` are
        skipped. Values wrapped in one pair of matching single or double
        quotes (a common .env convention) have the quotes stripped —
        previously they were stored verbatim, quotes included. Existing
        environment variables are overwritten; a missing file is ignored.
        """
        if not os.path.exists(env_file):
            return
        with open(env_file, 'r') as f:
            for raw in f:
                line = raw.strip()
                if not line or line.startswith('#') or '=' not in line:
                    continue
                key, _, value = line.partition('=')
                value = value.strip()
                # Strip one pair of matching surrounding quotes, if present.
                if len(value) >= 2 and value[0] == value[-1] and value[0] in "\"'":
                    value = value[1:-1]
                os.environ[key.strip()] = value

# Automatically load variables from a local .env file into os.environ on import.
# NOTE(review): this runs AFTER the Config class body has executed, so all of
# the os.getenv() defaults above were already evaluated — values loaded here
# affect only code that reads os.environ later, not the Config attributes
# themselves. Confirm intent; loading the .env file before the class is
# defined would make it effective for Config.
Config.load_from_env_file()