Update .env
.env CHANGED

@@ -1,20 +1,4 @@
 # --- Core LLM backend (switch here only) ---
-BACKEND_LLM=llamacpp
-LLAMACPP_MODEL_PATH=models/qwen2.5-1.5b-instruct-q4_k_m.gguf #tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf
-HF_MODEL_REPO=Qwen/Qwen2.5-1.5B-Instruct-GGUF
-
-# llama.cpp runtime knobs
-N_CTX=4096
-N_THREADS=4
-N_GPU_LAYERS=0
-
-# ASR device: 'cpu' is safest across Mac/HF; use 'mps' only if you’ve verified support
-ASR_DEVICE=cpu
-
-# TTS preference (choose ONE)
-TTS_ENGINE=piper
-PIPER_MODEL=models/piper/en_US-amy-medium.onnx
-
 
 # Files written by TTS go here (and get cleaned by our code)
 VOICE_AUDIO_DIR=runtime/audio
@@ -23,15 +7,34 @@ VOICE_AUDIO_DIR=runtime/audio
 # OPENAI_API_KEY=
 # GROQ_API_KEY=
 
-# Environment flags
-IS_HF_SPACE=false
-DEBUG=true
-CAFE_UNRELATED_LIMIT=3
-
 API_BACKEND=sim # sim | mock | http
 
 # Switch between old rule router and LLM-driven flow (no code changes needed)
 ROUTER_MODE=llm
 
-
-
+SAY_VOICE=Samantha
+
+
+# Which backend to use
+BACKEND_LLM=llamacpp
+
+# Path where the GGUF model will be saved after download
+LLAMACPP_MODEL_PATH=models/qwen2.5-1.5b-instruct-q4_k_m.gguf
+
+# HF repo to download the model from
+HF_MODEL_REPO=Qwen/Qwen2.5-1.5B-Instruct-GGUF
+HF_MODEL_FILE=qwen2.5-1.5b-instruct-q4_k_m.gguf
+
+# llama.cpp runtime knobs
+N_CTX=4096
+N_THREADS=4
+N_GPU_LAYERS=0
+
+# Audio + misc
+ASR_DEVICE=cpu
+TTS_ENGINE=piper
+PIPER_MODEL=models/piper/en_US-amy-medium.onnx
+PIPER_BIN=piper
+
+IS_HF_SPACE=true
+DEBUG=false