Update model from qwen3:4b to qwen3:1.7b
- Dockerfile +1 -1
- __pycache__/main.cpython-311.pyc +0 -0
- main.py +1 -1
- static/script.js +1 -1
Dockerfile
CHANGED
@@ -3,7 +3,7 @@ FROM ollama/ollama
 
 # --- BUILD ARGUMENT ---
 # Allows you to specify the model to download when building the image.
-ENV OLLAMA_MODEL=qwen3:4b
+ENV OLLAMA_MODEL=qwen3:1.7b
 
 # --- ENVIRONMENT VARIABLES ---
 ENV OLLAMA_HOST=0.0.0.0
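Note: the Dockerfile bakes the default model name into the image as OLLAMA_MODEL, while main.py and static/script.js hardcode the same string. A minimal sketch (not code from this Space) of how the backend could read that variable instead, so a future model bump only touches the Dockerfile; DEFAULT_MODEL and resolve_model are illustrative names, not part of this repo:

# Hypothetical sketch: read the model name baked into the image by the
# Dockerfile's ENV OLLAMA_MODEL, falling back to this commit's new default.
import os

DEFAULT_MODEL = os.environ.get("OLLAMA_MODEL", "qwen3:1.7b")

def resolve_model(requested: str | None) -> str:
    """Return the model a request asked for, or the image's default."""
    return requested or DEFAULT_MODEL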
__pycache__/main.cpython-311.pyc
ADDED
Binary file (3.74 kB)
main.py
CHANGED
@@ -29,7 +29,7 @@ def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)
 @app.post("/chat_api")
 async def chat_endpoint(request: Request, api_key: str = Depends(verify_api_key)):
     body = await request.json()
-    model = body.get("model", "qwen3:4b")
+    model = body.get("model", "qwen3:1.7b")
     prompt = body.get("prompt")
 
     if not prompt:
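For reference, a minimal client sketch of how the updated endpoint is called: POST /chat_api with a Bearer token and a JSON body carrying model and prompt. The base URL, API key, and timeout below are placeholders, and since the diff does not show what the endpoint returns, the snippet just prints the raw response body.

# Hypothetical client call against the /chat_api endpoint shown above.
import requests

SPACE_URL = "http://localhost:7860"   # placeholder: wherever the Space is served
API_KEY = "your-api-key"              # placeholder: whatever verify_api_key expects

resp = requests.post(
    f"{SPACE_URL}/chat_api",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "model": "qwen3:1.7b",   # omit to fall back to the new default
        "prompt": "Hello!",
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.text)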
static/script.js
CHANGED
@@ -48,7 +48,7 @@ function sendMessage() {
             'Authorization': `Bearer ${apiKey}`
         },
         body: JSON.stringify({
-            model: 'qwen3:4b',
+            model: 'qwen3:1.7b', // You can change the model here
             prompt: message
         })
     })