ThongCoding committed
Commit e3d6cc3 · 1 Parent(s): 8d85a2c
Files changed (2):
  1. model.py +8 -6
  2. requirements.txt +1 -1
model.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import shutil
 import requests
 from llama_cpp import Llama
 
@@ -9,12 +10,13 @@ MODEL_PATH = f"./models/{MODEL_FILENAME}"
 def download_model():
     os.makedirs("./models", exist_ok=True)
     print("📦 Downloading GGUF model directly from URL...")
-    with requests.get(MODEL_URL, stream=True) as r:
-        r.raise_for_status()
-        with open(MODEL_PATH, "wb") as f:
-            for chunk in r.iter_content(chunk_size=8192):
-                f.write(chunk)
-    print(f"✅ Model downloaded to {MODEL_PATH}")
+    response = requests.get(MODEL_URL, stream=True)
+    if response.status_code == 200:
+        with open(MODEL_PATH, 'wb') as f:
+            shutil.copyfileobj(response.raw, f)
+        print(f"✅ Model downloaded to {MODEL_PATH}")
+    else:
+        raise RuntimeError(f"❌ Failed to download model. Status code: {response.status_code}")
 
 # Only download if not already present
 if not os.path.exists(MODEL_PATH):
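A side note on the rewritten download path (a sketch, not part of the commit): the old code called r.raise_for_status(), which raises on any non-2xx status, while the new code only treats 200 as success. The new shutil.copyfileobj(response.raw, f) also reads the undecoded socket stream, skipping the gzip/deflate content decoding that iter_content() would apply. A variant keeping both the broader status check and chunked decoding could look like this; MODEL_URL and MODEL_PATH are placeholders standing in for the module-level constants defined in model.py:

import os
import requests

# Placeholders for the module-level constants in model.py.
MODEL_URL = "https://example.com/model.gguf"
MODEL_PATH = "./models/model.gguf"

def download_model():
    os.makedirs("./models", exist_ok=True)
    response = requests.get(MODEL_URL, stream=True)
    response.raise_for_status()  # raises on any non-2xx status, not only != 200
    with open(MODEL_PATH, "wb") as f:
        # iter_content() streams in chunks and applies requests' content
        # decoding; response.raw would hand back the raw socket bytes instead.
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    print(f"Model downloaded to {MODEL_PATH}")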
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 fastapi
 uvicorn
-llama-cpp-python==0.2.72 # prebuilt versions now available from PyPI
+llama-cpp-python==0.2.74 # prebuilt versions now available from PyPI
 python-dotenv
 huggingface_hub
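If the bump to 0.2.74 is meant to pull one of the prebuilt wheels from PyPI, a quick sanity check after installing the requirements could be the following; this is a hypothetical check, not part of the repo:

from importlib.metadata import version

# Confirms the pinned llama-cpp-python wheel was actually installed.
assert version("llama-cpp-python") == "0.2.74", version("llama-cpp-python")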