Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -25,7 +25,7 @@ from io import StringIO
|
|
| 25 |
last = 0
|
| 26 |
CHROMA_DATA_PATH = "chroma_data/"
|
| 27 |
EMBED_MODEL = "sentence-transformers/all-MiniLM-L6-v2" #"BAAI/bge-m3"
|
| 28 |
- LLM_NAME = "mistralai/Mistral-Nemo-Instruct"
|
| 29 |
# all-MiniLM-L6-v2
|
| 30 |
CHUNK_SIZE = 800
|
| 31 |
CHUNK_OVERLAP = 50
|
|
@@ -77,7 +77,7 @@ Settings.llm = HuggingFaceInferenceAPI(model_name=LLM_NAME,
|
|
| 77 |
"presence_penalty": presence_penalty, "frequency_penalty": frequency_penalty,
|
| 78 |
"top_k": top_k, "do_sample": False},
|
| 79 |
# tokenizer_kwargs={"max_length": 4096},
|
| 80 |
- tokenizer_name=
|
| 81 |
)
|
| 82 |
# "BAAI/bge-m3"
|
| 83 |
Settings.embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
|
|
|
|
| 25 |
last = 0
|
| 26 |
CHROMA_DATA_PATH = "chroma_data/"
|
| 27 |
EMBED_MODEL = "sentence-transformers/all-MiniLM-L6-v2" #"BAAI/bge-m3"
|
| 28 |
+ LLM_NAME = "mistralai/Mistral-Nemo-Instruct-2407"
|
| 29 |
# all-MiniLM-L6-v2
|
| 30 |
CHUNK_SIZE = 800
|
| 31 |
CHUNK_OVERLAP = 50
|
|
|
|
| 77 |
"presence_penalty": presence_penalty, "frequency_penalty": frequency_penalty,
|
| 78 |
"top_k": top_k, "do_sample": False},
|
| 79 |
# tokenizer_kwargs={"max_length": 4096},
|
| 80 |
+ tokenizer_name=LLM_NAME,
|
| 81 |
)
|
| 82 |
# "BAAI/bge-m3"
|
| 83 |
Settings.embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
|