Spaces:
Runtime error
Runtime error
Update src/main.py
Browse files- src/main.py +2 -2
src/main.py
CHANGED
|
@@ -49,7 +49,7 @@ def split_data(data):
|
|
| 49 |
def ingest_chunks(chunks):
|
| 50 |
embedding = OllamaEmbeddings(
|
| 51 |
base_url='https://thewise-ollama-server.hf.space',
|
| 52 |
-
model="nomic-embed-text
|
| 53 |
)
|
| 54 |
vector_store = DocArrayInMemorySearch.from_documents(chunks, embedding)
|
| 55 |
|
|
@@ -64,7 +64,7 @@ def retreival(vector_store, k):
|
|
| 64 |
#Creating LLM
|
| 65 |
llm = ChatOllama(
|
| 66 |
base_url='https://thewise-ollama-server.hf.space',
|
| 67 |
-
model="codellama
|
| 68 |
|
| 69 |
# Define the system message template
|
| 70 |
#Adding CHAT HISTORY to the System template explicitly because mainly Chat history goes to Condense the Human Question with Background (Not template), but System template goes straight to the LLM Chain
|
|
|
|
| 49 |
def ingest_chunks(chunks):
|
| 50 |
embedding = OllamaEmbeddings(
|
| 51 |
base_url='https://thewise-ollama-server.hf.space',
|
| 52 |
+
model="nomic-embed-text",
|
| 53 |
)
|
| 54 |
vector_store = DocArrayInMemorySearch.from_documents(chunks, embedding)
|
| 55 |
|
|
|
|
| 64 |
#Creating LLM
|
| 65 |
llm = ChatOllama(
|
| 66 |
base_url='https://thewise-ollama-server.hf.space',
|
| 67 |
+
model="codellama")
|
| 68 |
|
| 69 |
# Define the system message template
|
| 70 |
#Adding CHAT HISTORY to the System template explicitly because mainly Chat history goes to Condense the Human Question with Background (Not template), but System template goes straight to the LLM Chain
|