feat: use lighter embedding model
app.py +1 -1
vector_store.py +1 -5
app.py
CHANGED
@@ -71,4 +71,4 @@ with st.sidebar:
 
     st.write("filename:", uploaded_file.name)
     process_pdf(temp_file, vector_store)
-    st.success("PDFs
+    st.success("PDFs processed successfully. ✅")
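For context, the changed st.success line closes out the sidebar upload flow visible in the hunk. Below is a minimal sketch of how that flow might fit together; the st.file_uploader call, the temporary-file handling, and the import location of process_pdf are assumptions, and only the three lines from the hunk are confirmed by the diff.

import tempfile

import streamlit as st

from vector_store import load_vector_store
from pdf_processing import process_pdf  # hypothetical module; only the call site appears in the diff

vector_store = load_vector_store()

with st.sidebar:
    # Hypothetical uploader; the real widget label/options may differ.
    uploaded_file = st.file_uploader("Upload a PDF", type="pdf")

    if uploaded_file is not None:
        # Assumed: persist the upload to a temporary file so process_pdf can read it from disk.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
            tmp.write(uploaded_file.getbuffer())
            temp_file = tmp.name

        # These three lines are the ones visible in the hunk.
        st.write("filename:", uploaded_file.name)
        process_pdf(temp_file, vector_store)
        st.success("PDFs processed successfully. ✅")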
vector_store.py
CHANGED
@@ -9,17 +9,13 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
 
 @st.cache_resource()
 def load_embedding_model(model):
-    """
-    sentence-transformers/all-mpnet-base-v2
-    sentence-transformers/all-MiniLM-L6-v2
-    """
     model = HuggingFaceEmbeddings(model_name=model)
     return model
 
 
 @st.cache_resource()
 def load_vector_store():
-    model = load_embedding_model("sentence-transformers/all-
+    model = load_embedding_model("sentence-transformers/all-MiniLM-L12-v2")
 
     vector_store = Chroma(
         collection_name="main_store",