File size: 3,791 Bytes
eeb0f9c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
import os
import time
import traceback
from langchain_community.document_loaders import PyPDFLoader, CSVLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from config.settings import CHROMA_PATH, EMBEDDING_MODEL


# ============================================================
# Load and split documents
# ============================================================
def load_documents(path: str) -> list:
    """
    Load and split a single uploaded document (PDF, CSV, or MD/TXT).

    Args:
        path (str): Full path to the uploaded file.

    Returns:
        List[Document]: List of document chunks ready for embedding.
        Returns an empty list if the file is missing, has an unsupported
        extension, or loading/splitting fails.
    """
    print(f"\nReading uploaded file: {path}")

    if not os.path.exists(path):
        print(f"[ERROR] File not found: {path}")
        return []

    ext = os.path.splitext(path)[1].lower()

    try:
        # Select appropriate loader based on file type
        if ext == ".pdf":
            loader = PyPDFLoader(path)
        elif ext == ".csv":
            loader = CSVLoader(path, encoding="utf-8")
        # BUGFIX: the docstring promises MD/TXT support, but only ".md" was
        # accepted previously; TextLoader handles both formats identically.
        elif ext in (".md", ".txt"):
            loader = TextLoader(path, encoding="utf-8")
        else:
            print(
                f"[WARNING] Unsupported file type: {ext}. Only PDF, CSV, or MD allowed."
            )
            return []

        # Load the document
        docs = loader.load()
        print(f"Loaded {len(docs)} documents from {os.path.basename(path)}")

        # Split the text into smaller chunks for embeddings; overlap keeps
        # context continuity across chunk boundaries.
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        split_docs = splitter.split_documents(docs)
        print(f"Split into {len(split_docs)} text chunks.")
        return split_docs

    except Exception as e:
        # Best-effort boundary: log the failure and return an empty list so
        # the caller can skip the embedding step gracefully.
        print(f"[ERROR] Failed to load or split document: {path}")
        print(f"Reason: {e}")
        traceback.print_exc()
        return []


# ============================================================
# Select embedding model (OpenAI → fallback to HuggingFace)
# ============================================================
def get_embedding_model():
    """
    Build and return the embedding model used for ingestion.

    The configured custom OpenAI endpoint only serves GPT-4o-mini and has no
    embeddings API, so OpenAI embeddings are intentionally skipped and a local
    HuggingFace model (named by EMBEDDING_MODEL) is used instead.

    Returns:
        HuggingFaceEmbeddings: embedding model instance.
    """
    print(
        "[INFO] Using HuggingFace embeddings "
        "(custom endpoint doesn't support embeddings)"
    )
    print(f"Using HuggingFaceEmbeddings ({EMBEDDING_MODEL})...")

    model = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
    return model


# ============================================================
# Main ingestion process
# ============================================================
def ingest_data(path: str):
    """
    Generate embeddings for an uploaded file and store them in a local ChromaDB.

    Args:
        path (str): Full path to the uploaded file.

    Side effects:
        Persists vectors under CHROMA_PATH and prints progress/diagnostics.
        Returns early (without raising) when the file yields no documents.
    """
    started = time.time()
    print("\nStarting ingestion for uploaded file...")

    chunks = load_documents(path)
    if not chunks:
        print("No valid document to process. Skipping embedding step.")
        return

    embedding_model = get_embedding_model()

    try:
        store = Chroma.from_documents(
            chunks,
            embedding_model,
            persist_directory=CHROMA_PATH,
        )
        # NOTE: reads the private _collection attribute to report the vector
        # count, matching the existing diagnostics.
        vector_count = store._collection.count()
        print(f"\nIngestion complete in {time.time() - started:.2f} seconds.")
        print(f"Data stored in {CHROMA_PATH} ({vector_count} vectors).")
    except Exception as e:
        print(f"[ERROR] Failed to store vectors in ChromaDB: {e}")
        traceback.print_exc()