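# Gradio RAG demo for a Hugging Face Space: upload PDFs, index them with FAISS,
# and chat with them through a hosted LLM endpoint.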
import gradio as gr
import os
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFaceEndpoint
api_token = os.getenv("HF_TOKEN")

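# Instruct models served via the HF Inference API; the UI radio shows only the basename.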
list_llm = ["microsoft/Phi-3-mini-4k-instruct", "mistralai/Mistral-7B-Instruct-v0.3"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]

# Load and split PDF documents
def load_doc(list_file_path, chunk_size, chunk_overlap):
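    # One PyPDFLoader per file; loader.load() yields one Document per PDF page.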
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
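    # The recursive splitter breaks on paragraph, sentence, then word boundaries
    # to keep each chunk near chunk_size characters.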
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap
    )
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits

# Create vector database
def create_db(splits):
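    # With no arguments, HuggingFaceEmbeddings falls back to its default
    # sentence-transformers embedding model.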
    embeddings = HuggingFaceEmbeddings()
    vectordb = FAISS.from_documents(splits, embeddings)
    return vectordb

# Initialize langchain LLM chain
def initialize_llmchain(llm_model, vector_db, progress=gr.Progress()):
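    # Serverless HF Inference API endpoint; requires an HF_TOKEN with inference access.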
    llm = HuggingFaceEndpoint(
        huggingfacehub_api_token=api_token,
        repo_id=llm_model,
        temperature=0.1,
        max_new_tokens=2000,
        top_k=3,
    )
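    # Buffer memory accumulates the running dialogue under "chat_history";
    # output_key tells it which chain output to record as the assistant turn.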
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        return_messages=True
    )
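    # The default retriever runs a similarity search and returns the top chunks
    # (LangChain's default is k=4).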
    retriever = vector_db.as_retriever()
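    # The "stuff" chain type concatenates all retrieved chunks directly into the prompt.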
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    return qa_chain

# Initialize database
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    list_file_path = [x.name for x in list_file_obj if x is not None]
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    vector_db = create_db(doc_splits)
    if vector_db is None:
        print("Vector database creation failed")
        return None, "Vector database creation failed"
    print("Embedding database created successfully")
    return vector_db, "Embedding database created!"

# Initialize LLM
def initialize_LLM(llm_option, vector_db, progress=gr.Progress()):
    if vector_db is None:
        print("Vector database is None")
        return None, "Failed to initialize RAG System: Vector database is None"
    llm_name = list_llm[llm_option]
    qa_chain = initialize_llmchain(llm_name, vector_db, progress)
    return qa_chain, "RAG System initialized!"

def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history

def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain.invoke({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
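    # LangChain's default QA prompt ends with "Helpful Answer:"; strip any echoed
    # prompt text so only the generated answer is shown.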
    if response_answer.find("Helpful Answer:") != -1:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
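    # PyPDF page metadata is zero-indexed; add 1 for human-readable page numbers.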
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page

def upload_file(file_obj):
    list_file_path = []
    for file in file_obj:
        file_path = file.name
        list_file_path.append(file_path)
    return list_file_path

def demo():
    with gr.Blocks(theme=gr.themes.Default(primary_hue="green")) as demo:
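        # gr.State holds per-session objects (vector DB, QA chain) across event callbacks.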
        vector_db = gr.State()
        qa_chain = gr.State()
        gr.HTML("<center><h1>RAG System</h1></center>")
        gr.Markdown("""This app performs retrieval-augmented generation (RAG) on PDF documents. \
        <b>Please do not upload confidential documents.</b>
        """)
        with gr.Row():
            with gr.Column(scale=86):
                gr.Markdown("<b>Step 1 - Upload PDF documents and initialize the RAG system</b>")
                with gr.Row():
                    document = gr.Files(height=300, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload PDF documents")
                with gr.Row():
                    slider_chunk_size = gr.Slider(minimum=10, maximum=1000, value=200, step=5, label="Chunk Size")
                    slider_chunk_overlap = gr.Slider(minimum=0, maximum=512, value=20, step=5, label="Chunk Overlap")
                with gr.Row():
                    db_btn = gr.Button("Create Embeddings")
                with gr.Row():
                    db_progress = gr.Textbox(value="Not initialized", show_label=False)
                gr.Markdown("<b>Select Large Language Model (LLM)</b>")
                with gr.Row():
                    llm_btn = gr.Radio(list_llm_simple, label="Available LLMs", value=list_llm_simple[0], type="index")
                with gr.Row():
                    qachain_btn = gr.Button("Initialize RAG system")
                with gr.Row():
                    llm_progress = gr.Textbox(value="Not initialized", show_label=False)
            with gr.Column(scale=200):
                gr.Markdown("<b>Step 2 - Chat with your Document</b>")
                chatbot = gr.Chatbot(height=505)
                with gr.Accordion("Similar context from the source document", open=False):
                    with gr.Row():
                        doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
                        source1_page = gr.Number(label="Page", scale=1)
                    with gr.Row():
                        doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
                        source2_page = gr.Number(label="Page", scale=1)
                    with gr.Row():
                        doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
                        source3_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    msg = gr.Textbox(placeholder="Ask a question", container=True)
                with gr.Row():
                    submit_btn = gr.Button("Submit")
                    clear_btn = gr.ClearButton([msg, chatbot], value="Clear")
        # Preprocessing events
        db_btn.click(initialize_database,
                     inputs=[document, slider_chunk_size, slider_chunk_overlap],
                     outputs=[vector_db, db_progress])
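        # .then() runs after initialize_LLM completes, resetting the chat panel and source boxes.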
        qachain_btn.click(initialize_LLM,
                          inputs=[llm_btn, vector_db],
                          outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
                                                                 inputs=None,
                                                                 outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                                                                 queue=False)
        # Chatbot events
        msg.submit(conversation,
                   inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                   queue=False)
        submit_btn.click(conversation,
                         inputs=[qa_chain, msg, chatbot],
                         outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                         queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
                        inputs=None,
                        outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                        queue=False)
    demo.queue().launch(debug=True)


if __name__ == "__main__":
    demo()