from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
import requests
import pathlib
import subprocess
import tempfile
import os
import gradio as gr
import pickle

# using a vector store for our search
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.text_splitter import CharacterTextSplitter
# loading the FAISS search index from disk
with open("search_index.pickle", "rb") as f:
    search_index = pickle.load(f)
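
# NOTE: the index build step is not part of this app, and the helper below is
# only an illustrative sketch of how a pickled FAISS index like the one loaded
# above might have been produced with the OpenAIEmbeddings / FAISS /
# CharacterTextSplitter imports. The function name, the source_docs argument
# and the chunking parameters are assumptions, not the original build script,
# and the function is never called at runtime.
def build_search_index(source_docs, pickle_path="search_index.pickle"):
    # split each source Document into smaller chunks so every embedding covers
    # a manageable span of text
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = [
        Document(page_content=piece, metadata=doc.metadata)
        for doc in source_docs
        for piece in splitter.split_text(doc.page_content)
    ]
    # embed the chunks (OpenAIEmbeddings reads OPENAI_API_KEY from the
    # environment), build the FAISS vector store, and persist it to disk
    index = FAISS.from_documents(chunks, OpenAIEmbeddings())
    with open(pickle_path, "wb") as f:
        pickle.dump(index, f)
    return index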

# Get the GPT-3 response using LangChain's QA-with-sources chain
def print_answer(question, openai):
    chain = load_qa_with_sources_chain(openai)
    response = chain(
        {
            "input_documents": search_index.similarity_search(question, k=4),
            "question": question,
        },
        return_only_outputs=True,
    )["output_text"]
    # The chain ends its answer with a final "SOURCES: ..." line; split the
    # answer text from the source URLs and turn the URLs into HTML links.
    answer = response.split('\n')[0]
    source_tokens = response.split('\n')[-1].split()
    if len(source_tokens) > 2:
        # several sources: token 0 is the "SOURCES:" label, the rest are URLs
        links = [
            ' <a href="' + source_tokens[i] + '" target="_blank"><u>Click Link' + str(i) + '</u></a>'
            for i in range(1, len(source_tokens))
        ]
        response = answer + ', '.join(links)
    else:
        # single source: link the last token of the line
        response = answer + ' <a href="' + source_tokens[-1] + '" target="_blank"><u>Click Link</u></a>'
    return response

def chat(message, history, openai_api_key):
    # create the OpenAI LLM with the user-supplied API key on every request
    openai = OpenAI(temperature=0, openai_api_key=openai_api_key)
    history = history or []
    message = message.lower()
    response = print_answer(message, openai)
    history.append((message, response))
    return history, history

with gr.Blocks() as demo:
    gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
        <div
          style="
            display: inline-flex;
            align-items: center;
            gap: 0.8rem;
            font-size: 1.75rem;
          "
        >
          <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
            heyoo QandA - LangChain Bot
          </h1>
        </div>
        <p style="margin-bottom: 10px; font-size: 94%">
          Hi, I'm a heyoo Q-and-A expert bot. Start by typing in your OpenAI API key and the questions or issues you are facing with your heyoo implementation, then press Enter.<br>
          <a href="https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space with a GPU upgrade for fast inference and no queue<br>
          Built using <a href="https://langchain.readthedocs.io/en/latest/" target="_blank">LangChain</a> and <a href="https://github.com/gradio-app/gradio" target="_blank">Gradio</a> for the heyoo repo
        </p>
      </div>""")
    with gr.Row():
        question = gr.Textbox(label='Type in your questions about heyoo here and press Enter!',
                              placeholder='What questions do you want to ask about the heyoo library?')
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
    state = gr.State()
    chatbot = gr.Chatbot()
    question.submit(chat, [question, state, openai_api_key], [chatbot, state])

if __name__ == "__main__":
    demo.launch()