Update app.py
app.py CHANGED
@@ -2,6 +2,10 @@ import os
 import json
 import re
 import hashlib
+import os
+import json
+import re
+import hashlib
 import gradio as gr
 from functools import partial
 import threading
@@ -104,14 +108,13 @@ knowledge_base = KnowledgeBase()
 # LLMs
 # repharser_llm = ChatNVIDIA(model="mistralai/mistral-7b-instruct-v0.3") | StrOutputParser()
 repharser_llm = ChatNVIDIA(model="microsoft/phi-3-mini-4k-instruct") | StrOutputParser()
-
-instruct_llm = ChatNVIDIA(model="mistralai/mistral-7b-instruct-v0.2") | StrOutputParser()
-#instruct_llm = ChatNVIDIA(model="mistralai/mixtral-8x22b-instruct-v0.1") | StrOutputParser()
+instruct_llm = ChatNVIDIA(model="mistralai/mixtral-8x22b-instruct-v0.1") | StrOutputParser()
 relevance_llm = ChatNVIDIA(model="nvidia/llama-3.1-nemotron-70b-instruct") | StrOutputParser()
 answer_llm = ChatOpenAI(
     model="gpt-4o",
     temperature=0.3,
-    openai_api_key=os.getenv("OPENAI_API_KEY")
+    openai_api_key=os.getenv("OPENAI_API_KEY"),
+    streaming=True
 ) | StrOutputParser()
 
 
@@ -395,7 +398,7 @@ select_and_prompt = RunnableLambda(lambda x:
 answer_chain = (
     prepare_answer_inputs
     | select_and_prompt
-
+    | relevance_llm
 )
 
 def RExtract(pydantic_class: Type[BaseModel], llm, prompt):
@@ -460,7 +463,7 @@ def update_knowledge_base(user_input: str, assistant_response: str):
 full_pipeline = hybrid_chain | RunnableAssign({"validation": validation_chain}) | answer_chain
 
 
-def chat_interface(message, history):
+def chat_interface(message, history):
     inputs = {
         "query": message,
         "all_queries": [message],
@@ -530,7 +533,7 @@ demo = gr.ChatInterface(
     description="💡 Ask anything about Krishna Vamsi Dhulipalla",
     examples=[
         "Give me an overview of Krishna Vamsi Dhulipalla’s work experience across different roles?",
-        "What programming languages and tools does Krishna use for data science
+        "What programming languages and tools does Krishna use for data science?",
         "Can this chatbot tell me what Krishna's chatbot architecture looks like and how it works?"
     ],
 )
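
For context, a minimal self-contained sketch of the pattern the changed lines rely on: a streaming ChatOpenAI chain whose chunks are yielded from a generator into gr.ChatInterface. This is not the repository's actual code; answer_prompt, the empty context, and the single-prompt chain are illustrative stand-ins for the app's prepare_answer_inputs, select_and_prompt, and retrieval pipeline.

import os

import gradio as gr
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# streaming=True makes .stream() emit incremental tokens instead of one final string.
answer_llm = ChatOpenAI(
    model="gpt-4o",
    temperature=0.3,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    streaming=True,
) | StrOutputParser()

# Hypothetical stand-in for the app's prompt-selection and retrieval steps.
answer_prompt = ChatPromptTemplate.from_template(
    "Use the context to answer.\n\nContext: {context}\n\nQuestion: {query}"
)
answer_chain = answer_prompt | answer_llm

def chat_interface(message, history):
    partial = ""
    # The chain ends in StrOutputParser, so .stream() yields plain string chunks.
    for chunk in answer_chain.stream({"context": "", "query": message}):
        partial += chunk
        yield partial  # Gradio re-renders the assistant message on every yield

demo = gr.ChatInterface(fn=chat_interface, title="Ask about Krishna")

if __name__ == "__main__":
    demo.launch()

Because chat_interface is a generator, Gradio updates the reply token by token, which is what the added streaming=True flag enables on the OpenAI side.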