# customer_service_agent/valetax_rag.py
import json

from dotenv import load_dotenv
from llama_index.core import Document, PromptTemplate, VectorStoreIndex
from llama_index.llms.openai import OpenAI

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file
load_dotenv()
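
# Expected knowledge-base format (an assumption, inferred from the loading code in
# __init__ below): documents2.json is a JSON array of objects, each with a "text" field,
# e.g.
# [
#     {"text": "How do I verify my Valetax account? ..."},
#     {"text": "Withdrawal processing times are ..."}
# ]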
class ValetaxRAG:
    def __init__(self, data_path: str = "documents2.json", model_name: str = "gpt-4o"):
        # Load the knowledge base: a JSON list of objects with a "text" field
        with open(data_path, "r", encoding="utf-8") as f:
            loaded_data = json.load(f)
        loaded_docs = [Document(text=d["text"]) for d in loaded_data]

        # Build an in-memory vector index over the documents and configure the LLM
        self.index = VectorStoreIndex.from_documents(loaded_docs)
        self.llm = OpenAI(model=model_name)
        # System prompt used as the text QA template; {context_str} and {query_str}
        # are filled in by the query engine at query time
        self.prompt = """
Your Name: Adam
You are an expert customer service representative at Valetax.com - a leading Forex broker in MENA and Asia.

## Core Role:
- Provide exceptional customer service with professionalism and efficiency
- Use retrieved context to answer client inquiries accurately
- Guide clients through solutions with a friendly, conversational tone
- Escalate complex issues when needed

## Context Usage Guidelines:
- **Primary Source**: Always prioritize information from the provided context when available
- **Knowledge Gaps**: If the context doesn't contain relevant information, acknowledge this: "Let me get you more specific information about this"
- **Insufficient Context**: For complex issues not covered in the context, offer: "This requires detailed review - I can create a support ticket for specialized assistance"
- **Accuracy First**: Only provide information you're confident about from the context

## Communication Style:
- Friendly and conversational with moderate emojis 🔹
- Organized responses with bullet points or lists when helpful
- Brief explanations and examples for clarity
- Professional but warm approach

## Information Security:
- NEVER mention knowledge base files, training materials, or internal systems
- Focus on helping clients achieve their goals
- Maintain confidentiality about company processes

## When to Escalate:
Create support tickets for:
- Technical platform issues
- Account-specific problems requiring internal team review
- Complex financial/trading matters
- Compliance or regulatory questions

## Response Structure:
1. Acknowledge the client's question
2. Provide an organized, context-based answer
3. Offer additional clarification if needed
4. Suggest escalation if the issue requires specialized support

Customer Question: {query_str}
Context: {context_str}
Response:
"""
    async def query(self, query: str, max_sources: int = 7) -> dict:
        """Run a query through the RAG agent and return both the answer and its sources."""
        try:
            # The query engine retrieves the top-k chunks itself and fills {context_str}
            # and {query_str} in the custom prompt, so no manual retrieval step is needed
            custom_prompt = PromptTemplate(template=self.prompt)
            query_engine = self.index.as_query_engine(
                text_qa_template=custom_prompt,
                llm=self.llm,
                similarity_top_k=max_sources,
                response_mode="compact",  # or "tree_summarize" depending on your needs
            )
            # Execute the query asynchronously
            response = await query_engine.aquery(query)

            # Extract source nodes from the response, if present
            sources = []
            if hasattr(response, "source_nodes"):
                sources = response.source_nodes
            elif hasattr(response, "sources"):
                sources = response.sources

            return {
                "answer": str(response),
                "sources": sources,
                "metadata": {
                    "query": query,
                    "max_sources": max_sources,
                    "sources_found": len(sources),
                },
            }
        except Exception as e:
            # Return the error in the same structured format as a successful response
            return {
                "answer": f"I'm sorry, I encountered an error while processing your question: {str(e)}",
                "sources": [],
                "metadata": {"error": str(e)},
            }