Update veryfinal.py
veryfinal.py  CHANGED  (+138 -86)
@@ -1,4 +1,5 @@
-"""Enhanced LangGraph + Agno Hybrid Agent System"""
+"""Enhanced LangGraph + Agno Hybrid Agent System with TavilyTools"""
+
 import os
 import time
 import random
@@ -25,11 +26,11 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
 
 # Agno imports
 from agno.agent import Agent
-from agno.models.groq import
-from agno.models.google import
-from agno.tools.
+from agno.models.groq import GroqChat
+from agno.models.google import GeminiChat
+from agno.tools.tavily import TavilyTools
 from agno.memory.agent import AgentMemory
-from agno.storage.sqlite import SqliteStorage
+from agno.storage.sqlite import SqliteStorage
 
 load_dotenv()
 
@@ -38,7 +39,7 @@ class PerformanceRateLimiter:
     def __init__(self, rpm: int, name: str):
         self.rpm = rpm
         self.name = name
-        self.times = []
+        self.times: List[float] = []
         self.failures = 0
 
     def wait_if_needed(self):
@@ -58,12 +59,12 @@ class PerformanceRateLimiter:
     def record_failure(self):
         self.failures += 1
 
-#
+# Initialize rate limiters
 gemini_limiter = PerformanceRateLimiter(28, "Gemini")
 groq_limiter = PerformanceRateLimiter(28, "Groq")
 nvidia_limiter = PerformanceRateLimiter(4, "NVIDIA")
 
-#
+# Create Agno agents with SQLite storage
 def create_agno_agents():
     storage = SqliteStorage(
         table_name="agent_sessions",
@@ -99,13 +100,22 @@ def create_agno_agents():
             api_key=os.getenv("GOOGLE_API_KEY"),
             temperature=0
         ),
-        description="Expert research and information specialist",
+        description="Expert research and information gathering specialist",
        instructions=[
-            "
-            "Synthesize information
-            "
+            "Conduct thorough research using available tools",
+            "Synthesize information from multiple sources",
+            "Provide comprehensive, well-cited answers",
+            "Finish with: FINAL ANSWER: [answer]"
+        ],
+        tools=[
+            TavilyTools(
+                api_key=os.getenv("TAVILY_API_KEY"),
+                search=True,
+                max_tokens=6000,
+                search_depth="advanced",
+                format="markdown"
+            )
        ],
-        tools=[DuckDuckGoTools()],
        memory=AgentMemory(
            db=storage,
            create_user_memories=True,
@@ -116,6 +126,7 @@ def create_agno_agents():
     )
     return {"math": math_agent, "research": research_agent}
 
+# LangGraph tools
 @tool
 def multiply(a: int, b: int) -> int:
     """Multiply two numbers."""
@@ -133,33 +144,39 @@ def subtract(a: int, b: int) -> int:
 
 @tool
 def divide(a: int, b: int) -> float:
-    """Divide two numbers
+    """Divide two numbers."""
     if b == 0:
         raise ValueError("Cannot divide by zero.")
     return a / b
 
 @tool
 def modulus(a: int, b: int) -> int:
-    """
+    """Get the remainder of division."""
     return a % b
-
+
 @tool
 def optimized_web_search(query: str) -> str:
-
+    """Optimized Tavily web search."""
     try:
         time.sleep(random.uniform(1, 2))
         docs = TavilySearchResults(max_results=2).invoke(query=query)
-        return "\n\n---\n\n".join(
+        return "\n\n---\n\n".join(
+            f"<Doc url='{d.get('url','')}'>{d.get('content','')[:500]}</Doc>"
+            for d in docs
+        )
     except Exception as e:
         return f"Web search failed: {e}"
 
 @tool
 def optimized_wiki_search(query: str) -> str:
-
+    """Optimized Wikipedia search."""
     try:
-        time.sleep(random.uniform(0.5,1))
+        time.sleep(random.uniform(0.5, 1))
        docs = WikipediaLoader(query=query, load_max_docs=1).load()
-        return "\n\n---\n\n".join(
+        return "\n\n---\n\n".join(
+            f"<Doc src='{d.metadata['source']}'>{d.page_content[:800]}</Doc>"
+            for d in docs
+        )
    except Exception as e:
        return f"Wikipedia search failed: {e}"
 
@@ -167,20 +184,25 @@ def optimized_wiki_search(query: str) -> str:
 def setup_faiss():
     try:
         schema = """
-        {
+        {
+            page_content: .Question,
+            metadata: { task_id: .task_id, Final_answer: ."Final answer" }
+        }
         """
-        loader = JSONLoader("metadata.jsonl", jq_schema=schema, json_lines=True, text_content=False)
+        loader = JSONLoader(file_path="metadata.jsonl", jq_schema=schema, json_lines=True, text_content=False)
         docs = loader.load()
-
-        chunks =
-        embeds = NVIDIAEmbeddings(
+        splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=50)
+        chunks = splitter.split_documents(docs)
+        embeds = NVIDIAEmbeddings(
+            model="nvidia/nv-embedqa-e5-v5",
+            api_key=os.getenv("NVIDIA_API_KEY")
+        )
         return FAISS.from_documents(chunks, embeds)
     except Exception as e:
         print(f"FAISS setup failed: {e}")
         return None
 
-
-class State(TypedDict):
+class EnhancedAgentState(TypedDict):
     messages: Annotated[List[HumanMessage|AIMessage], operator.add]
     query: str
     agent_type: str
@@ -188,85 +210,115 @@ class State(TypedDict):
     perf: Dict[str,Any]
     agno_resp: str
 
-class
+class HybridLangGraphAgnoSystem:
     def __init__(self):
         self.agno = create_agno_agents()
         self.store = setup_faiss()
-        self.tools = [
+        self.tools = [
+            multiply, add, subtract, divide, modulus,
+            optimized_web_search, optimized_wiki_search
+        ]
         if self.store:
             retr = self.store.as_retriever(search_type="similarity", search_kwargs={"k":2})
-            self.tools.append(create_retriever_tool(
+            self.tools.append(create_retriever_tool(
+                retriever=retr,
+                name="Question_Search",
+                description="Retrieve similar questions"
+            ))
         self.graph = self._build_graph()
+
     def _build_graph(self):
-
-
-
-
-
-
-
-
-
-
-
+        groq_llm = ChatGroq(model="llama-3.3-70b-versatile", temperature=0)
+        gemini_llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-lite", temperature=0)
+        nvidia_llm = ChatNVIDIA(model="meta/llama-3.1-70b-instruct", temperature=0)
+
+        def router(st: EnhancedAgentState) -> EnhancedAgentState:
+            q = st["query"].lower()
+            if any(k in q for k in ["calculate","math"]):
+                t = "lg_math"
+            elif any(k in q for k in ["research","analyze"]):
+                t = "agno_research"
+            elif any(k in q for k in ["what is","who is"]):
+                t = "lg_retrieval"
+            else:
+                t = "agno_general"
+            return {**st, "agent_type": t}
+
+        def lg_math(st: EnhancedAgentState) -> EnhancedAgentState:
             groq_limiter.wait_if_needed()
-            t0=time.time()
-
-            sys=SystemMessage(content="
-            res=
-            return {**st,"final_answer":res.content,"perf":{"time":time.time()-t0,"prov":"LG-Groq"}}
-
+            t0 = time.time()
+            llm = groq_llm.bind_tools([multiply, add, subtract, divide, modulus])
+            sys = SystemMessage(content="Fast calculator. FINAL ANSWER: [result]")
+            res = llm.invoke([sys, HumanMessage(content=st["query"])])
+            return {**st, "final_answer": res.content, "perf": {"time": time.time()-t0, "prov":"LG-Groq"}}
+
+        def agno_research(st: EnhancedAgentState) -> EnhancedAgentState:
             gemini_limiter.wait_if_needed()
-            t0=time.time()
-            resp=self.agno["research"].run(st["query"],stream=False)
-            return {**st,"final_answer":resp,"perf":{"time":time.time()-t0,"prov":"Agno-Gemini"}}
-
+            t0 = time.time()
+            resp = self.agno["research"].run(st["query"], stream=False)
+            return {**st, "final_answer": resp, "perf": {"time": time.time()-t0, "prov":"Agno-Gemini"}}
+
+        def lg_retrieval(st: EnhancedAgentState) -> EnhancedAgentState:
             groq_limiter.wait_if_needed()
-            t0=time.time()
-
-            sys=SystemMessage(content="Retrieve
-            res=
-            return {**st,"final_answer":res.content,"perf":{"time":time.time()-t0,"prov":"LG-Retrieval"}}
-
+            t0 = time.time()
+            llm = groq_llm.bind_tools(self.tools)
+            sys = SystemMessage(content="Retrieve. FINAL ANSWER: [answer]")
+            res = llm.invoke([sys, HumanMessage(content=st["query"])])
+            return {**st, "final_answer": res.content, "perf": {"time": time.time()-t0, "prov":"LG-Retrieval"}}
+
+        def agno_general(st: EnhancedAgentState) -> EnhancedAgentState:
             nvidia_limiter.wait_if_needed()
-            t0=time.time()
-            if any(
-                resp=self.agno["math"].run(st["query"],stream=False)
+            t0 = time.time()
+            if any(k in st["query"].lower() for k in ["calculate","compute"]):
+                resp = self.agno["math"].run(st["query"], stream=False)
             else:
-                resp=self.agno["research"].run(st["query"],stream=False)
-            return {**st,"final_answer":resp,"perf":{"time":time.time()-t0,"prov":"Agno-
-
-
-
-
-            g
-            g.add_node("
-            g.add_node("
+                resp = self.agno["research"].run(st["query"], stream=False)
+            return {**st, "final_answer": resp, "perf": {"time": time.time()-t0, "prov":"Agno-General"}}
+
+        def pick(st: EnhancedAgentState) -> str:
+            return st["agent_type"]
+
+        g = StateGraph(EnhancedAgentState)
+        g.add_node("router", router)
+        g.add_node("lg_math", lg_math)
+        g.add_node("agno_research", agno_research)
+        g.add_node("lg_retrieval", lg_retrieval)
+        g.add_node("agno_general", agno_general)
         g.set_entry_point("router")
-        g.add_conditional_edges("router",pick,{
-            "lg_math":"lg_math",
+        g.add_conditional_edges("router", pick, {
+            "lg_math":"lg_math",
+            "agno_research":"agno_research",
+            "lg_retrieval":"lg_retrieval",
+            "agno_general":"agno_general"
        })
        for n in ["lg_math","agno_research","lg_retrieval","agno_general"]:
-            g.add_edge(n,"END")
+            g.add_edge(n, "END")
        return g.compile(checkpointer=MemorySaver())
-
-
-
+
+    def process_query(self, q: str) -> Dict[str,Any]:
+        state = {
+            "messages":[HumanMessage(content=q)],
+            "query":q, "agent_type":"", "final_answer":"", "perf":{}, "agno_resp":""
+        }
+        cfg = {"configurable":{"thread_id":f"hyb_{hash(q)}"}}
        try:
-            out=self.graph.invoke(
-            return {
+            out = self.graph.invoke(state, cfg)
+            return {
+                "answer": out["final_answer"],
+                "performance_metrics": out["perf"],
+                "provider_used": out["perf"].get("prov")
+            }
        except Exception as e:
-            return {"answer":f"Error: {e}","
+            return {"answer":f"Error: {e}", "performance_metrics":{}, "provider_used":"Error"}
 
-def build_graph(provider:str="hybrid"):
+def build_graph(provider: str="hybrid"):
    if provider=="hybrid":
-        return
+        return HybridLangGraphAgnoSystem().graph
    raise ValueError("Only 'hybrid' supported")
 
-
-
-
-
-    res=graph.invoke({"messages":msgs},{"configurable":{"thread_id":"test"}})
+if __name__ == "__main__":
+    graph = build_graph()
+    msgs = [HumanMessage(content="What are the names of the US presidents who were assassinated?")]
+    res = graph.invoke({"messages":msgs},{"configurable":{"thread_id":"test"}})
    for m in res["messages"]:
        m.pretty_print()