csabakecskemeti committed on
Commit
ebf8e32
·
verified ·
1 Parent(s): 0542ac5

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -67
app.py CHANGED
@@ -3,12 +3,10 @@ import gradio as gr
3
  from typing import List
4
  import logging
5
  import logging.handlers
6
- import time
7
- import random
8
  from langchain_openai import ChatOpenAI
9
  from langchain_core.tools import tool
10
- from langgraph.prebuilt import create_react_agent
11
- from langchain_core.messages import HumanMessage
12
  from langchain_tavily import TavilySearch
13
 
14
  # Configuration - set to False to disable detailed logging
@@ -58,27 +56,27 @@ if ENABLE_DETAILED_LOGGING:
58
  else:
59
  logger.warning("No Tavily API key found in environment variables")
60
 
61
- # Tavily search tool integration
62
-
63
- class ReactAgentChat:
64
  def __init__(self, ip: str, port: str, api_key: str, model: str):
65
  self.ip = ip
66
  self.port = port
67
  self.api_key = api_key
68
  self.model = model
69
- self.agent = None
 
70
  self._setup_agent()
71
 
72
  def _setup_agent(self):
73
- """Initialize the LangGraph ReAct agent"""
74
  try:
75
  if ENABLE_DETAILED_LOGGING:
76
- logger.info(f"=== SETTING UP AGENT ===")
77
  logger.info(f"LLM URL: http://{self.ip}:{self.port}/v1")
78
  logger.info(f"Model: {self.model}")
79
 
80
  # Create OpenAI-compatible model
81
- llm = ChatOpenAI(
82
  base_url=f"http://{self.ip}:{self.port}/v1",
83
  api_key=self.api_key,
84
  model=self.model,
@@ -87,15 +85,14 @@ class ReactAgentChat:
87
  if ENABLE_DETAILED_LOGGING:
88
  logger.info("LLM created successfully")
89
 
90
- # Define tools - use Tavily search API with graceful error handling
91
  if tavily_key:
92
  if ENABLE_DETAILED_LOGGING:
93
  logger.info("Setting up Tavily search tool")
94
  try:
95
- # Create custom wrapper for Tavily with error handling
96
  @tool
97
  def web_search(query: str) -> str:
98
- """Search the web for current information about any topic."""
99
  try:
100
  tavily_tool = TavilySearch(
101
  tavily_api_key=tavily_key,
@@ -113,8 +110,6 @@ class ReactAgentChat:
113
  if ENABLE_DETAILED_LOGGING:
114
  logger.error(f"Tavily search failed for query '{query}': {e}")
115
  logger.error(f"Exception type: {type(e).__name__}")
116
- import traceback
117
- logger.error(f"Full traceback: {traceback.format_exc()}")
118
 
119
  # Check for rate limit or quota issues
120
  if any(keyword in error_str for keyword in ['rate limit', 'quota', 'limit exceeded', 'usage limit', 'billing']):
@@ -126,42 +121,30 @@ class ReactAgentChat:
126
  logger.error(f"Tavily API error: {e}")
127
  return f"I can't search the web right now. Error: {str(e)[:100]}"
128
 
129
- search_tool = web_search
130
  if ENABLE_DETAILED_LOGGING:
131
- logger.info("Tavily search tool wrapper created successfully")
132
  except Exception as e:
133
  if ENABLE_DETAILED_LOGGING:
134
- logger.error(f"Failed to create Tavily tool wrapper: {e}")
135
- # Fallback tool
136
- @tool
137
- def no_search(query: str) -> str:
138
- """Search tool unavailable."""
139
- return "I can't search the web right now."
140
- search_tool = no_search
141
  else:
142
  if ENABLE_DETAILED_LOGGING:
143
- logger.warning("No Tavily API key found, creating fallback tool")
144
- @tool
145
- def no_search(query: str) -> str:
146
- """Search tool unavailable."""
147
- if ENABLE_DETAILED_LOGGING:
148
- logger.error("Search attempted but no Tavily API key configured")
149
- return "I can't search the web right now."
150
- search_tool = no_search
151
-
152
- tools = [search_tool]
153
- if ENABLE_DETAILED_LOGGING:
154
- logger.info(f"Tools defined: {[tool.name for tool in tools]}")
155
 
156
  # Bind tools to the model
157
- model_with_tools = llm.bind_tools(tools)
158
- if ENABLE_DETAILED_LOGGING:
159
- logger.info("Tools bound to model")
 
 
 
 
 
160
 
161
- # Create the ReAct agent
162
- self.agent = create_react_agent(model_with_tools, tools)
163
  if ENABLE_DETAILED_LOGGING:
164
- logger.info("ReAct agent created successfully")
165
 
166
  except Exception as e:
167
  logger.error(f"=== AGENT SETUP ERROR ===")
@@ -181,9 +164,9 @@ class ReactAgentChat:
181
  self._setup_agent()
182
 
183
  def chat(self, message: str, history: List[List[str]]) -> str:
184
- """Generate chat response using ReAct agent"""
185
  try:
186
- if not self.agent:
187
  return "Error: Agent not initialized"
188
 
189
  if ENABLE_DETAILED_LOGGING:
@@ -191,34 +174,75 @@ class ReactAgentChat:
191
  logger.info(f"Message: {message}")
192
  logger.info(f"History length: {len(history)}")
193
 
194
- # Convert history to messages for context handling
195
  messages = []
196
  for user_msg, assistant_msg in history:
197
  messages.append(HumanMessage(content=user_msg))
198
  if assistant_msg: # Only add if assistant responded
199
- from langchain_core.messages import AIMessage
200
  messages.append(AIMessage(content=assistant_msg))
201
 
202
  # Add current message
203
  messages.append(HumanMessage(content=message))
204
 
205
- # Invoke the agent
206
  if ENABLE_DETAILED_LOGGING:
207
- logger.info(f"=== INVOKING AGENT ===")
208
- logger.info(f"Total messages in history: {len(messages)}")
209
- response = self.agent.invoke({"messages": messages})
 
210
 
211
  if ENABLE_DETAILED_LOGGING:
212
- logger.info(f"=== AGENT RESPONSE ===")
213
- logger.info(f"Full response: {response}")
214
- logger.info(f"Number of messages: {len(response.get('messages', []))}")
 
 
 
 
 
 
215
 
216
- # Log each message in the response
217
- for i, msg in enumerate(response.get("messages", [])):
218
- logger.info(f"Message {i}: Type={type(msg).__name__}, Content={getattr(msg, 'content', 'No content')}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
 
220
- # Extract the final response
221
- final_message = response["messages"][-1].content
222
  if ENABLE_DETAILED_LOGGING:
223
  logger.info(f"=== FINAL MESSAGE ===")
224
  logger.info(f"Final message: {final_message}")
@@ -235,19 +259,19 @@ class ReactAgentChat:
235
  return error_msg
236
 
237
  # Global agent instance
238
- react_agent = ReactAgentChat(llm_ip, llm_port, llm_key, llm_model)
239
 
240
  def generate_response(message: str, history: List[List[str]], system_prompt: str,
241
  max_tokens: int, ip: str, port: str, api_key: str, model: str):
242
- """Generate response using ReAct agent"""
243
- global react_agent
244
 
245
  try:
246
  # Update agent configuration if changed
247
- react_agent.update_config(ip, port, api_key, model)
248
 
249
  # Generate response
250
- response = react_agent.chat(message, history)
251
 
252
  # Stream the response word by word for better UX
253
  words = response.split()
@@ -273,7 +297,7 @@ chatbot = gr.ChatInterface(
273
  ),
274
  additional_inputs=[
275
  gr.Textbox(
276
- "You are a helpful AI assistant with web search capabilities.",
277
  label="System Prompt",
278
  lines=2
279
  ),
@@ -288,8 +312,8 @@ chatbot = gr.ChatInterface(
288
  gr.Textbox(llm_model, label="Model Name",
289
  info="Name of the model to use"),
290
  ],
291
- title="🤖 LangGraph ReAct Agent with Tavily Search",
292
- description="Chat with a LangGraph ReAct agent that can search the web using Tavily. Ask about current events, research topics, or any questions that require up-to-date information!",
293
  theme="finlaymacklon/smooth_slate"
294
  )
295
 
 
3
  from typing import List
4
  import logging
5
  import logging.handlers
6
+ import json
 
7
  from langchain_openai import ChatOpenAI
8
  from langchain_core.tools import tool
9
+ from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
 
10
  from langchain_tavily import TavilySearch
11
 
12
  # Configuration - set to False to disable detailed logging
 
56
  else:
57
  logger.warning("No Tavily API key found in environment variables")
58
 
59
+ # Tool calling agent implementation
60
+ class ToolCallingAgentChat:
 
61
  def __init__(self, ip: str, port: str, api_key: str, model: str):
62
  self.ip = ip
63
  self.port = port
64
  self.api_key = api_key
65
  self.model = model
66
+ self.llm = None
67
+ self.tools = []
68
  self._setup_agent()
69
 
70
  def _setup_agent(self):
71
+ """Initialize the tool calling agent"""
72
  try:
73
  if ENABLE_DETAILED_LOGGING:
74
+ logger.info(f"=== SETTING UP TOOL CALLING AGENT ===")
75
  logger.info(f"LLM URL: http://{self.ip}:{self.port}/v1")
76
  logger.info(f"Model: {self.model}")
77
 
78
  # Create OpenAI-compatible model
79
+ self.llm = ChatOpenAI(
80
  base_url=f"http://{self.ip}:{self.port}/v1",
81
  api_key=self.api_key,
82
  model=self.model,
 
85
  if ENABLE_DETAILED_LOGGING:
86
  logger.info("LLM created successfully")
87
 
88
+ # Define web search tool
89
  if tavily_key:
90
  if ENABLE_DETAILED_LOGGING:
91
  logger.info("Setting up Tavily search tool")
92
  try:
 
93
  @tool
94
  def web_search(query: str) -> str:
95
+ """Search the web for current information about any topic. Use this when you need up-to-date information, current events, or real-time data."""
96
  try:
97
  tavily_tool = TavilySearch(
98
  tavily_api_key=tavily_key,
 
110
  if ENABLE_DETAILED_LOGGING:
111
  logger.error(f"Tavily search failed for query '{query}': {e}")
112
  logger.error(f"Exception type: {type(e).__name__}")
 
 
113
 
114
  # Check for rate limit or quota issues
115
  if any(keyword in error_str for keyword in ['rate limit', 'quota', 'limit exceeded', 'usage limit', 'billing']):
 
121
  logger.error(f"Tavily API error: {e}")
122
  return f"I can't search the web right now. Error: {str(e)[:100]}"
123
 
124
+ self.tools = [web_search]
125
  if ENABLE_DETAILED_LOGGING:
126
+ logger.info("Tavily search tool created successfully")
127
  except Exception as e:
128
  if ENABLE_DETAILED_LOGGING:
129
+ logger.error(f"Failed to create Tavily tool: {e}")
130
+ self.tools = []
 
 
 
 
 
131
  else:
132
  if ENABLE_DETAILED_LOGGING:
133
+ logger.warning("No Tavily API key found, no web search tool available")
134
+ self.tools = []
 
 
 
 
 
 
 
 
 
 
135
 
136
  # Bind tools to the model
137
+ if self.tools:
138
+ self.llm_with_tools = self.llm.bind_tools(self.tools)
139
+ if ENABLE_DETAILED_LOGGING:
140
+ logger.info(f"Tools bound to model: {[tool.name for tool in self.tools]}")
141
+ else:
142
+ self.llm_with_tools = self.llm
143
+ if ENABLE_DETAILED_LOGGING:
144
+ logger.info("No tools available, using base model")
145
 
 
 
146
  if ENABLE_DETAILED_LOGGING:
147
+ logger.info("Tool calling agent created successfully")
148
 
149
  except Exception as e:
150
  logger.error(f"=== AGENT SETUP ERROR ===")
 
164
  self._setup_agent()
165
 
166
  def chat(self, message: str, history: List[List[str]]) -> str:
167
+ """Generate chat response using tool calling"""
168
  try:
169
+ if not self.llm_with_tools:
170
  return "Error: Agent not initialized"
171
 
172
  if ENABLE_DETAILED_LOGGING:
 
174
  logger.info(f"Message: {message}")
175
  logger.info(f"History length: {len(history)}")
176
 
177
+ # Convert history to messages for context
178
  messages = []
179
  for user_msg, assistant_msg in history:
180
  messages.append(HumanMessage(content=user_msg))
181
  if assistant_msg: # Only add if assistant responded
 
182
  messages.append(AIMessage(content=assistant_msg))
183
 
184
  # Add current message
185
  messages.append(HumanMessage(content=message))
186
 
187
+ # Get initial response from LLM
188
  if ENABLE_DETAILED_LOGGING:
189
+ logger.info(f"=== INVOKING LLM ===")
190
+ logger.info(f"Total messages in context: {len(messages)}")
191
+
192
+ response = self.llm_with_tools.invoke(messages)
193
 
194
  if ENABLE_DETAILED_LOGGING:
195
+ logger.info(f"=== LLM RESPONSE ===")
196
+ logger.info(f"Response type: {type(response)}")
197
+ logger.info(f"Has tool calls: {bool(response.tool_calls if hasattr(response, 'tool_calls') else False)}")
198
+
199
+ # Check if LLM wants to call tools
200
+ if hasattr(response, 'tool_calls') and response.tool_calls:
201
+ if ENABLE_DETAILED_LOGGING:
202
+ logger.info(f"=== TOOL CALLS DETECTED ===")
203
+ logger.info(f"Number of tool calls: {len(response.tool_calls)}")
204
 
205
+ # Add the LLM response to messages
206
+ messages.append(response)
207
+
208
+ # Execute tool calls
209
+ for tool_call in response.tool_calls:
210
+ if ENABLE_DETAILED_LOGGING:
211
+ logger.info(f"Executing tool: {tool_call['name']} with args: {tool_call['args']}")
212
+
213
+ # Find and execute the tool
214
+ tool_result = None
215
+ for tool in self.tools:
216
+ if tool.name == tool_call['name']:
217
+ try:
218
+ tool_result = tool.invoke(tool_call['args'])
219
+ if ENABLE_DETAILED_LOGGING:
220
+ logger.info(f"Tool executed successfully: {tool_call['name']}")
221
+ break
222
+ except Exception as e:
223
+ tool_result = f"Tool execution failed: {str(e)}"
224
+ if ENABLE_DETAILED_LOGGING:
225
+ logger.error(f"Tool execution failed: {e}")
226
+
227
+ if tool_result is None:
228
+ tool_result = f"Tool {tool_call['name']} not found"
229
+
230
+ # Add tool result to messages
231
+ messages.append(ToolMessage(
232
+ content=str(tool_result),
233
+ tool_call_id=tool_call['id']
234
+ ))
235
+
236
+ # Get final response from LLM after tool execution
237
+ if ENABLE_DETAILED_LOGGING:
238
+ logger.info(f"=== GETTING FINAL RESPONSE ===")
239
+
240
+ final_response = self.llm_with_tools.invoke(messages)
241
+ final_message = final_response.content
242
+ else:
243
+ # No tool calls, use the direct response
244
+ final_message = response.content
245
 
 
 
246
  if ENABLE_DETAILED_LOGGING:
247
  logger.info(f"=== FINAL MESSAGE ===")
248
  logger.info(f"Final message: {final_message}")
 
259
  return error_msg
260
 
261
  # Global agent instance
262
+ tool_calling_agent = ToolCallingAgentChat(llm_ip, llm_port, llm_key, llm_model)
263
 
264
  def generate_response(message: str, history: List[List[str]], system_prompt: str,
265
  max_tokens: int, ip: str, port: str, api_key: str, model: str):
266
+ """Generate response using tool calling agent"""
267
+ global tool_calling_agent
268
 
269
  try:
270
  # Update agent configuration if changed
271
+ tool_calling_agent.update_config(ip, port, api_key, model)
272
 
273
  # Generate response
274
+ response = tool_calling_agent.chat(message, history)
275
 
276
  # Stream the response word by word for better UX
277
  words = response.split()
 
297
  ),
298
  additional_inputs=[
299
  gr.Textbox(
300
+ "You are a helpful AI assistant with web search capabilities. Use web search when you need current information, recent events, or real-time data.",
301
  label="System Prompt",
302
  lines=2
303
  ),
 
312
  gr.Textbox(llm_model, label="Model Name",
313
  info="Name of the model to use"),
314
  ],
315
+ title="🚀 Fast Tool Calling Agent with Tavily Search",
316
+ description="Chat with a fast tool calling agent that can search the web using Tavily. The agent automatically decides when to search based on your query - much faster than ReAct agents!",
317
  theme="finlaymacklon/smooth_slate"
318
  )
319