bassommma committed on
Commit c04310b · verified · 1 Parent(s): 3c39663

Update app.py

Files changed (1)
  1. app.py +764 -0
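
For orientation, a minimal client-side sketch of calling the /query endpoint this commit adds, assuming the service is running locally on port 8000 as in the __main__ block; the URL, message text, and customer_id value below are illustrative only:

import requests  # client-side dependency for this example, not used by app.py

# QueryRequest fields: message (required) and customer_id (optional, defaults to "anonymous")
payload = {"message": "How do I verify my account?", "customer_id": "demo-001"}
# Allow slightly more than the server's 120-second assistant-run timeout
resp = requests.post("http://localhost:8000/query", json=payload, timeout=130)
resp.raise_for_status()
data = resp.json()  # QueryResponse: response, customer_id, method, timestamp
print(data["method"], data["response"])
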
app.py CHANGED
@@ -2,6 +2,770 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
 
5
+ from fastapi import FastAPI, HTTPException
+ from fastapi.responses import JSONResponse
6
+ from pydantic import BaseModel
7
+ import openai
8
+ import json
9
+ import os
10
+ import time
11
+ import logging
12
+ from typing import List, Optional, Dict, Any
13
+ from contextlib import asynccontextmanager
14
+
15
+ from dotenv import load_dotenv
16
+ load_dotenv()
17
+ # Configure logging
18
+ logging.basicConfig(level=logging.INFO)
19
+ logger = logging.getLogger(__name__)
20
+
21
+ # --- GPT Tool Class (Using standard Assistants API) ---
22
+ class PersistentCustomGPT:
23
+ """Custom GPT using standard Assistants API with improved error handling"""
24
+
25
+ def __init__(self, api_key: str, config_file: str = "assistant_config.json"):
26
+ self.client = openai.OpenAI(api_key=api_key)
27
+ self.config_file = config_file
28
+ self.assistant_id = None
29
+ self.uploaded_files = []
30
+ self.threads = {} # Store threads per customer
31
+
32
+ # Load existing configuration
33
+ self.load_config()
34
+
35
+ def load_config(self):
36
+ """Load existing assistant configuration"""
37
+ if os.path.exists(self.config_file):
38
+ try:
39
+ with open(self.config_file, 'r') as f:
40
+ config = json.load(f)
41
+ self.assistant_id = config.get('assistant_id')
42
+ self.uploaded_files = config.get('uploaded_files', [])
43
+ logger.info(f"Loaded existing assistant: {self.assistant_id}")
44
+ except Exception as e:
45
+ logger.error(f"Error loading config: {e}")
46
+
47
+ def save_config(self):
48
+ """Save assistant configuration"""
49
+ config = {
50
+ 'assistant_id': self.assistant_id,
51
+ 'uploaded_files': self.uploaded_files
52
+ }
53
+ try:
54
+ with open(self.config_file, 'w') as f:
55
+ json.dump(config, f, indent=2)
56
+ logger.info(f"Saved configuration to {self.config_file}")
57
+ except Exception as e:
58
+ logger.error(f"Error saving config: {e}")
59
+
60
+ def upload_files(self, file_paths: List[str]) -> List[str]:
61
+ """Upload files to OpenAI with improved error handling and format validation"""
62
+ file_ids = []
63
+
64
+ # Supported file formats for Assistants API
65
+ supported_formats = {
66
+ '.txt', '.md', '.json', '.csv', '.pdf', '.docx',
67
+ '.xlsx', '.xls', '.py', '.js', '.html', '.css'
68
+ }
69
+
70
+ for file_path in file_paths:
71
+ if not os.path.exists(file_path):
72
+ logger.warning(f"File not found: {file_path}")
73
+ continue
74
+
75
+ # Check file format
76
+ file_ext = os.path.splitext(file_path)[1].lower()
77
+ if file_ext not in supported_formats:
78
+ logger.warning(f"Unsupported file format {file_ext}: {file_path}")
79
+ continue
80
+
81
+ # Check file size (max 512MB for Assistants API)
82
+ file_size = os.path.getsize(file_path)
83
+ max_size = 512 * 1024 * 1024 # 512MB
84
+ if file_size > max_size:
85
+ logger.warning(f"File too large ({file_size / 1024 / 1024:.1f}MB): {file_path}")
86
+ continue
87
+
88
+ try:
89
+ with open(file_path, "rb") as file:
90
+ uploaded_file = self.client.files.create(
91
+ file=file,
92
+ purpose="assistants"
93
+ )
94
+ file_ids.append(uploaded_file.id)
95
+ self.uploaded_files.append(uploaded_file.id)
96
+ logger.info(f"Uploaded {file_ext} file: {file_path} -> {uploaded_file.id} ({file_size / 1024:.1f}KB)")
97
+ except Exception as e:
98
+ logger.error(f"Error uploading {file_path}: {e}")
99
+
100
+ return file_ids
101
+
102
+ def create_or_load_assistant(self, instructions: str, file_paths: List[str] = None, force_update: bool = False) -> str:
103
+ """Create assistant or update existing one"""
104
+
105
+ # If assistant already exists, verify it still works
106
+ if self.assistant_id and not force_update:
107
+ try:
108
+ assistant = self.client.beta.assistants.retrieve(self.assistant_id)
109
+ logger.info(f"Using existing assistant: {self.assistant_id}")
110
+ return self.assistant_id
111
+ except Exception as e:
112
+ logger.warning(f"Assistant no longer exists, creating new one: {e}")
113
+ self.assistant_id = None
114
+
115
+ # Update existing assistant if force_update is True
116
+ if self.assistant_id and force_update:
117
+ try:
118
+ logger.info(f"Updating instructions for existing assistant: {self.assistant_id}")
119
+ updated_assistant = self.client.beta.assistants.update(
120
+ assistant_id=self.assistant_id,
121
+ instructions=instructions
122
+ )
123
+ logger.info(f"✅ Assistant instructions updated: {self.assistant_id}")
124
+ return self.assistant_id
125
+ except Exception as e:
126
+ logger.warning(f"Failed to update assistant, creating new one: {e}")
127
+ self.assistant_id = None
128
+
129
+ # Create new assistant
130
+ return self._create_assistant(instructions, file_paths)
131
+
132
+ def _create_assistant(self, instructions: str, file_paths: List[str] = None) -> str:
133
+ """Create assistant using current API"""
134
+
135
+ # Upload files if provided
136
+ file_ids = []
137
+ if file_paths:
138
+ file_ids = self.upload_files(file_paths)
139
+
140
+ # Prepare tools
141
+ tools = [{"type": "code_interpreter"}] # Always include code interpreter
142
+ if file_ids:
143
+ tools.append({"type": "file_search"})
144
+
145
+ try:
146
+ # Create assistant
147
+ assistant = self.client.beta.assistants.create(
148
+ name="Valetax CS Assistant",
149
+ instructions=instructions,
150
+ model="gpt-4o",
151
+ tools=tools
152
+ )
153
+
154
+ # If we have files, create a vector store and attach them
155
+ if file_ids:
156
+ try:
157
+ # Create vector store
158
+ vector_store = self.client.beta.vector_stores.create(
159
+ name="Valetax Knowledge Base"
160
+ )
161
+
162
+ # Add files to vector store
163
+ self.client.beta.vector_stores.file_batches.create(
164
+ vector_store_id=vector_store.id,
165
+ file_ids=file_ids
166
+ )
167
+
168
+ # Update assistant with vector store
169
+ assistant = self.client.beta.assistants.update(
170
+ assistant_id=assistant.id,
171
+ tool_resources={
172
+ "file_search": {
173
+ "vector_store_ids": [vector_store.id]
174
+ }
175
+ }
176
+ )
177
+ logger.info(f"Assistant created with {len(file_ids)} files in vector store")
178
+ except Exception as e:
179
+ logger.warning(f"Could not create vector store: {e}")
180
+
181
+ self.assistant_id = assistant.id
182
+ self.save_config()
183
+
184
+ logger.info(f"Assistant created: {self.assistant_id}")
185
+ return self.assistant_id
186
+
187
+ except Exception as e:
188
+ logger.error(f"Error creating assistant: {e}")
189
+ raise
190
+
191
+ def get_customer_thread(self, customer_id: str) -> str:
192
+ """Get or create thread for specific customer"""
193
+ if customer_id not in self.threads:
194
+ try:
195
+ thread = self.client.beta.threads.create()
196
+ self.threads[customer_id] = thread.id
197
+ logger.info(f"Created new thread for customer {customer_id}")
198
+ except Exception as e:
199
+ logger.error(f"Error creating thread for {customer_id}: {e}")
200
+ raise
201
+ return self.threads[customer_id]
202
+
203
+ def query(self, message: str, customer_id: str = "default") -> str:
204
+ """Query assistant with improved error handling and timeout"""
205
+ if not self.assistant_id:
206
+ raise ValueError("No assistant configured")
207
+
208
+ # Get customer-specific thread
209
+ thread_id = self.get_customer_thread(customer_id)
210
+
211
+ try:
212
+ # Add message to thread
213
+ self.client.beta.threads.messages.create(
214
+ thread_id=thread_id,
215
+ role="user",
216
+ content=message
217
+ )
218
+
219
+ # Create and run the assistant
220
+ run = self.client.beta.threads.runs.create(
221
+ thread_id=thread_id,
222
+ assistant_id=self.assistant_id
223
+ )
224
+
225
+ # Wait for completion with timeout
226
+ max_wait_time = 120 # 2 minutes max
227
+ start_time = time.time()
228
+
229
+ while run.status in ['queued', 'in_progress', 'cancelling']:
230
+ if time.time() - start_time > max_wait_time:
231
+ logger.error(f"Query timeout for customer {customer_id}")
232
+ return "I apologize, but your request is taking longer than expected. Please try again or contact support."
233
+
234
+ time.sleep(1)
235
+ run = self.client.beta.threads.runs.retrieve(
236
+ thread_id=thread_id,
237
+ run_id=run.id
238
+ )
239
+
240
+ if run.status == 'completed':
241
+ messages = self.client.beta.threads.messages.list(
242
+ thread_id=thread_id,
243
+ order="desc",
244
+ limit=1
245
+ )
246
+ return messages.data[0].content[0].text.value
247
+ elif run.status == 'failed':
248
+ logger.error(f"Assistant run failed: {run.last_error}")
249
+ return "I apologize, but I encountered an error processing your request. Please try again or contact support."
250
+ else:
251
+ logger.warning(f"Unexpected run status: {run.status}")
252
+ return f"I apologize, but I couldn't process your request (status: {run.status}). Please try again."
253
+
254
+ except Exception as e:
255
+ logger.error(f"Query error for customer {customer_id}: {str(e)}")
256
+ return "I apologize, but I'm experiencing technical difficulties. Please try again in a moment or contact support."
257
+
258
+ # --- Fallback: Knowledge in Instructions ---
259
+ class InstructionBasedGPT:
260
+ """GPT with knowledge embedded in instructions as fallback"""
261
+
262
+ def __init__(self, api_key: str, knowledge_content: str = ""):
263
+ self.client = openai.OpenAI(api_key=api_key)
264
+ self.assistant_id = None
265
+ self.threads = {}
266
+ self.knowledge_content = knowledge_content
267
+
268
+ def create_assistant(self, base_instructions: str) -> str:
269
+ """Create assistant with knowledge in instructions"""
270
+
271
+ full_instructions = f"""
272
+ {base_instructions}
273
+
274
+ KNOWLEDGE BASE:
275
+ {self.knowledge_content[:50000]}
276
+
277
+ Use the above knowledge base to answer customer questions accurately.
278
+ If the knowledge base doesn't contain specific information, provide general helpful guidance.
279
+ """
280
+
281
+ try:
282
+ assistant = self.client.beta.assistants.create(
283
+ name="Valetax CS Assistant (Knowledge Based)",
284
+ instructions=full_instructions,
285
+ model="gpt-4o",
286
+ tools=[{"type": "code_interpreter"}]
287
+ )
288
+
289
+ self.assistant_id = assistant.id
290
+ logger.info(f"Created knowledge-based assistant: {assistant.id}")
291
+ return assistant.id
292
+ except Exception as e:
293
+ logger.error(f"Error creating instruction-based assistant: {e}")
294
+ raise
295
+
296
+ def get_customer_thread(self, customer_id: str) -> str:
297
+ """Get or create thread for specific customer"""
298
+ if customer_id not in self.threads:
299
+ try:
300
+ thread = self.client.beta.threads.create()
301
+ self.threads[customer_id] = thread.id
302
+ logger.info(f"Created new thread for customer {customer_id}")
303
+ except Exception as e:
304
+ logger.error(f"Error creating thread: {e}")
305
+ raise
306
+ return self.threads[customer_id]
307
+
308
+ def query(self, message: str, customer_id: str = "default") -> str:
309
+ """Query assistant with embedded knowledge"""
310
+ if not self.assistant_id:
311
+ raise ValueError("No assistant configured")
312
+
313
+ thread_id = self.get_customer_thread(customer_id)
314
+
315
+ try:
316
+ # Add message
317
+ self.client.beta.threads.messages.create(
318
+ thread_id=thread_id,
319
+ role="user",
320
+ content=message
321
+ )
322
+
323
+ # Run assistant
324
+ run = self.client.beta.threads.runs.create(
325
+ thread_id=thread_id,
326
+ assistant_id=self.assistant_id
327
+ )
328
+
329
+ # Wait for completion with timeout
330
+ max_wait_time = 120
331
+ start_time = time.time()
332
+
333
+ while run.status in ['queued', 'in_progress', 'cancelling']:
334
+ if time.time() - start_time > max_wait_time:
335
+ return "Request timeout. Please try again."
336
+
337
+ time.sleep(1)
338
+ run = self.client.beta.threads.runs.retrieve(
339
+ thread_id=thread_id,
340
+ run_id=run.id
341
+ )
342
+
343
+ if run.status == 'completed':
344
+ messages = self.client.beta.threads.messages.list(
345
+ thread_id=thread_id,
346
+ order="desc",
347
+ limit=1
348
+ )
349
+ return messages.data[0].content[0].text.value
350
+ else:
351
+ return f"Error processing request: {run.status}"
352
+
353
+ except Exception as e:
354
+ logger.error(f"Query error: {str(e)}")
355
+ return "I apologize, but I'm experiencing technical difficulties. Please try again."
356
+
357
+ # --- FastAPI App with Lifespan ---
358
+ @asynccontextmanager
359
+ async def lifespan(app: FastAPI):
360
+ """Initialize GPT tool at startup"""
361
+ global gpt_tool, instruction_gpt
362
+
363
+ # Configuration
364
+ openai_api_key = os.getenv("OPENAI_API_KEY")
365
+ if not openai_api_key:
366
+ logger.error("OPENAI_API_KEY environment variable not set")
367
+ raise ValueError("OpenAI API key is required")
368
+
369
+ # Control flags - Set these to control behavior
370
+ FORCE_UPDATE_INSTRUCTIONS = os.getenv("FORCE_UPDATE_INSTRUCTIONS", "false").lower() == "true"
371
+ FORCE_NEW_ASSISTANT = os.getenv("FORCE_NEW_ASSISTANT", "false").lower() == "true"
372
+
373
+ base_instructions = """
374
+ Your Name is: Adam
375
+ You are an expert customer service representative working for Valetax.com
376
+ Valetax is a leading Forex broker operating across the MENA region and Asia, providing world-class trading services and support.
377
+
378
+ Your Core Responsibilities:
379
+ 1. Provide exceptional customer service by addressing all client inquiries with professionalism, accuracy, and efficiency
380
+ 2. Guide clients through their requests using the most optimal and organized approach to ensure complete satisfaction
381
+ 3. Resolve technical and account-related issues by providing clear, step-by-step solutions when possible
382
+ 4. Escalate complex issues to internal teams when necessary
383
+ 5. Maintain positive brand representation of Valetax at all times
384
+
385
+ Service Guidelines:
386
+ - Always greet clients warmly and introduce yourself as Adam from Valetax customer service
387
+ - Ask about their specific request to understand their needs
388
+ - Be direct, organized, and solution-focused
389
+ - Provide step-by-step responses when applicable
390
+ - Follow up to confirm satisfaction
391
+ - Maintain confidentiality about internal systems and processes
392
+
393
+ Communication Style:
394
+ - Professional but friendly
395
+ - Direct and efficient
396
+ - Organized with clear steps
397
+ - Positive and reassuring
398
+ - Focus on solutions
399
+
400
+ Remember: Every interaction should enhance the client's experience and confidence in Valetax services.
401
+ """
402
+
403
+ # Knowledge files - Add all your knowledge files here
404
+ knowledge_files = []
405
+
406
+ # Define all possible knowledge file paths
407
+ possible_files = [
408
+ r"F:\ai\makookAi\custom_gpt\data_row\Valetax GuideBook V0.1.2 AR - copy finale .pdf",
409
+ r"F:\ai\makookAi\custom_gpt\data_row\Eaxem 2025.xlsx",
410
+ r"F:\ai\makookAi\custom_gpt\data_row\issues.xlsx",
411
+ # Add more files as needed
412
+ ]
413
+
414
+ # Check which files exist and add them
415
+ for file_path in possible_files:
416
+ if os.path.exists(file_path):
417
+ knowledge_files.append(file_path)
418
+ logger.info(f"Found knowledge file: {file_path}")
419
+ else:
420
+ logger.debug(f"Knowledge file not found (skipping): {file_path}")
421
+
422
+ if not knowledge_files:
423
+ logger.warning("No knowledge files found - will use instruction-based fallback")
424
+ else:
425
+ logger.info(f"Total knowledge files found: {len(knowledge_files)}")
426
+
427
+ # Load knowledge content for fallback (from first available file)
428
+ knowledge_content = ""
429
+ if knowledge_files:
430
+ try:
431
+ first_file = knowledge_files[0]
432
+ file_ext = os.path.splitext(first_file)[1].lower()
433
+
434
+ if file_ext == '.json':
435
+ with open(first_file, 'r', encoding='utf-8') as f:
436
+ knowledge_data = json.load(f)
437
+ knowledge_content = json.dumps(knowledge_data, ensure_ascii=False)[:50000]
438
+ elif file_ext in ['.txt', '.md', '.csv']:
439
+ with open(first_file, 'r', encoding='utf-8') as f:
440
+ knowledge_content = f.read()[:50000]
441
+ else:
442
+ # For binary files (PDF, Excel, etc.), use a general description
443
+ knowledge_content = f"Knowledge base includes: {', '.join([os.path.basename(f) for f in knowledge_files])}"
444
+
445
+ logger.info(f"Loaded fallback knowledge content from: {first_file}")
446
+ except Exception as e:
447
+ logger.warning(f"Could not load knowledge content for fallback: {e}")
448
+ knowledge_content = "Comprehensive Valetax knowledge base with trading guides, FAQs, and support procedures."
449
+
450
+ try:
451
+ # Try file-based approach first
452
+ logger.info("Attempting file-based GPT initialization...")
453
+ gpt_tool = PersistentCustomGPT(api_key=openai_api_key)
454
+
455
+ # Handle force new assistant
456
+ if FORCE_NEW_ASSISTANT:
457
+ logger.info("🔄 FORCE_NEW_ASSISTANT=true - Creating new assistant...")
458
+ gpt_tool.assistant_id = None # Force new creation
459
+ gpt_tool.save_config()
460
+
461
+ assistant_id = gpt_tool.create_or_load_assistant(
462
+ base_instructions,
463
+ knowledge_files,
464
+ force_update=FORCE_UPDATE_INSTRUCTIONS
465
+ )
466
+ logger.info(f"✅ File-based GPT initialized: {assistant_id}")
467
+
468
+ except Exception as e:
469
+ logger.warning(f"⚠️ File-based GPT failed: {e}")
470
+ logger.info("🔄 Falling back to instruction-based GPT...")
471
+
472
+ try:
473
+ # Fallback to instruction-based approach
474
+ instruction_gpt = InstructionBasedGPT(openai_api_key, knowledge_content)
475
+ assistant_id = instruction_gpt.create_assistant(base_instructions)
476
+ logger.info(f"✅ Instruction-based GPT initialized: {assistant_id}")
477
+
478
+ except Exception as e2:
479
+ logger.error(f"❌ All GPT initialization failed: {e2}")
480
+ raise
481
+
482
+ yield # App is running
483
+
484
+ # Cleanup (if needed)
485
+ logger.info("Shutting down application")
486
+
487
+ app = FastAPI(
488
+ title="Valetax Customer Service API",
489
+ description="AI-powered customer service for Valetax.com",
490
+ version="1.0.0",
491
+ lifespan=lifespan
492
+ )
493
+
494
+ # Global GPT instances
495
+ gpt_tool: Optional[PersistentCustomGPT] = None
496
+ instruction_gpt: Optional[InstructionBasedGPT] = None
497
+
498
+
499
+
500
+
501
+ # --- Request/Response Models ---
502
+ class QueryRequest(BaseModel):
503
+ message: str
504
+ customer_id: Optional[str] = "anonymous"
505
+
506
+ class QueryResponse(BaseModel):
507
+ response: str
508
+ customer_id: str
509
+ method: str
510
+ timestamp: str
511
+
512
+ class HealthResponse(BaseModel):
513
+ status: str
514
+ method: str
515
+ assistant_id: Optional[str] = None
516
+ uptime: str
517
+
518
+ class ErrorResponse(BaseModel):
519
+ error: str
520
+ detail: str
521
+ timestamp: str
522
+
523
+ # --- API Endpoints ---
524
+
573
+ @app.get("/", response_model=Dict[str, Any])
574
+ async def root():
575
+ """Root endpoint with API information"""
576
+ return {
577
+ "service": "Valetax Customer Service API",
578
+ "version": "1.0.0",
579
+ "status": "active",
580
+ "endpoints": {
581
+ "health": "/health",
582
+ "query": "/query",
583
+ "api_info": "/api-info"
584
+ }
585
+ }
586
+
587
+ @app.get("/health", response_model=HealthResponse)
588
+ async def health_check():
589
+ """Health check endpoint"""
590
+ import datetime
591
+
592
+ if gpt_tool is not None:
593
+ return HealthResponse(
594
+ status="healthy",
595
+ method="file-based",
596
+ assistant_id=gpt_tool.assistant_id,
597
+ uptime=str(datetime.datetime.now())
598
+ )
599
+ elif instruction_gpt is not None:
600
+ return HealthResponse(
601
+ status="healthy",
602
+ method="instruction-based",
603
+ assistant_id=instruction_gpt.assistant_id,
604
+ uptime=str(datetime.datetime.now())
605
+ )
606
+ else:
607
+ raise HTTPException(status_code=503, detail="No GPT tool initialized")
608
+
609
+ @app.post("/query", response_model=QueryResponse)
610
+ async def query_gpt(request: QueryRequest):
611
+ """Main endpoint to query the GPT"""
612
+ import datetime
613
+
614
+ if not request.message.strip():
615
+ raise HTTPException(status_code=400, detail="Message cannot be empty")
616
+
617
+ try:
618
+ # Try file-based GPT first
619
+ if gpt_tool is not None:
620
+ response = gpt_tool.query(
621
+ message=request.message,
622
+ customer_id=request.customer_id
623
+ )
624
+ return QueryResponse(
625
+ response=response,
626
+ customer_id=request.customer_id,
627
+ method="file-based",
628
+ timestamp=datetime.datetime.now().isoformat()
629
+ )
630
+
631
+ # Fallback to instruction-based GPT
632
+ elif instruction_gpt is not None:
633
+ response = instruction_gpt.query(
634
+ message=request.message,
635
+ customer_id=request.customer_id
636
+ )
637
+ return QueryResponse(
638
+ response=response,
639
+ customer_id=request.customer_id,
640
+ method="instruction-based",
641
+ timestamp=datetime.datetime.now().isoformat()
642
+ )
643
+
644
+ else:
645
+ raise HTTPException(status_code=503, detail="No GPT tool available")
646
+
647
+ except Exception as e:
648
+ logger.error(f"Query failed for customer {request.customer_id}: {str(e)}")
649
+ raise HTTPException(status_code=500, detail=f"Query failed: {str(e)}")
650
+
651
+ @app.get("/api-info")
652
+ async def api_info():
653
+ """Get API capability information"""
654
+ try:
655
+ client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
656
+
657
+ return {
658
+ "openai_version": openai.__version__,
659
+ "has_beta": hasattr(client, 'beta'),
660
+ "has_assistants": hasattr(client.beta, 'assistants') if hasattr(client, 'beta') else False,
661
+ "has_vector_stores": hasattr(client.beta, 'vector_stores') if hasattr(client, 'beta') else False,
662
+ "current_method": "file-based" if gpt_tool else "instruction-based" if instruction_gpt else "none",
663
+ "assistant_id": gpt_tool.assistant_id if gpt_tool else instruction_gpt.assistant_id if instruction_gpt else None
664
+ }
665
+ except Exception as e:
666
+ logger.error(f"Error getting API info: {e}")
667
+ return {"error": str(e)}
668
+
669
+ @app.get("/knowledge-files")
670
+ async def list_knowledge_files():
671
+ """List all knowledge files and their status"""
672
+ files_info = []
673
+
674
+ # Candidate knowledge file paths (note: this list differs from the startup paths in lifespan)
675
+ possible_files = [
676
+ "F:/ai/makookAi/custom_gpt/data_cleaned/documents.json",
677
+ "F:/ai/makookAi/custom_gpt/data_cleaned/valetax_faq.pdf",
678
+ "F:/ai/makookAi/custom_gpt/data_cleaned/trading_guide.pdf",
679
+ "F:/ai/makookAi/custom_gpt/data_cleaned/account_info.xlsx",
680
+ "F:/ai/makookAi/custom_gpt/data_cleaned/support_procedures.docx",
681
+ "F:/ai/makookAi/custom_gpt/data_cleaned/fee_structure.csv",
682
+ ]
683
+
684
+ for file_path in possible_files:
685
+ file_info = {
686
+ "path": file_path,
687
+ "filename": os.path.basename(file_path),
688
+ "exists": os.path.exists(file_path),
689
+ "type": os.path.splitext(file_path)[1].lower(),
690
+ "size_kb": round(os.path.getsize(file_path) / 1024, 2) if os.path.exists(file_path) else 0
691
+ }
692
+ files_info.append(file_info)
693
+
694
+ # Get uploaded files from GPT tool
695
+ uploaded_files = []
696
+ if gpt_tool:
697
+ uploaded_files = gpt_tool.uploaded_files
698
+
699
+ return {
700
+ "knowledge_files": files_info,
701
+ "uploaded_file_ids": uploaded_files,
702
+ "total_existing": len([f for f in files_info if f["exists"]]),
703
+ "supported_formats": [".txt", ".md", ".json", ".csv", ".pdf", ".docx", ".xlsx", ".xls", ".py", ".js", ".html", ".css"]
704
+ }
705
+
706
+ @app.post("/reload-knowledge")
707
+ async def reload_knowledge_files():
708
+ """Reload knowledge files and update assistant"""
709
+ global gpt_tool
710
+
711
+ if not gpt_tool:
712
+ raise HTTPException(status_code=503, detail="GPT tool not initialized")
713
+
714
+ try:
715
+ # Get current knowledge files
716
+ possible_files = [
717
+ r"F:\ai\makookAi\custom_gpt\data_row\Valetax GuideBook V0.1.2 AR - copy finale .pdf",
718
+ r"F:\ai\makookAi\custom_gpt\data_row\Eaxem 2025.xlsx",
719
+ r"F:\ai\makookAi\custom_gpt\data_row\issues.xlsx",
720
+ ]
721
+
722
+ current_files = [f for f in possible_files if os.path.exists(f)]
723
+
724
+ if current_files:
725
+ # Upload new files
726
+ new_file_ids = gpt_tool.upload_files(current_files)
727
+ logger.info(f"Reloaded {len(new_file_ids)} knowledge files")
728
+
729
+ return {
730
+ "status": "success",
731
+ "files_reloaded": len(new_file_ids),
732
+ "file_names": [os.path.basename(f) for f in current_files],
733
+ "new_file_ids": new_file_ids
734
+ }
735
+ else:
736
+ return {
737
+ "status": "warning",
738
+ "message": "No knowledge files found to reload"
739
+ }
740
+
741
+ except Exception as e:
742
+ logger.error(f"Error reloading knowledge files: {e}")
743
+ raise HTTPException(status_code=500, detail=f"Failed to reload knowledge files: {str(e)}")
744
+
745
+ # --- Exception Handlers ---
746
+ @app.exception_handler(HTTPException)
747
+ async def http_exception_handler(request, exc):
748
+ """Custom HTTP exception handler"""
749
+ import datetime
750
+ # Exception handlers must return a Response object, so wrap the payload in JSONResponse
+ return JSONResponse(
+ status_code=exc.status_code,
+ content={
+ "error": exc.detail,
+ "status_code": exc.status_code,
+ "timestamp": datetime.datetime.now().isoformat()
+ },
+ )
755
+
756
+ # --- Run the app ---
757
+ if __name__ == "__main__":
758
+ import uvicorn
759
+
760
+ # Ensure environment variable is set
761
+ if not os.getenv("OPENAI_API_KEY"):
762
+ print("❌ Error: OPENAI_API_KEY environment variable is not set")
763
+ print("Please set it with: export OPENAI_API_KEY='your-api-key-here'")
764
+ exit(1)
765
+
766
+ print("🚀 Starting Valetax Customer Service API...")
767
+ uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")
768
+
769
  def respond(
770
  message,
771
  history: list[dict[str, str]],