Spaces: Running
Upload 36 files
- hybrid_chat_endpoint.py +65 -68
- hybrid_chat_endpoint_backup.py +214 -0
- main.py +2 -3
- scenario_handlers/__init__.py +7 -0
- scenario_handlers/__pycache__/base_handler.cpython-312.pyc +0 -0
- scenario_handlers/__pycache__/event_recommendation.cpython-312.pyc +0 -0
- scenario_handlers/__pycache__/exit_intent_rescue.cpython-312.pyc +0 -0
- scenario_handlers/__pycache__/post_event_feedback.cpython-312.pyc +0 -0
- scenario_handlers/__pycache__/price_inquiry.cpython-312.pyc +0 -0
- scenario_handlers/base_handler.py +186 -0
- scenario_handlers/event_recommendation.py +261 -0
- scenario_handlers/exit_intent_rescue.py +100 -0
- scenario_handlers/post_event_feedback.py +176 -0
- scenario_handlers/price_inquiry.py +224 -0
hybrid_chat_endpoint.py
CHANGED

@@ -5,21 +5,25 @@ Routes between scripted scenarios and knowledge retrieval
 from fastapi import HTTPException
 from datetime import datetime
 from typing import Dict, Any
-
+
+# Import scenario handlers
+from scenario_handlers.price_inquiry import PriceInquiryHandler
+from scenario_handlers.event_recommendation import EventRecommendationHandler
+from scenario_handlers.post_event_feedback import PostEventFeedbackHandler
+from scenario_handlers.exit_intent_rescue import ExitIntentRescueHandler


 async def hybrid_chat_endpoint(
     request,  # ChatRequest
     conversation_service,
     intent_classifier,
-
+    embedding_service,  # For handlers
+    qdrant_service,  # For handlers
     tools_service,
     advanced_rag,
-    embedding_service,
-    qdrant_service,
     chat_history_collection,
     hf_token,
-    lead_storage
+    lead_storage
 ):
     """
     Hybrid conversational chatbot: Scenario FSM + RAG

@@ -28,7 +32,7 @@ async def hybrid_chat_endpoint(
     1. Load session & scenario state
     2. Classify intent (scenario vs RAG)
     3. Route:
-       - Scenario: Execute FSM flow
+       - Scenario: Execute FSM flow with dedicated handlers
        - RAG: Knowledge retrieval
        - RAG+Resume: Answer question then resume scenario
     4. Save state & history

@@ -55,16 +59,16 @@ async def hybrid_chat_endpoint(

         # ===== ROUTING =====
         if intent.startswith("scenario:"):
-            # Route to scenario
+            # Route to dedicated scenario handler
             response_data = await handle_scenario(
                 intent,
                 request.message,
                 session_id,
                 scenario_state,
-
+                embedding_service,
+                qdrant_service,
                 conversation_service,
-
-                lead_storage  # NEW: Pass for action handling
+                lead_storage
             )

         elif intent == "rag:with_resume":

@@ -73,7 +77,6 @@ async def hybrid_chat_endpoint(
                 request,
                 session_id,
                 scenario_state,
-                advanced_rag,
                 embedding_service,
                 qdrant_service,
                 conversation_service

@@ -107,7 +110,7 @@ async def hybrid_chat_endpoint(
             response_data["response"],
             metadata={
                 "mode": response_data.get("mode", "unknown"),
-                "context_used": response_data.get("context_used", [])[:3]
+                "context_used": response_data.get("context_used", [])[:3]
             }
         )

@@ -129,71 +132,75 @@ async def handle_scenario(
     user_message,
     session_id,
     scenario_state,
-
+    embedding_service,
+    qdrant_service,
     conversation_service,
-
-    lead_storage=None
+    lead_storage
 ):
-    """
+    """
+    Handle scenario-based conversation using dedicated handlers
+
+    Replaces old scenario_engine with per-scenario handlers
+    """
+
+    # Initialize all scenario handlers
+    handlers = {
+        'price_inquiry': PriceInquiryHandler(embedding_service, qdrant_service, lead_storage),
+        'event_recommendation': EventRecommendationHandler(embedding_service, qdrant_service, lead_storage),
+        'post_event_feedback': PostEventFeedbackHandler(embedding_service, qdrant_service, lead_storage),
+        'exit_intent_rescue': ExitIntentRescueHandler(embedding_service, qdrant_service, lead_storage)
+    }

     if intent == "scenario:continue":
         # Continue existing scenario
-
-
-
+        scenario_id = scenario_state.get("active_scenario")
+
+        if scenario_id not in handlers:
+            return {
+                "response": f"Xin lỗi, scenario '{scenario_id}' không tồn tại.",
+                "mode": "error",
+                "scenario_active": False
+            }
+
+        handler = handlers[scenario_id]
+        result = handler.next_step(
+            current_step=scenario_state.get("scenario_step", 1),
             user_input=user_message,
-            scenario_data=scenario_state.get("scenario_data", {})
-            rag_service=advanced_rag
+            scenario_data=scenario_state.get("scenario_data", {})
         )
     else:
         # Start new scenario
         scenario_type = intent.split(":", 1)[1]
-
+
+        if scenario_type not in handlers:
+            return {
+                "response": f"Xin lỗi, scenario '{scenario_type}' không tồn tại.",
+                "mode": "error",
+                "scenario_active": False
+            }
+
+        handler = handlers[scenario_type]
+
+        # Get initial_data from scenario_state (if any)
+        initial_data = scenario_state.get("scenario_data", {})
+        result = handler.start(initial_data=initial_data)

     # Update scenario state
-    if result.get("end_scenario"):
+    if result.get("end_scenario") or not result.get("scenario_active", True):
         conversation_service.clear_scenario(session_id)
         scenario_active = False
-
+    elif result.get("new_state"):
         conversation_service.set_scenario_state(session_id, result["new_state"])
         scenario_active = True
-
-
-
-    action = result['action']
-    scenario_data = result.get('new_state', {}).get('scenario_data', scenario_state.get('scenario_data', {}))
-
-    if action == "send_pdf_email":
-        # Save lead with email
-        lead_storage.save_lead(
-            event_name=scenario_data.get('step_1_input', 'Unknown Event'),
-            email=scenario_data.get('step_5_input'),  # Email from step 5
-            interests={
-                "group": scenario_data.get('group_size'),
-                "wants_pdf": True
-            },
-            session_id=session_id
-        )
-        print(f"📧 Lead saved: email sent (saved to DB)")
-
-    elif action == "save_lead_phone":
-        # Save lead with phone
-        lead_storage.save_lead(
-            event_name=scenario_data.get('step_1_input', 'Unknown Event'),
-            email=scenario_data.get('step_5_input'),
-            phone=scenario_data.get('step_8_input'),  # Phone from step 8
-            interests={
-                "group": scenario_data.get('group_size'),
-                "wants_reminder": True
-            },
-            session_id=session_id
-        )
-        print(f"📱 Lead saved: SMS reminder (saved to DB)")
+    else:
+        # new_state is None → stay at same step (e.g., validation failed)
+        scenario_active = True

     return {
-        "response": result
+        "response": result.get("message", ""),
         "mode": "scenario",
-        "scenario_active": scenario_active
+        "scenario_active": scenario_active,
+        "loading_message": result.get("loading_message")  # For UI
     }

@@ -201,7 +208,6 @@ async def handle_rag_with_resume(
     request,
     session_id,
     scenario_state,
-    advanced_rag,
     embedding_service,
     qdrant_service,
     conversation_service

@@ -287,12 +293,3 @@ async def handle_pure_rag(
         "mode": "rag",
         "context_used": result.get("context_used", [])
     }
-
-
-async def simple_rag_response(message, context, system_message):
-    """Simple RAG response without LLM (for quick answers)"""
-    if context:
-        # Return top context
-        top = context[0]
-        return f"{top['metadata'].get('text', 'Không tìm thấy thông tin.')}"
-    return "Xin lỗi, tôi không tìm thấy thông tin về điều này."
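For orientation, here is a minimal, self-contained sketch of the dispatch pattern the new handle_scenario() uses: a handlers dict keyed by scenario id, start() for a new scenario, next_step() to continue one. The StubHandler and route() names are hypothetical and not part of this commit.

# Sketch only: the dispatch pattern used by the new handle_scenario(),
# with a hypothetical StubHandler in place of the real handlers.
class StubHandler:
    def start(self, initial_data=None):
        return {"message": "first question",
                "new_state": {"active_scenario": "price_inquiry", "scenario_step": 1,
                              "scenario_data": initial_data or {}}}

    def next_step(self, current_step, user_input, scenario_data):
        return {"message": f"answer for step {current_step}",
                "new_state": {"active_scenario": "price_inquiry",
                              "scenario_step": current_step + 1,
                              "scenario_data": scenario_data}}


def route(intent, user_message, scenario_state, handlers):
    # Mirrors the routing added in this commit: continue an active scenario,
    # otherwise start the scenario named after the "scenario:" prefix.
    if intent == "scenario:continue":
        handler = handlers[scenario_state["active_scenario"]]
        return handler.next_step(
            current_step=scenario_state.get("scenario_step", 1),
            user_input=user_message,
            scenario_data=scenario_state.get("scenario_data", {}),
        )
    scenario_type = intent.split(":", 1)[1]
    return handlers[scenario_type].start(initial_data=scenario_state.get("scenario_data", {}))


handlers = {"price_inquiry": StubHandler()}
print(route("scenario:price_inquiry", "hi", {}, handlers)["message"])
print(route("scenario:continue", "Event A",
            {"active_scenario": "price_inquiry", "scenario_step": 1, "scenario_data": {}},
            handlers)["message"])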
hybrid_chat_endpoint_backup.py
ADDED
@@ -0,0 +1,214 @@
"""
Hybrid Chat Endpoint: RAG + Scenario FSM
Routes between scripted scenarios and knowledge retrieval
"""
from fastapi import HTTPException
from datetime import datetime
from typing import Dict, Any
import json

# Import scenario handlers
from scenario_handlers.price_inquiry import PriceInquiryHandler
from scenario_handlers.event_recommendation import EventRecommendationHandler
from scenario_handlers.post_event_feedback import PostEventFeedbackHandler
from scenario_handlers.exit_intent_rescue import ExitIntentRescueHandler


async def hybrid_chat_endpoint(
    request,  # ChatRequest
    conversation_service,
    intent_classifier,
    embedding_service,  # NEW: For handlers
    qdrant_service,  # NEW: For handlers
    tools_service,
    advanced_rag,
    chat_history_collection,
    hf_token,
    lead_storage
):

    """
    Hybrid conversational chatbot: Scenario FSM + RAG

    Flow:
    1. Load session & scenario state
    2. Classify intent (scenario vs RAG)
    3. Route:
       - Scenario: Execute FSM flow
       - RAG: Knowledge retrieval
       - RAG+Resume: Answer question then resume scenario
    4. Save state & history
    """
    try:
        # ===== SESSION MANAGEMENT =====
        session_id = request.session_id
        if not session_id:
            session_id = conversation_service.create_session(
                metadata={"user_agent": "api", "created_via": "hybrid_chat"},
                user_id=request.user_id
            )
            print(f"✓ Created session: {session_id} (user: {request.user_id or 'anon'})")
        else:
            if not conversation_service.session_exists(session_id):
                raise HTTPException(404, detail=f"Session {session_id} not found")

        # ===== LOAD SCENARIO STATE =====
        scenario_state = conversation_service.get_scenario_state(session_id) or {}

        # ===== INTENT CLASSIFICATION =====
        intent = intent_classifier.classify(request.message, scenario_state)
        print(f"🎯 Intent: {intent}")

        # ===== ROUTING =====
        if intent.startswith("scenario:"):
            # Route to scenario engine
            response_data = await handle_scenario(
                intent,
                request.message,
                session_id,
                scenario_state,
                scenario_engine,
                conversation_service,
                advanced_rag,
                lead_storage  # NEW: Pass for action handling
            )

        elif intent == "rag:with_resume":
            # Answer question but keep scenario active
            response_data = await handle_rag_with_resume(
                request,
                session_id,
                scenario_state,
                advanced_rag,
                embedding_service,
                qdrant_service,
                conversation_service
            )

        else:  # rag:general
            # Pure RAG query
            response_data = await handle_pure_rag(
                request,
                session_id,
                advanced_rag,
                embedding_service,
                qdrant_service,
                tools_service,
                chat_history_collection,
                hf_token,
                conversation_service
            )

        # ===== SAVE HISTORY =====
        conversation_service.add_message(
            session_id,
            "user",
            request.message,
            metadata={"intent": intent}
        )

        conversation_service.add_message(
            session_id,
            "assistant",
            response_data["response"],
            metadata={
                "mode": response_data.get("mode", "unknown"),
                "context_used": response_data.get("context_used", [])[:3]  # Limit size
    request,
    session_id,
    scenario_state,
    advanced_rag,
    embedding_service,
    qdrant_service,
    conversation_service
):
    """
    Handle RAG query mid-scenario
    Answer question properly, then remind user to continue scenario
    """
    # Query RAG with proper search
    context_used = []
    if request.use_rag:
        query_embedding = embedding_service.encode_text(request.message)
        results = qdrant_service.search(
            query_embedding=query_embedding,
            limit=request.top_k,
            score_threshold=request.score_threshold,
            ef=256
        )
        context_used = results

    # Build REAL RAG response (not placeholder)
    if context_used and len(context_used) > 0:
        # Format top results nicely
        top_result = context_used[0]
        text = top_result['metadata'].get('text', '')

        # Extract most relevant snippet (first 300 chars)
        if text:
            rag_response = text[:300].strip()
            if len(text) > 300:
                rag_response += "..."
        else:
            rag_response = "Tôi tìm thấy thông tin nhưng không thể hiển thị chi tiết."

        # If multiple results, add count
        if len(context_used) > 1:
            rag_response += f"\n\n(Tìm thấy {len(context_used)} kết quả liên quan)"
    else:
        rag_response = "Xin lỗi, tôi không tìm thấy thông tin về câu hỏi này trong tài liệu."

    # Add resume hint
    resume_hint = "\n\n---\n💬 Vậy nha! Quay lại câu hỏi trước, bạn đã quyết định chưa?"

    return {
        "response": rag_response + resume_hint,
        "mode": "rag_with_resume",
        "scenario_active": True,
        "context_used": context_used
    }


async def handle_pure_rag(
    request,
    session_id,
    advanced_rag,
    embedding_service,
    qdrant_service,
    tools_service,
    chat_history_collection,
    hf_token,
    conversation_service
):
    """
    Handle pure RAG query (fallback to existing logic)
    """
    # Import existing chat_endpoint logic
    from chat_endpoint import chat_endpoint

    # Call existing endpoint
    result = await chat_endpoint(
        request,
        conversation_service,
        tools_service,
        advanced_rag,
        embedding_service,
        qdrant_service,
        chat_history_collection,
        hf_token
    )

    return {
        "response": result["response"],
        "mode": "rag",
        "context_used": result.get("context_used", [])
    }


async def simple_rag_response(message, context, system_message):
    """Simple RAG response without LLM (for quick answers)"""
    if context:
        # Return top context
        top = context[0]
        return f"{top['metadata'].get('text', 'Không tìm thấy thông tin.')}"
    return "Xin lỗi, tôi không tìm thấy thông tin về điều này."
main.py
CHANGED

@@ -776,11 +776,10 @@ async def chat(request: ChatRequest):
         request=request,
         conversation_service=conversation_service,
         intent_classifier=intent_classifier,
-
+        embedding_service=embedding_service,  # NEW: Required by handlers
+        qdrant_service=qdrant_service,  # NEW: Required by handlers
         tools_service=tools_service,
         advanced_rag=advanced_rag,
-        embedding_service=embedding_service,
-        qdrant_service=qdrant_service,
         chat_history_collection=chat_history_collection,
         hf_token=hf_token,
         lead_storage=lead_storage
scenario_handlers/__init__.py
ADDED
@@ -0,0 +1,7 @@
"""
Scenario Handlers Package
Dedicated handlers for each scenario with proper RAG integration
"""
from .base_handler import BaseScenarioHandler

__all__ = ['BaseScenarioHandler']
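As a usage note, the package re-exports only the base class; concrete handlers are imported from their submodules, as hybrid_chat_endpoint.py does. A short sketch, assuming the package from this commit is on the import path:

# Sketch only: importing the base class from the package and a concrete
# handler from its submodule.
from scenario_handlers import BaseScenarioHandler
from scenario_handlers.price_inquiry import PriceInquiryHandler

print(issubclass(PriceInquiryHandler, BaseScenarioHandler))  # True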
scenario_handlers/__pycache__/base_handler.cpython-312.pyc
ADDED: Binary file (8.03 kB)

scenario_handlers/__pycache__/event_recommendation.cpython-312.pyc
ADDED: Binary file (9.65 kB)

scenario_handlers/__pycache__/exit_intent_rescue.cpython-312.pyc
ADDED: Binary file (3.74 kB)

scenario_handlers/__pycache__/post_event_feedback.cpython-312.pyc
ADDED: Binary file (5.99 kB)

scenario_handlers/__pycache__/price_inquiry.cpython-312.pyc
ADDED: Binary file (6.88 kB)
scenario_handlers/base_handler.py
ADDED
@@ -0,0 +1,186 @@
"""
Base Scenario Handler - Abstract class for all scenario handlers
Provides common functionality: RAG search, formatting, unexpected input handling
"""
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional


class BaseScenarioHandler(ABC):
    """
    Abstract base class for scenario handlers

    Each scenario (price_inquiry, event_recommendation, etc.)
    should inherit from this and implement start() and next_step()
    """

    def __init__(self, embedding_service, qdrant_service, lead_storage):
        """
        Initialize handler with required services

        Args:
            embedding_service: JinaClipEmbeddingService for text encoding
            qdrant_service: QdrantVectorService for vector search
            lead_storage: LeadStorageService for saving customer data
        """
        self.embedding_service = embedding_service
        self.qdrant_service = qdrant_service
        self.lead_storage = lead_storage

    @abstractmethod
    def start(self, initial_data: Dict = None) -> Dict[str, Any]:
        """
        Start the scenario - return first message

        Args:
            initial_data: Optional initial context (e.g., event_name, mood)

        Returns:
            {
                "message": "First bot message",
                "new_state": {
                    "active_scenario": "scenario_id",
                    "scenario_step": 1,
                    "scenario_data": {...}
                }
            }
        """
        pass

    @abstractmethod
    def next_step(self, current_step: int, user_input: str, scenario_data: Dict) -> Dict[str, Any]:
        """
        Process user input and advance to next step

        Args:
            current_step: Current step number (1-indexed)
            user_input: User's message
            scenario_data: Accumulated scenario data

        Returns:
            {
                "message": "Bot response",
                "new_state": {...} or None if don't advance,
                "loading_message": "Optional loading text",
                "end_scenario": True/False,
                "action": "Optional action to execute"
            }
        """
        pass

    def _search_rag(self, query: str, limit: int = 3) -> list:
        """
        Execute RAG search using Qdrant

        Args:
            query: Search query text
            limit: Max number of results

        Returns:
            List of search results with metadata
        """
        try:
            embedding = self.embedding_service.encode_text(query)
            results = self.qdrant_service.search(
                query_embedding=embedding,
                limit=limit,
                score_threshold=0.5,
                ef=256
            )
            return results
        except Exception as e:
            print(f"⚠️ RAG search error: {e}")
            return []

    def _format_rag_results(self, results: list, max_length: int = 200) -> str:
        """
        Format RAG search results as readable text

        Args:
            results: Search results from _search_rag()
            max_length: Max chars per result

        Returns:
            Formatted string with numbered results
        """
        if not results or len(results) == 0:
            return "Không tìm thấy kết quả."

        formatted = []
        for i, r in enumerate(results[:3], 1):
            text = r['metadata'].get('text', '')
            if text:
                snippet = text[:max_length].strip()
                if len(text) > max_length:
                    snippet += "..."
                formatted.append(f"{i}. {snippet}")

        return "\n".join(formatted) if formatted else "Không tìm thấy kết quả."

    def handle_unexpected_input(
        self,
        user_input: str,
        expected_type: str,
        current_step: int
    ) -> Optional[Dict[str, Any]]:
        """
        Handle when user gives unexpected input (e.g., asks question instead of answering)

        Args:
            user_input: User's message
            expected_type: What we expected (email, choice, event_name, etc.)
            current_step: Current step number

        Returns:
            None - Continue with normal flow
            Dict - Return this response (RAG answer + retry prompt)
        """
        # Detect if user is asking a question instead of answering
        question_indicators = [
            "?", "đâu", "gì", "sao", "where", "what", "how",
            "khi nào", "mấy giờ", "thế nào", "bao nhiêu"
        ]

        message_lower = user_input.lower()
        is_question = any(q in message_lower for q in question_indicators)

        if is_question:
            # User asking off-topic question → Answer with RAG, then retry
            print(f"🔀 Unexpected input detected: '{user_input}' (expected: {expected_type})")

            results = self._search_rag(user_input)
            rag_answer = self._format_rag_results(results)

            # Build retry prompt based on expected_type
            retry_prompts = {
                'interest_tag': "Vậy nha! Quay lại câu hỏi: Bạn thích vibe nào? (Chill / Sôi động / Hài / Workshop)",
                'event_name': "OK! Vậy bạn muốn xem event nào trong danh sách trên?",
                'email': "Được rồi! Cho mình xin email nhé?",
                'phone': "Okie! Vậy cho mình số điện thoại để liên hệ nhé?",
                'choice': "Hiểu rồi! Vậy bạn chọn gì?",
                'rating': "Vậy nha! Bạn đánh giá mấy sao? (1-5)"
            }

            retry_msg = retry_prompts.get(expected_type, "Vậy nha! Quay lại câu hỏi trước nhé ^^")

            return {
                "message": f"{rag_answer}\n\n---\n💬 {retry_msg}",
                "new_state": None,  # Don't advance step
                "scenario_active": True,
                "loading_message": "⏳ Bạn đợi tôi tìm 1 chút nhé..."
            }

        return None  # Continue normal flow

    def _validate_email(self, email: str) -> bool:
        """Simple email validation"""
        import re
        pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
        return re.match(pattern, email) is not None

    def _validate_phone(self, phone: str) -> bool:
        """Simple phone validation (Vietnam format)"""
        import re
        # Accept formats: 0123456789, +84123456789, 84123456789
        pattern = r'^(\+?84|0)[0-9]{9,10}$'
        return re.match(pattern, phone.replace(' ', '')) is not None
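To illustrate the start()/next_step() contract and the injected services, a minimal hypothetical subclass is sketched below. It assumes the scenario_handlers package from this commit is on the import path; the Stub* classes are illustrative stand-ins, not the real services.

# Sketch only: a one-step handler built on the BaseScenarioHandler contract.
from typing import Any, Dict

from scenario_handlers.base_handler import BaseScenarioHandler


class StubEmbedding:
    def encode_text(self, text):
        return [0.0]


class StubQdrant:
    def search(self, **kwargs):
        return []


class StubLeads:
    def save_lead(self, **kwargs):
        print("lead:", kwargs)


class GreetingHandler(BaseScenarioHandler):
    def start(self, initial_data: Dict = None) -> Dict[str, Any]:
        return {
            "message": "Hi! What's your name?",
            "new_state": {"active_scenario": "greeting", "scenario_step": 1,
                          "scenario_data": initial_data or {}},
        }

    def next_step(self, current_step: int, user_input: str, scenario_data: Dict) -> Dict[str, Any]:
        return {"message": f"Nice to meet you, {user_input}!",
                "new_state": None, "scenario_active": False, "end_scenario": True}


handler = GreetingHandler(StubEmbedding(), StubQdrant(), StubLeads())
print(handler.start()["message"])
print(handler.next_step(1, "Lan", {})["message"])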
scenario_handlers/event_recommendation.py
ADDED
@@ -0,0 +1,261 @@
"""
Event Recommendation Scenario Handler
Recommends events based on user's vibe/mood with RAG integration
"""
from typing import Dict, Any
from .base_handler import BaseScenarioHandler


class EventRecommendationHandler(BaseScenarioHandler):
    """
    Handle event recommendation flow

    Steps:
    1. Ask for vibe/mood (Chill, Sôi động, Hài, Workshop)
    2. Search events matching vibe → RAG
    3. Show event list, ask which to see details
    4. Ask what info needed (price, lineup, location, time)
    5-8. Show specific info → RAG
    9. Ask if want to save event to email
    10. Collect email + send summary
    11-12. End scenario
    """

    def start(self, initial_data: Dict = None) -> Dict[str, Any]:
        """Start event recommendation flow"""
        return {
            "message": "Hello! 👋 Bạn muốn tìm sự kiện theo vibe gì nè? Chill – Sôi động – Hài – Workshop?",
            "new_state": {
                "active_scenario": "event_recommendation",
                "scenario_step": 1,
                "scenario_data": initial_data or {}
            }
        }

    def next_step(self, current_step: int, user_input: str, scenario_data: Dict) -> Dict[str, Any]:
        """Process user input and advance scenario"""

        # Get expected input type for this step
        expected_type = self._get_expected_type(current_step)

        # Check for unexpected input (off-topic questions)
        unexpected = self.handle_unexpected_input(user_input, expected_type, current_step)
        if unexpected:
            return unexpected

        # ===== STEP 1: Collect interest tag =====
        if current_step == 1:
            scenario_data['interest_tag'] = user_input

            return {
                "message": f"Mình hiểu rồi! Để mình tìm sự kiện hợp vibe **{user_input}** nha",
                "new_state": {
                    "active_scenario": "event_recommendation",
                    "scenario_step": 2,
                    "scenario_data": scenario_data
                },
                "scenario_active": True
            }

        # ===== STEP 2: Execute RAG search for events =====
        elif current_step == 2:
            # Search events matching interest_tag
            query = f"sự kiện phù hợp với {scenario_data.get('interest_tag', 'mọi người')}"
            print(f"🔍 RAG Search: {query}")

            results = self._search_rag(query, limit=3)
            formatted_events = self._format_event_list(results)

            # Save results to scenario data
            scenario_data['rag_results'] = formatted_events
            scenario_data['available_events'] = [
                r['metadata'].get('event_name', f'Event {i+1}')
                for i, r in enumerate(results[:3])
            ]

            return {
                "message": f"Đây là 2–3 event hợp với bạn nè:\n{formatted_events}\n\nBạn có muốn xem chi tiết event nào không?",
                "new_state": {
                    "active_scenario": "event_recommendation",
                    "scenario_step": 3,
                    "scenario_data": scenario_data
                },
                "scenario_active": True,
                "loading_message": "⏳ Bạn đợi tôi tìm 1 chút nhé..."
            }

        # ===== STEP 3: User picks event =====
        elif current_step == 3:
            scenario_data['event_name'] = user_input

            return {
                "message": "Bạn cần xem: giá – line-up – địa điểm – hay thời gian của sự kiện?",
                "new_state": {
                    "active_scenario": "event_recommendation",
                    "scenario_step": 4,
                    "scenario_data": scenario_data
                },
                "scenario_active": True
            }

        # ===== STEP 4: Branch based on info choice =====
        elif current_step == 4:
            choice = self._detect_choice(user_input)
            event_name = scenario_data.get('event_name', 'sự kiện này')

            # Build RAG query based on choice
            query_map = {
                'price': f"giá vé {event_name}",
                'lineup': f"lineup nghệ sĩ {event_name}",
                'location': f"địa điểm tổ chức {event_name}",
                'time': f"thời gian lịch diễn {event_name}"
            }

            query = query_map.get(choice, query_map['price'])
            print(f"🔍 RAG Search: {query}")

            results = self._search_rag(query)
            formatted_info = self._format_rag_results(results)

            # Build response message
            message_map = {
                'price': f"Giá vé event {event_name} nè:\n{formatted_info}",
                'lineup': f"Lineup / nghệ sĩ của event {event_name} là:\n{formatted_info}",
                'location': f"Địa điểm tổ chức event {event_name}:\n{formatted_info}",
                'time': f"Thời gian / lịch diễn của event {event_name}:\n{formatted_info}"
            }

            return {
                "message": message_map.get(choice, message_map['price']),
                "new_state": {
                    "active_scenario": "event_recommendation",
                    "scenario_step": 9,  # Skip to email step
                    "scenario_data": scenario_data
                },
                "scenario_active": True,
                "loading_message": "⏳ Bạn đợi tôi tìm 1 chút nhé..."
            }

        # ===== STEP 9: Ask if want to save event to email =====
        elif current_step == 9:
            choice = self._detect_yes_no(user_input)

            if choice == 'yes':
                return {
                    "message": "Cho mình xin email để gửi bản tóm tắt event kèm link mua vé?",
                    "new_state": {
                        "active_scenario": "event_recommendation",
                        "scenario_step": 10,
                        "scenario_data": scenario_data
                    },
                    "scenario_active": True
                }
            else:
                return {
                    "message": "Okie, bạn cần event theo vibe khác không nè? 😄",
                    "new_state": None,
                    "scenario_active": False,
                    "end_scenario": True
                }

        # ===== STEP 10: Collect email and send summary =====
        elif current_step == 10:
            email = user_input.strip()

            if not self._validate_email(email):
                return {
                    "message": "Email này có vẻ không đúng định dạng. Bạn nhập lại giúp mình nhé? (Ví dụ: name@example.com)",
                    "new_state": None,  # Stay at same step
                    "scenario_active": True
                }

            # Save lead
            scenario_data['email'] = email

            try:
                self.lead_storage.save_lead(
                    event_name=scenario_data.get('event_name', 'Unknown Event'),
                    email=email,
                    interests={
                        "vibe": scenario_data.get('interest_tag'),
                        "wants_event_summary": True
                    },
                    session_id=scenario_data.get('session_id')
                )
                print(f"📧 Lead saved: {email}")
            except Exception as e:
                print(f"⚠️ Error saving lead: {e}")

            return {
                "message": "Đã gửi email cho bạn nha! ✨",
                "new_state": None,
                "scenario_active": False,
                "end_scenario": True,
                "action": "send_event_summary_email"
            }

        # Fallback - unknown step
        return {
            "message": "Xin lỗi, có lỗi xảy ra. Bạn muốn bắt đầu lại không?",
            "new_state": None,
            "scenario_active": False,
            "end_scenario": True
        }

    def _get_expected_type(self, step: int) -> str:
        """Get expected input type for each step"""
        type_map = {
            1: 'interest_tag',
            2: None,  # Auto-advance after RAG
            3: 'event_name',
            4: 'choice',
            9: 'choice',
            10: 'email'
        }
        return type_map.get(step, 'text')

    def _format_event_list(self, results: list) -> str:
        """Format event search results as numbered list"""
        if not results or len(results) == 0:
            return "Hiện tại chưa có event phù hợp 😢\nBạn thử vibe khác nhé!"

        events = []
        for i, r in enumerate(results[:3], 1):
            metadata = r.get('metadata', {})
            name = metadata.get('event_name', f'Event {i}')
            date = metadata.get('date', 'TBA')
            location = metadata.get('location', '')

            event_str = f"{i}. **{name}**"
            if date != 'TBA':
                event_str += f" ({date})"
            if location:
                event_str += f" - {location}"

            events.append(event_str)

        return "\n".join(events)

    def _detect_choice(self, user_input: str) -> str:
        """Detect what info user wants to see"""
        input_lower = user_input.lower()

        if any(k in input_lower for k in ['giá', 'price', 'vé', 'ticket', 'bao nhiêu']):
            return 'price'
        elif any(k in input_lower for k in ['lineup', 'line-up', 'nghệ sĩ', 'artist', 'performer']):
            return 'lineup'
        elif any(k in input_lower for k in ['địa điểm', 'location', 'ở đâu', 'where', 'chỗ']):
            return 'location'
        elif any(k in input_lower for k in ['thời gian', 'time', 'khi nào', 'when', 'lịch', 'date']):
            return 'time'
        else:
            return 'price'  # Default

    def _detect_yes_no(self, user_input: str) -> str:
        """Detect yes/no response"""
        input_lower = user_input.lower()

        if any(k in input_lower for k in ['có', 'yes', 'ok', 'được', 'ừ', 'oke']):
            return 'yes'
        else:
            return 'no'
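A short sketch of stepping this handler through its first two steps with stub services that return no search hits; the Stub* classes are hypothetical and the snippet assumes the package from this commit is importable.

# Sketch only: driving EventRecommendationHandler through steps 1-2.
from scenario_handlers.event_recommendation import EventRecommendationHandler


class StubEmbedding:
    def encode_text(self, text):
        return [0.0]


class StubQdrant:
    def search(self, **kwargs):
        return []  # no events indexed in this sketch


class StubLeads:
    def save_lead(self, **kwargs):
        pass


h = EventRecommendationHandler(StubEmbedding(), StubQdrant(), StubLeads())
state = h.start()["new_state"]                                    # step 1: asks for a vibe
r1 = h.next_step(state["scenario_step"], "Chill", state["scenario_data"])
r2 = h.next_step(r1["new_state"]["scenario_step"], "xem tiếp", r1["new_state"]["scenario_data"])
print(r2["message"])  # with no hits, step 2 falls back to the "chưa có event phù hợp" message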
scenario_handlers/exit_intent_rescue.py
ADDED
@@ -0,0 +1,100 @@
"""
Exit Intent Rescue Scenario Handler
Quick flow to retain users about to leave
"""
from typing import Dict, Any
from .base_handler import BaseScenarioHandler


class ExitIntentRescueHandler(BaseScenarioHandler):
    """
    Handle exit intent rescue flow

    Very short 2-step flow to capture leaving users

    Steps:
    1. Offer discount coupon
    2. Collect email to send coupon
    """

    def start(self, initial_data: Dict = None) -> Dict[str, Any]:
        """Start exit intent flow with urgency"""
        return {
            "message": "Khoan đã! 😭 Trước khi bạn rời đi…\n\nChúng mình sắp có mã giảm **5%** cho lần mua đầu tiên. Bạn muốn nhận không?",
            "new_state": {
                "active_scenario": "exit_intent_rescue",
                "scenario_step": 1,
                "scenario_data": initial_data or {}
            }
        }

    def next_step(self, current_step: int, user_input: str, scenario_data: Dict) -> Dict[str, Any]:
        """Process and try to capture email quickly"""

        # STEP 1: Want coupon?
        if current_step == 1:
            choice = self._detect_yes_no(user_input)

            if choice == 'yes':
                return {
                    "message": "Tuyệt vời! 🎉 Cho mình xin email để gửi mã giảm giá nhé?",
                    "new_state": {
                        "active_scenario": "exit_intent_rescue",
                        "scenario_step": 2,
                        "scenario_data": scenario_data
                    },
                    "scenario_active": True
                }
            else:
                return {
                    "message": "Okie! Hẹn gặp lại bạn nhé 👋",
                    "new_state": None,
                    "scenario_active": False,
                    "end_scenario": True
                }

        # STEP 2: Collect email and send coupon
        elif current_step == 2:
            email = user_input.strip()

            if not self._validate_email(email):
                return {
                    "message": "Email này có vẻ không đúng. Bạn nhập lại nhanh giúp mình nhé?",
                    "new_state": None,
                    "scenario_active": True
                }

            scenario_data['email'] = email

            # Save lead with high priority (exit intent)
            try:
                self.lead_storage.save_lead(
                    event_name="Exit Intent Coupon",
                    email=email,
                    interests={"exit_intent": True, "discount_5_percent": True},
                    session_id=scenario_data.get('session_id')
                )
                print(f"🎯 Exit intent lead saved: {email}")
            except Exception as e:
                print(f"⚠️ Error saving lead: {e}")

            return {
                "message": "Đã gửi mã **FIRST5** vào email rồi nha! ✨\n\nDùng ngay hôm nay nhé, mã chỉ có hiệu lực 24h thôi!",
                "new_state": None,
                "scenario_active": False,
                "end_scenario": True,
                "action": "send_coupon_email"
            }

        # Fallback
        return {
            "message": "Hẹn gặp lại bạn! 👋",
            "new_state": None,
            "scenario_active": False,
            "end_scenario": True
        }

    def _detect_yes_no(self, user_input: str) -> str:
        """Detect yes/no quickly"""
        input_lower = user_input.lower()
        return 'yes' if any(k in input_lower for k in ['có', 'yes', 'ok', 'được', 'ừ', 'muốn']) else 'no'
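The yes/no detection above is a plain keyword check; a standalone restatement for illustration (names are hypothetical, not part of the commit):

# Sketch only: the keyword-based yes/no check used by ExitIntentRescueHandler.
def detect_yes_no(user_input: str) -> str:
    input_lower = user_input.lower()
    return 'yes' if any(k in input_lower for k in ['có', 'yes', 'ok', 'được', 'ừ', 'muốn']) else 'no'


print(detect_yes_no("Ừ, muốn chứ"))  # yes
print(detect_yes_no("thôi khỏi"))    # no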
scenario_handlers/post_event_feedback.py
ADDED
@@ -0,0 +1,176 @@
"""
Post Event Feedback Scenario Handler
Collects feedback after user attends an event
"""
from typing import Dict, Any
from .base_handler import BaseScenarioHandler


class PostEventFeedbackHandler(BaseScenarioHandler):
    """
    Handle post-event feedback flow

    Requires initial_data: {event_name, event_date, event_id}

    Steps:
    1. Ask for rating (1-5 stars)
    2. Ask what they liked most
    3. Ask for improvement suggestions
    4. Thank + ask if want similar events
    5. Collect email for future events
    """

    def start(self, initial_data: Dict = None) -> Dict[str, Any]:
        """Start feedback flow with event context"""
        initial_data = initial_data or {}
        event_name = initial_data.get('event_name', 'sự kiện')

        return {
            "message": f"Cảm ơn bạn đã tham dự *{event_name}* hôm qua! 🎉\n\nBạn thấy trải nghiệm như thế nào? (1-5 sao)",
            "new_state": {
                "active_scenario": "post_event_feedback",
                "scenario_step": 1,
                "scenario_data": initial_data
            }
        }

    def next_step(self, current_step: int, user_input: str, scenario_data: Dict) -> Dict[str, Any]:
        """Process feedback and advance"""

        expected_type = self._get_expected_type(current_step)
        unexpected = self.handle_unexpected_input(user_input, expected_type, current_step)
        if unexpected:
            return unexpected

        # STEP 1: Collect rating
        if current_step == 1:
            rating = self._extract_rating(user_input)
            scenario_data['rating'] = rating

            if rating >= 4:
                msg = "Tuyệt vời! 🎉 Bạn thích điểm gì nhất về event?"
            else:
                msg = "Cảm ơn feedback! Bạn thấy event cần cải thiện điểm nào?"

            return {
                "message": msg,
                "new_state": {
                    "active_scenario": "post_event_feedback",
                    "scenario_step": 2,
                    "scenario_data": scenario_data
                },
                "scenario_active": True
            }

        # STEP 2: What they liked/disliked
        elif current_step == 2:
            scenario_data['feedback_text'] = user_input

            return {
                "message": "Cảm ơn bạn nhiều! Feedback này giúp chúng mình rất nhiều 💙\n\nBạn muốn nhận thông tin về các event tương tự không?",
                "new_state": {
                    "active_scenario": "post_event_feedback",
                    "scenario_step": 4,
                    "scenario_data": scenario_data
                },
                "scenario_active": True
            }

        # STEP 4: Want similar events?
        elif current_step == 4:
            choice = self._detect_yes_no(user_input)

            if choice == 'yes':
                return {
                    "message": "Tuyệt! Cho mình xin email để gửi thông tin event sắp tới nhé?",
                    "new_state": {
                        "active_scenario": "post_event_feedback",
                        "scenario_step": 5,
                        "scenario_data": scenario_data
                    },
                    "scenario_active": True
                }
            else:
                # Save feedback without email
                self._save_feedback(scenario_data)

                return {
                    "message": "Okie! Cảm ơn bạn đã dành thời gian góp ý 🙏",
                    "new_state": None,
                    "scenario_active": False,
                    "end_scenario": True
                }

        # STEP 5: Collect email
        elif current_step == 5:
            email = user_input.strip()

            if not self._validate_email(email):
                return {
                    "message": "Email này có vẻ không đúng. Bạn nhập lại giúp mình nhé?",
                    "new_state": None,
                    "scenario_active": True
                }

            scenario_data['email'] = email

            # Save feedback + email
            self._save_feedback(scenario_data)

            # Save lead
            try:
                self.lead_storage.save_lead(
                    event_name=scenario_data.get('event_name'),
                    email=email,
                    interests={"wants_similar_events": True},
                    session_id=scenario_data.get('session_id')
                )
                print(f"📧 Feedback + lead saved: {email}")
            except Exception as e:
                print(f"⚠️ Error saving lead: {e}")

            return {
                "message": "Cảm ơn bạn! Mình sẽ gửi thông tin event mới cho bạn sớm nhất ✨",
                "new_state": None,
                "scenario_active": False,
                "end_scenario": True
            }

        # Fallback
        return {
            "message": "Cảm ơn bạn! Hẹn gặp lại ✨",
            "new_state": None,
            "scenario_active": False,
            "end_scenario": True
        }

    def _get_expected_type(self, step: int) -> str:
        """Get expected input type"""
        type_map = {
            1: 'rating',
            2: 'text',
            4: 'choice',
            5: 'email'
        }
        return type_map.get(step, 'text')

    def _extract_rating(self, user_input: str) -> int:
        """Extract rating from user input"""
        import re
        # Look for numbers 1-5
        match = re.search(r'[1-5]', user_input)
        if match:
            return int(match.group())

        # Default to 3 if can't extract
        return 3

    def _detect_yes_no(self, user_input: str) -> str:
        """Detect yes/no"""
        input_lower = user_input.lower()
        return 'yes' if any(k in input_lower for k in ['có', 'yes', 'ok', 'được', 'ừ']) else 'no'

    def _save_feedback(self, scenario_data: Dict):
        """Save feedback to database (placeholder)"""
        # TODO: Implement actual feedback storage
        print(f"💾 Feedback saved: {scenario_data.get('rating')} stars - {scenario_data.get('feedback_text', '')[:50]}")
scenario_handlers/price_inquiry.py
ADDED
@@ -0,0 +1,224 @@
"""
Price Inquiry Scenario Handler
Helps users understand pricing and collects leads
"""
from typing import Dict, Any
from .base_handler import BaseScenarioHandler


class PriceInquiryHandler(BaseScenarioHandler):
    """
    Handle price inquiry flow

    Steps:
    1. Ask which event
    2. Ask group size (1 person vs group)
    3. Show pricing info
    4. Ask if want PDF via email
    5. Collect email → Send PDF
    6. Ask if want SMS reminder
    7-8. Collect phone → Send SMS
    """

    def start(self, initial_data: Dict = None) -> Dict[str, Any]:
        """Start price inquiry flow"""
        return {
            "message": "Hello 👋 Bạn muốn xem giá vé của show nào?",
            "new_state": {
                "active_scenario": "price_inquiry",
                "scenario_step": 1,
                "scenario_data": initial_data or {}
            }
        }

    def next_step(self, current_step: int, user_input: str, scenario_data: Dict) -> Dict[str, Any]:
        """Process user input and advance scenario"""

        expected_type = self._get_expected_type(current_step)
        unexpected = self.handle_unexpected_input(user_input, expected_type, current_step)
        if unexpected:
            return unexpected

        # STEP 1: Which event?
        if current_step == 1:
            scenario_data['event_name'] = user_input
            return {
                "message": "Bạn đi 1 mình hay đi nhóm?",
                "new_state": {
                    "active_scenario": "price_inquiry",
                    "scenario_step": 2,
                    "scenario_data": scenario_data
                },
                "scenario_active": True
            }

        # STEP 2: Group size
        elif current_step == 2:
            group = self._detect_group_size(user_input)
            scenario_data['group_size'] = group

            event = scenario_data.get('event_name', 'event này')

            # Show pricing based on group size
            if group == 'single':
                msg = f"Giá vé cho **{event}**:\n- Vé thường: 300k\n- Vé VIP: 500k"
            else:
                msg = f"Giá vé nhóm cho **{event}**:\n- Nhóm 3-5: 250k/người\n- Nhóm 6+: 200k/người"

            return {
                "message": msg + "\n\nBạn muốn mình gửi bảng giá chi tiết qua email không?",
                "new_state": {
                    "active_scenario": "price_inquiry",
                    "scenario_step": 4,  # Skip step 3
                    "scenario_data": scenario_data
                },
                "scenario_active": True
            }

        # STEP 4: Want PDF?
        elif current_step == 4:
            choice = self._detect_yes_no(user_input)

            if choice == 'yes':
                return {
                    "message": "Cho mình xin email để gửi PDF bảng giá nhé?",
                    "new_state": {
                        "active_scenario": "price_inquiry",
                        "scenario_step": 5,
                        "scenario_data": scenario_data
                    },
                    "scenario_active": True
                }
            else:
                return {
                    "message": "Okie! Bạn cần tư vấn gì thêm không? 😊",
                    "new_state": None,
                    "scenario_active": False,
                    "end_scenario": True
                }

        # STEP 5: Collect email
        elif current_step == 5:
            email = user_input.strip()

            if not self._validate_email(email):
                return {
                    "message": "Email này có vẻ không đúng. Bạn nhập lại giúp mình nhé?",
                    "new_state": None,
                    "scenario_active": True
                }

            scenario_data['email'] = email

            # Save lead
            try:
                self.lead_storage.save_lead(
                    event_name=scenario_data.get('event_name'),
                    email=email,
                    interests={"group": scenario_data.get('group_size'), "wants_pdf": True},
                    session_id=scenario_data.get('session_id')
                )
                print(f"📧 Lead saved: {email}")
            except Exception as e:
                print(f"⚠️ Error saving lead: {e}")

            return {
                "message": "Đã gửi PDF rồi nha 🎫\n\nBạn muốn nhận nhắc lịch qua SMS không?",
                "new_state": {
                    "active_scenario": "price_inquiry",
                    "scenario_step": 6,
                    "scenario_data": scenario_data
                },
                "scenario_active": True,
                "action": "send_pdf_email"
            }

        # STEP 6: Want SMS?
        elif current_step == 6:
            choice = self._detect_yes_no(user_input)

            if choice == 'yes':
                return {
                    "message": "Cho mình xin số điện thoại để gửi SMS nhắc lịch nhé?",
                    "new_state": {
                        "active_scenario": "price_inquiry",
                        "scenario_step": 8,
                        "scenario_data": scenario_data
                    },
                    "scenario_active": True
                }
            else:
                return {
                    "message": "Okie! Cảm ơn bạn nha ✨",
                    "new_state": None,
                    "scenario_active": False,
                    "end_scenario": True
                }

        # STEP 8: Collect phone
        elif current_step == 8:
            phone = user_input.strip()

            if not self._validate_phone(phone):
                return {
                    "message": "Số điện thoại này có vẻ không đúng. Bạn nhập lại giúp mình nhé? (VD: 0901234567)",
                    "new_state": None,
                    "scenario_active": True
                }

            scenario_data['phone'] = phone

            # Save lead with phone
            try:
                self.lead_storage.save_lead(
                    event_name=scenario_data.get('event_name'),
                    email=scenario_data.get('email'),
                    phone=phone,
                    interests={"group": scenario_data.get('group_size'), "wants_reminder": True},
                    session_id=scenario_data.get('session_id')
                )
                print(f"📱 Lead saved: {phone}")
            except Exception as e:
                print(f"⚠️ Error saving lead: {e}")

            return {
                "message": "Đã lưu rồi nha! Mình sẽ nhắc bạn trước show 1 ngày ✨",
                "new_state": None,
                "scenario_active": False,
                "end_scenario": True,
                "action": "save_lead_phone"
            }

        # Fallback
        return {
            "message": "Có lỗi xảy ra. Bạn muốn bắt đầu lại không?",
            "new_state": None,
            "scenario_active": False,
            "end_scenario": True
        }

    def _get_expected_type(self, step: int) -> str:
        """Get expected input type for each step"""
        type_map = {
            1: 'event_name',
            2: 'choice',
            4: 'choice',
            5: 'email',
            6: 'choice',
            8: 'phone'
        }
        return type_map.get(step, 'text')

    def _detect_group_size(self, user_input: str) -> str:
        """Detect if single or group"""
        input_lower = user_input.lower()

        if any(k in input_lower for k in ['nhóm', 'group', 'team', 'mấy người', 'nhiều']):
            return 'group'
        else:
            return 'single'

    def _detect_yes_no(self, user_input: str) -> str:
        """Detect yes/no"""
        input_lower = user_input.lower()
        return 'yes' if any(k in input_lower for k in ['có', 'yes', 'ok', 'được', 'ừ']) else 'no'
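The email and phone checks this handler inherits from BaseScenarioHandler are simple regexes; restated standalone for illustration (function names are hypothetical):

# Sketch only: standalone restatement of the email/phone validators used above.
import re


def validate_email(email: str) -> bool:
    return re.match(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', email) is not None


def validate_phone(phone: str) -> bool:
    # Accepts 0xxxxxxxxx, 84xxxxxxxxx and +84xxxxxxxxx with 9-10 trailing digits
    return re.match(r'^(\+?84|0)[0-9]{9,10}$', phone.replace(' ', '')) is not None


print(validate_email("name@example.com"))  # True
print(validate_phone("0901234567"))        # True
print(validate_phone("12345"))             # False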