ABAO77 committed on
Commit
61e4b1e
·
1 Parent(s): 7f15e1c

feat: evaluation when end

Browse files
src/agents/evaluation/agent.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import ChatPromptTemplate
2
+ from langchain_core.pydantic_v1 import BaseModel, Field
3
+ from src.config.llm import model
4
+ from src.utils.logger import logger
5
+ from .prompt import evaluation_prompt
6
+ from langgraph.checkpoint.memory import InMemorySaver
7
+ from typing import List, Dict, Any
8
+ from src.agents.role_play.flow import role_play_agent
9
+
10
+
11
+ # Define the structured output format
12
class ResponseFormatter(BaseModel):
    """Structured output format for conversation evaluation.

    Used with ``model.with_structured_output`` so the LLM's evaluation is
    parsed into a validated schema instead of free text.
    """

    # Overall score, validated to the 0-100 range by pydantic (ge/le).
    score: int = Field(
        ..., description="Overall conversation score out of 100", ge=0, le=100
    )
    # One-paragraph summary of the whole conversation.
    feedback: str = Field(..., description="Overall feedback summary")
    strengths: List[str] = Field(..., description="List of conversation strengths")
    improvements: List[str] = Field(..., description="List of areas for improvement")
    suggestions: List[str] = Field(
        ..., description="List of specific improvement suggestions"
    )
    next_steps: List[str] = Field(..., description="List of recommended next steps")
25
+
26
+
27
+ # Create the prompt template
28
+
29
+
30
async def evaluate_conversation(
    session_id: str,
    learner_level: str = "beginner",
    scenario_title: str = "",
    scenario_description: str = "",
    key_vocabulary: str = "",
) -> Dict[str, Any]:
    """
    Evaluate a conversation based on the session ID and provide feedback.

    Args:
        session_id: The thread ID for the conversation.
        learner_level: The English level of the learner.
        scenario_title: Title of the conversation scenario.
        scenario_description: Description of the conversation scenario.
        key_vocabulary: Key vocabulary words from the scenario.

    Returns:
        Dict containing the evaluation results: score, feedback, strengths,
        improvements, suggestions and next_steps.
    """
    logger.info(f"Evaluating conversation for session_id: {session_id}")

    # Load the stored conversation for this session from the roleplay
    # agent's checkpointed state.
    config = {"configurable": {"thread_id": session_id}}
    snapshot = await role_play_agent().aget_state(config)
    messages = snapshot.values.get("messages", [])
    if not messages:
        # Nothing to evaluate for this thread — return an empty,
        # zero-score result instead of invoking the LLM.
        return {
            "score": 0,
            "feedback": "No conversation found for this session.",
            "strengths": [],
            "improvements": [],
            "suggestions": [],
            "next_steps": [],
        }

    # Reuse the shared system prompt from src/agents/evaluation/prompt.py
    # (already imported at module level) instead of duplicating the same
    # ~65-line prompt text inline, where the two copies would drift apart.
    evaluation_prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", evaluation_prompt),
            ("placeholder", "{messages}"),
        ]
    )
    chain = evaluation_prompt_template | model.with_structured_output(ResponseFormatter)

    # Call the LLM with the formatted prompt; the structured-output wrapper
    # parses the reply into the ResponseFormatter schema.
    structured_output: ResponseFormatter = await chain.ainvoke(
        {
            "session_id": session_id,
            "learner_level": learner_level,
            "scenario_title": scenario_title,
            "scenario_description": scenario_description,
            "key_vocabulary": key_vocabulary,
            "messages": messages,
        }
    )

    # Convert the validated model into a plain dict for the API layer.
    return {
        "score": structured_output.score,
        "feedback": structured_output.feedback,
        "strengths": structured_output.strengths,
        "improvements": structured_output.improvements,
        "suggestions": structured_output.suggestions,
        "next_steps": structured_output.next_steps,
    }
src/agents/evaluation/prompt.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# System prompt for the conversation-evaluation agent. Rendered through a
# ChatPromptTemplate, so the {placeholders} below are filled at runtime with:
# session_id, learner_level, scenario_title, scenario_description,
# key_vocabulary (see src/agents/evaluation/agent.py).
evaluation_prompt = """# CONVERSATION EVALUATOR - English Learning Assessment Specialist

You are **WISE Evaluator**, an expert English tutor who analyzes conversations between learners and AI roleplay partners. Your job is to provide comprehensive feedback that helps learners improve.

## Evaluation Context
- **Session ID**: {session_id}
- **Learner Level**: {learner_level}
- **Scenario**: {scenario_title} - {scenario_description}
- **Key Vocabulary**: {key_vocabulary}

## Your Evaluation Mission
1. **Score the conversation** (0-100 scale) based on fluency, accuracy, and engagement
2. **Identify strengths** - What did the learner do well?
3. **Pinpoint areas for improvement** - Where can they get better?
4. **Provide specific suggestions** - Concrete actions for improvement
5. **Recommend next steps** - What should they practice next?

## Scoring Criteria

### Fluency (30 points)
- **Flow**: How naturally does the conversation progress?
- **Response time**: Are there appropriate pauses or unnatural delays?
- **Turn-taking**: Good balance of speaking between learner and AI?

### Accuracy (30 points)
- **Grammar**: Correct sentence structures and verb forms
- **Vocabulary**: Appropriate word choices and usage
- **Pronunciation**: (If audio available) Clear pronunciation of words

### Engagement (20 points)
- **Relevance**: Staying on topic and scenario context
- **Interaction**: Active participation and questions
- **Creativity**: Bringing personal experiences or unique responses

### Vocabulary Usage (20 points)
- **Range**: Using diverse vocabulary from the scenario
- **Accuracy**: Correct usage of key vocabulary words
- **Complexity**: Appropriate challenge level for learner

## Response Format Requirements

You must provide your response in the following structured format:

### SCORE: [X/100]
Provide a single overall score out of 100.

### STRENGTHS:
List specific strengths the learner demonstrated in the conversation.

### AREAS FOR IMPROVEMENT:
List specific areas where the learner can improve.

### IMPROVEMENT SUGGESTIONS:
Provide concrete, actionable suggestions for improvement with examples.

### NEXT STEPS:
Recommend specific next steps for continued learning and practice.

## Important Guidelines:
- **Be encouraging**: Focus on growth, not just mistakes
- **Be specific**: Give concrete examples, not vague advice
- **Be appropriate**: Match feedback complexity to learner level
- **Be actionable**: Every suggestion should be something they can practice
- **Use markdown**: Structure feedback clearly with headers and bullet points

Remember: Your goal is to help learners feel motivated while giving them clear paths to improvement. Balance honest feedback with positive reinforcement.
"""
src/agents/lesson_practice/flow.py DELETED
@@ -1,41 +0,0 @@
1
- from langgraph.graph import StateGraph, START, END
2
- from .func import State, trim_history, agent, tool_node
3
- from langgraph.graph.state import CompiledStateGraph
4
- from langgraph.checkpoint.memory import InMemorySaver
5
-
6
-
7
- class LessonPracticeAgent:
8
- def __init__(self):
9
- pass
10
-
11
- @staticmethod
12
- def should_continue(state: State):
13
- messages = state["messages"]
14
- last_message = messages[-1]
15
- if not last_message.tool_calls:
16
- return "end"
17
- else:
18
- return "continue"
19
-
20
- def node(self, graph: StateGraph):
21
- graph.add_node("trim_history", trim_history)
22
- graph.add_node("agent", agent)
23
- graph.add_node("tools", tool_node)
24
- return graph
25
-
26
- def edge(self, graph: StateGraph):
27
- graph.add_edge(START, "trim_history")
28
- graph.add_edge("trim_history", "agent")
29
- graph.add_conditional_edges(
30
- "agent", self.should_continue, {"end": END, "continue": "tools"}
31
- )
32
- return graph
33
-
34
- def __call__(self, checkpointer=InMemorySaver()) -> CompiledStateGraph:
35
- graph = StateGraph(State)
36
- graph: StateGraph = self.node(graph)
37
- graph: StateGraph = self.edge(graph)
38
- return graph.compile(checkpointer=checkpointer)
39
-
40
-
41
- lesson_practice_agent = LessonPracticeAgent()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/agents/lesson_practice/func.py DELETED
@@ -1,60 +0,0 @@
1
- from typing import (
2
- Annotated,
3
- Sequence,
4
- TypedDict,
5
- )
6
- from langchain_core.messages import ToolMessage, AnyMessage, RemoveMessage
7
- from langgraph.graph.message import add_messages
8
- import json
9
- from .prompt import conversation_prompt
10
- from src.config.llm import model
11
-
12
-
13
- class State(TypedDict):
14
- """The state of the agent."""
15
-
16
- unit: str
17
- vocabulary: list
18
- key_structures: list
19
- practice_questions: list
20
- student_level: list
21
- messages: Annotated[Sequence[AnyMessage], add_messages]
22
-
23
-
24
- tools = []
25
-
26
- tools_by_name = {tool.name: tool for tool in tools}
27
-
28
-
29
- def trim_history(state: State):
30
- if not state.get("active_agent"):
31
- state["active_agent"] = "Roleplay Agent"
32
- history = state.get("messages", [])
33
- if len(history) > 25:
34
- num_to_remove = len(history) - 5
35
- remove_messages = [
36
- RemoveMessage(id=history[i].id) for i in range(num_to_remove)
37
- ]
38
- state["messages"] = remove_messages
39
- return state
40
-
41
-
42
- # Define our tool node
43
- def tool_node(state: State):
44
- outputs = []
45
- for tool_call in state["messages"][-1].tool_calls:
46
- tool_result = tools_by_name[tool_call["name"]].invoke(tool_call["args"])
47
- outputs.append(
48
- ToolMessage(
49
- content=json.dumps(tool_result),
50
- name=tool_call["name"],
51
- tool_call_id=tool_call["id"],
52
- )
53
- )
54
- return {"messages": outputs}
55
-
56
-
57
- async def agent(state: State):
58
- llm = conversation_prompt | model
59
- response = await llm.ainvoke(state)
60
- return {"messages": response}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/agents/lesson_practice/prompt.py DELETED
@@ -1,94 +0,0 @@
1
- from langchain_core.prompts import ChatPromptTemplate
2
- from src.config.llm import model
3
-
4
- conversation_prompt = ChatPromptTemplate.from_messages(
5
- [
6
- (
7
- "system",
8
- """# English Practice Agent - Adaptive & Personal
9
-
10
- ## Context Data
11
- ```
12
- UNIT: {unit}
13
- VOCABULARY: {vocabulary}
14
- KEY STRUCTURES: {key_structures}
15
- PRACTICE QUESTIONS: {practice_questions}
16
- STUDENT LEVEL: {student_level}
17
- ```
18
-
19
- ## Core Rules (Priority Order)
20
-
21
- ### 1. Language Detection & Response
22
- - **Student uses Vietnamese** → Full Vietnamese response
23
- - **Student shows confusion** → Switch to Vietnamese immediately
24
- - **Student demonstrates fluency** → Can use English with Vietnamese translation
25
- - **Default assumption:** Start Vietnamese-friendly
26
-
27
- ### 2. Adaptive Response Length
28
- - **Struggling student:** 4-6 Vietnamese words max
29
- - **Confident student:** 8-12 words mixed language
30
- - **Complex explanation needed:** Break into 2 short messages
31
- - **Always:** One concept per response
32
-
33
- ### 3. Smart Error Handling
34
- - **1st mistake:** "Thử lại: [correct form] (meaning)"
35
- - **2nd mistake:** Give answer, move on smoothly
36
- - **Track:** One grammar point at a time
37
-
38
- ## Teaching Intelligence
39
-
40
- ### Emotion Detection & Response
41
- **Student signals frustrated/confused:**
42
- - Switch to Vietnamese comfort mode
43
- - Simplify current task
44
- - Ask about their interests to re-engage
45
-
46
- **Student signals confident:**
47
- - Introduce gentle challenges
48
- - Mix practice formats
49
- - Maintain momentum
50
-
51
- **Student signals bored:**
52
- - Change practice type immediately
53
- - Connect to personal interests
54
- - Add variety
55
-
56
- ### Practice Formats (Rotate Based on Mood)
57
- - **Contextual:** Give situation → create sentence
58
- - **Word ordering:** Scrambled words → arrange correctly
59
- - **Fill blanks:** Complete the sentence
60
- - **Translation:** English sentence + immediate Vietnamese meaning
61
-
62
- ### Personalization
63
- - **Remember:** Student's interests, struggles, preferences
64
- - **Adapt examples:** Use their hobbies/life context
65
- - **Track progress:** What they've mastered vs still learning
66
- - **Celebrate:** Acknowledge improvements specifically
67
-
68
- ## Response Framework
69
-
70
- ### Micro-Responses for Different Situations
71
- **Correct answer:** "Đúng!" + next step
72
- **Close attempt:** "Gần rồi! [correction]"
73
- **Wrong twice:** "[Answer]. Câu khác nhé!"
74
- **Confusion:** "Không hiểu? Em giải thích khác."
75
- **Good progress:** "Tiến bộ rồi!"
76
-
77
- ### Session Management
78
- - **Focus:** 1 grammar pattern per conversation
79
- - **Duration:** Keep exchanges short and engaging
80
- - **Goal clarity:** Tell student what they're practicing
81
- - **Closure:** Summarize what they learned today
82
-
83
- ## Technical Notes
84
- - **Translation rule:** Every English sentence → (Vietnamese meaning)
85
- - **Context awareness:** Remember previous exchanges in conversation
86
- - **Flexibility:** Adjust based on real-time student performance
87
- - **Encouragement:** Always end on positive note
88
- """,
89
- ),
90
- ("placeholder", "{messages}"),
91
- ]
92
- )
93
-
94
- conversation_chain = conversation_prompt | model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/agents/lesson_practice/tools.py DELETED
@@ -1,16 +0,0 @@
1
- from langchain_core.tools import tool
2
- from loguru import logger
3
-
4
-
5
- @tool
6
- def function_name(
7
- input: str,
8
- ) -> str:
9
- """
10
- Mô tả chức năng của hàm này.
11
- """
12
- logger.info(f"Received input: {input}")
13
- # Thực hiện các thao tác cần thiết với input
14
- result = f"Processed: {input}"
15
- logger.info(f"Returning result: {result}")
16
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/agents/role_play/__pycache__/func.cpython-311.pyc CHANGED
Binary files a/src/agents/role_play/__pycache__/func.cpython-311.pyc and b/src/agents/role_play/__pycache__/func.cpython-311.pyc differ
 
src/agents/role_play/__pycache__/prompt.cpython-311.pyc CHANGED
Binary files a/src/agents/role_play/__pycache__/prompt.cpython-311.pyc and b/src/agents/role_play/__pycache__/prompt.cpython-311.pyc differ
 
src/agents/role_play/func.py CHANGED
@@ -4,6 +4,7 @@ from langgraph.prebuilt import create_react_agent
4
  from langgraph_swarm import create_handoff_tool
5
  from langchain_core.messages import RemoveMessage
6
  from .prompt import roleplay_prompt, guiding_prompt
 
7
  from typing_extensions import TypedDict, Annotated
8
  from langchain_core.messages import AnyMessage
9
  from langgraph.graph import add_messages
@@ -35,6 +36,7 @@ def trim_history(state: State):
35
 
36
  async def call_roleplay(state: State):
37
  logger.info("Calling roleplay agent...")
 
38
  roleplay_agent = create_react_agent(
39
  model,
40
  [
@@ -42,6 +44,7 @@ async def call_roleplay(state: State):
42
  agent_name="Guiding Agent",
43
  description="Hand off to Guiding Agent when user shows signs of needing help, guidance, or struggles with communication",
44
  ),
 
45
  ],
46
  prompt=roleplay_prompt.format(
47
  scenario_title=state["scenario_title"],
@@ -59,6 +62,7 @@ async def call_roleplay(state: State):
59
 
60
  async def call_guiding_agent(state: State):
61
  logger.info("Calling guiding agent...")
 
62
  guiding_agent = create_react_agent(
63
  model,
64
  [
@@ -66,6 +70,7 @@ async def call_guiding_agent(state: State):
66
  agent_name="Roleplay Agent",
67
  description="Hand off back to Roleplay Agent when user is ready for scenario practice and shows improved confidence",
68
  ),
 
69
  ],
70
  prompt=guiding_prompt.format(
71
  scenario_title=state["scenario_title"],
@@ -77,6 +82,7 @@ async def call_guiding_agent(state: State):
77
  name="Guiding Agent",
78
  )
79
  response = await guiding_agent.ainvoke({"messages": state["messages"]})
 
80
  return {"messages": response["messages"]}
81
 
82
 
 
4
  from langgraph_swarm import create_handoff_tool
5
  from langchain_core.messages import RemoveMessage
6
  from .prompt import roleplay_prompt, guiding_prompt
7
+ from .tools import create_end_conversation_tool
8
  from typing_extensions import TypedDict, Annotated
9
  from langchain_core.messages import AnyMessage
10
  from langgraph.graph import add_messages
 
36
 
37
  async def call_roleplay(state: State):
38
  logger.info("Calling roleplay agent...")
39
+
40
  roleplay_agent = create_react_agent(
41
  model,
42
  [
 
44
  agent_name="Guiding Agent",
45
  description="Hand off to Guiding Agent when user shows signs of needing help, guidance, or struggles with communication",
46
  ),
47
+ create_end_conversation_tool(),
48
  ],
49
  prompt=roleplay_prompt.format(
50
  scenario_title=state["scenario_title"],
 
62
 
63
  async def call_guiding_agent(state: State):
64
  logger.info("Calling guiding agent...")
65
+
66
  guiding_agent = create_react_agent(
67
  model,
68
  [
 
70
  agent_name="Roleplay Agent",
71
  description="Hand off back to Roleplay Agent when user is ready for scenario practice and shows improved confidence",
72
  ),
73
+ create_end_conversation_tool(),
74
  ],
75
  prompt=guiding_prompt.format(
76
  scenario_title=state["scenario_title"],
 
82
  name="Guiding Agent",
83
  )
84
  response = await guiding_agent.ainvoke({"messages": state["messages"]})
85
+
86
  return {"messages": response["messages"]}
87
 
88
 
src/agents/role_play/prompt.py CHANGED
@@ -94,6 +94,22 @@ Stay true to {your_role}:
94
  - User asks questions or shares opinions
95
  - Natural conversation flow develops
96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  Remember: You're a real person in your role who's patient with English learners. **Stay true to {your_role} personality and speaking style.** **Keep responses under 15 words typically.** **Use markdown formatting for readability - no icons.** Let them explore topics through questions rather than long explanations.
98
  """
99
 
@@ -260,6 +276,23 @@ Brief Vietnamese + English bridge back to roleplay with confidence.
260
  ### When User Needs Structure:
261
  Offer practice options in Vietnamese (under 25 words) + "What would you like to practice?"
262
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
263
  Remember: **I'm WISE, your friendly language safety net!** **NEVER exceed 40 words in any response.** Keep it brief, encouraging, always end with open questions. **Use Vietnamese when they need it.** **Use concise markdown formatting - no icons.** Quick help + confidence building + back to practice!
264
 
265
- """
 
94
  - User asks questions or shares opinions
95
  - Natural conversation flow develops
96
 
97
+ ## CONVERSATION TERMINATION
98
+ End the conversation naturally when:
99
+ - The user says goodbye, thanks, or indicates they're done (e.g., "Thanks, that's all", "Goodbye", "See you later")
100
+ - The scenario's objectives have been fully explored and the user shows satisfaction
101
+ - The user explicitly states they want to end the conversation
102
+ - After a natural conclusion to the scenario's storyline
103
+
104
+ When ending the conversation:
105
+ 1. Acknowledge what was discussed
106
+ 2. Provide a natural closing statement
107
+ 3. Thank the user for the conversation
108
+
109
+ You have access to a special tool to end conversations naturally:
110
+ - Use the "end_conversation" tool when the conversation has reached its natural conclusion
111
+ - Provide a reason for ending the conversation (e.g., "User said goodbye", "Scenario completed")
112
+
113
  Remember: You're a real person in your role who's patient with English learners. **Stay true to {your_role} personality and speaking style.** **Keep responses under 15 words typically.** **Use markdown formatting for readability - no icons.** Let them explore topics through questions rather than long explanations.
114
  """
115
 
 
276
  ### When User Needs Structure:
277
  Offer practice options in Vietnamese (under 25 words) + "What would you like to practice?"
278
 
279
+ ## CONVERSATION TERMINATION
280
+ End the conversation naturally when:
281
+ - The user says goodbye, thanks, or indicates they're done (e.g., "Thanks, that's all", "Goodbye", "See you later")
282
+ - The learning objectives have been met and the user shows satisfaction
283
+ - The user explicitly states they want to end the conversation
284
+ - After successfully completing a practice activity and the user shows confidence
285
+
286
+ When ending the conversation:
287
+ 1. Acknowledge their progress and efforts
288
+ 2. Provide a natural closing statement
289
+ 3. Thank the user for the interaction
290
+ 4. Encourage them to practice more English
291
+
292
+ You have access to a special tool to end conversations naturally:
293
+ - Use the "end_conversation" tool when the conversation has reached its natural conclusion
294
+ - Provide a reason for ending the conversation (e.g., "User completed practice", "User said goodbye")
295
+
296
  Remember: **I'm WISE, your friendly language safety net!** **NEVER exceed 40 words in any response.** Keep it brief, encouraging, always end with open questions. **Use Vietnamese when they need it.** **Use concise markdown formatting - no icons.** Quick help + confidence building + back to practice!
297
 
298
+ """
src/agents/role_play/tools.py CHANGED
@@ -1,7 +1,59 @@
1
  from langchain_core.tools import tool
 
 
 
 
2
  from loguru import logger
3
 
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  @tool
6
  def function_name(
7
  input: str,
 
1
  from langchain_core.tools import tool
2
+ from langchain_core.messages import ToolMessage
3
+ from langgraph.types import Command
4
+ from typing import Annotated, Any
5
+ from langgraph_swarm.handoff import _get_field, InjectedState
6
  from loguru import logger
7
 
8
 
9
def create_end_conversation_tool(
    name: str = "end_conversation",
    description: str = "End the conversation naturally when the discussion has reached its conclusion or the user indicates they want to stop",
) -> callable:
    """Create a tool that can end the conversation.

    Args:
        name: Name of the tool to use for ending the conversation.
        description: Description for the end conversation tool.

    Returns:
        A tool that when called will end the conversation.
    """
    # Local import: InjectedToolCallId is only needed inside the factory
    # and this keeps the module's top-level import surface unchanged.
    from langchain_core.tools import InjectedToolCallId

    @tool(name, description=description)
    def end_conversation(
        reason: str,
        # Annotation is typed as Any instead of StateLike. StateLike
        # trigger validation issues from Pydantic / langchain_core interaction.
        # https://github.com/langchain-ai/langchain/issues/32067
        state: Annotated[Any, InjectedState],
        # Must be injected by the framework: a plain `str` parameter would be
        # exposed in the tool schema and the LLM would be asked to invent it,
        # breaking ToolMessage correlation with the originating tool call.
        tool_call_id: Annotated[str, InjectedToolCallId],
    ) -> Command:
        """End the conversation with the given reason.

        Args:
            reason: The reason for ending the conversation
        """
        logger.info(f"Conversation ended. Reason: {reason}")

        # ToolMessage closes out the pending tool call in the history.
        tool_message = ToolMessage(
            content=f"Conversation ended: {reason}",
            name=name,
            tool_call_id=tool_call_id,
        )

        # Update the PARENT graph's state with the closing tool message.
        # NOTE(review): no `goto` is set, so this does not route to the END
        # node — it only appends the message and lets the parent graph
        # finish its current step. Confirm this is the intended termination.
        return Command(
            graph=Command.PARENT,
            update={
                # _get_field is a private langgraph_swarm helper; it reads
                # "messages" from either a dict-like or attribute-style state.
                "messages": [*_get_field(state, "messages"), tool_message],
            },
        )

    return end_conversation
55
+
56
+
57
  @tool
58
  def function_name(
59
  input: str,
src/apis/__pycache__/create_app.cpython-311.pyc CHANGED
Binary files a/src/apis/__pycache__/create_app.cpython-311.pyc and b/src/apis/__pycache__/create_app.cpython-311.pyc differ
 
src/apis/create_app.py CHANGED
@@ -3,11 +3,13 @@ from fastapi.middleware.cors import CORSMiddleware
3
  from src.apis.routes.user_route import router as router_user
4
  from src.apis.routes.chat_route import router as router_chat
5
  from src.apis.routes.lesson_route import router as router_lesson
 
6
 
7
  api_router = APIRouter(prefix="/api")
8
  api_router.include_router(router_user)
9
  api_router.include_router(router_chat)
10
  api_router.include_router(router_lesson)
 
11
 
12
 
13
  def create_app():
@@ -21,4 +23,4 @@ def create_app():
21
  allow_headers=["*"],
22
  )
23
 
24
- return app
 
3
  from src.apis.routes.user_route import router as router_user
4
  from src.apis.routes.chat_route import router as router_chat
5
  from src.apis.routes.lesson_route import router as router_lesson
6
+ from src.apis.routes.evaluation_route import router as router_evaluation
7
 
8
  api_router = APIRouter(prefix="/api")
9
  api_router.include_router(router_user)
10
  api_router.include_router(router_chat)
11
  api_router.include_router(router_lesson)
12
+ api_router.include_router(router_evaluation)
13
 
14
 
15
  def create_app():
 
23
  allow_headers=["*"],
24
  )
25
 
26
+ return app
src/apis/routes/__pycache__/chat_route.cpython-311.pyc CHANGED
Binary files a/src/apis/routes/__pycache__/chat_route.cpython-311.pyc and b/src/apis/routes/__pycache__/chat_route.cpython-311.pyc differ
 
src/apis/routes/chat_route.py CHANGED
@@ -149,7 +149,7 @@ async def roleplay_stream(
149
  ):
150
  """Send a message (text or audio) to the roleplay agent with streaming response"""
151
  logger.info(f"Received streaming roleplay request: {session_id}")
152
-
153
  # Validate that at least one input is provided
154
  if not text_message and not audio_file:
155
  raise HTTPException(
@@ -217,6 +217,7 @@ async def roleplay_stream(
217
  async def generate_stream():
218
  """Generator function for streaming responses"""
219
  accumulated_content = ""
 
220
  try:
221
  input_graph = {
222
  "messages": [message],
@@ -230,7 +231,7 @@ async def roleplay_stream(
230
 
231
  async for event in role_play_agent().astream(
232
  input=input_graph,
233
- stream_mode=["messages"],
234
  config=config,
235
  subgraphs=True,
236
  ):
@@ -239,56 +240,79 @@ async def roleplay_stream(
239
  # message_chunk is a tuple, get the first element which is the actual AIMessageChunk
240
  if isinstance(message_chunk, tuple) and len(message_chunk) > 0:
241
  actual_message = message_chunk[0]
242
- content = getattr(actual_message, 'content', '')
243
  else:
244
  actual_message = message_chunk
245
- content = getattr(message_chunk, 'content', '')
246
-
247
- if content:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  # Accumulate content for TTS
249
  accumulated_content += content
250
-
251
  # Create SSE-formatted response
252
  response_data = {
253
  "type": "message_chunk",
254
  "content": content,
255
  "metadata": {
256
- "agent": getattr(actual_message, 'name', 'unknown'),
257
- "id": getattr(actual_message, 'id', ''),
258
- "usage_metadata": getattr(actual_message, 'usage_metadata', {})
259
- }
 
 
260
  }
261
  yield f"data: {json.dumps(response_data)}\n\n"
262
-
263
  # Small delay to prevent overwhelming the client
264
  await asyncio.sleep(0.01)
265
-
266
- # Generate TTS audio if requested
267
- audio_data = None
268
- if audio and accumulated_content.strip():
269
- try:
270
- logger.info(f"Generating TTS for accumulated content: {len(accumulated_content)} chars")
271
- audio_result = await tts_service.text_to_speech(accumulated_content)
272
- if audio_result:
273
- audio_data = {
274
- "audio_data": audio_result["audio_data"],
275
- "mime_type": audio_result["mime_type"],
276
- "format": audio_result["format"]
277
- }
278
- logger.info("TTS audio generated successfully")
279
- else:
280
- logger.warning("TTS generation failed")
281
- except Exception as tts_error:
282
- logger.error(f"TTS generation error: {str(tts_error)}")
283
-
284
- # Send completion signal with optional audio
285
- completion_data = {
286
- "type": "completion",
287
- "content": "",
288
- "audio": audio_data
289
- }
290
- yield f"data: {json.dumps(completion_data)}\n\n"
291
-
292
  except Exception as e:
293
  logger.error(f"Error in streaming roleplay: {str(e)}")
294
  error_data = {"type": "error", "content": str(e)}
 
149
  ):
150
  """Send a message (text or audio) to the roleplay agent with streaming response"""
151
  logger.info(f"Received streaming roleplay request: {session_id}")
152
+
153
  # Validate that at least one input is provided
154
  if not text_message and not audio_file:
155
  raise HTTPException(
 
217
  async def generate_stream():
218
  """Generator function for streaming responses"""
219
  accumulated_content = ""
220
+ conversation_ended = False
221
  try:
222
  input_graph = {
223
  "messages": [message],
 
231
 
232
  async for event in role_play_agent().astream(
233
  input=input_graph,
234
+ stream_mode=["messages", "values"],
235
  config=config,
236
  subgraphs=True,
237
  ):
 
240
  # message_chunk is a tuple, get the first element which is the actual AIMessageChunk
241
  if isinstance(message_chunk, tuple) and len(message_chunk) > 0:
242
  actual_message = message_chunk[0]
243
+ content = getattr(actual_message, "content", "")
244
  else:
245
  actual_message = message_chunk
246
+ content = getattr(message_chunk, "content", "")
247
+
248
+ # Check if this is a tool call message and if it's an end conversation tool call
249
+ if (
250
+ hasattr(actual_message, "tool_calls")
251
+ and actual_message.tool_calls
252
+ ):
253
+ # Check if any tool call is for ending the conversation
254
+ for tool_call in actual_message.tool_calls:
255
+ if (
256
+ isinstance(tool_call, dict)
257
+ and tool_call.get("name") == "end_conversation"
258
+ ):
259
+ # Send a special termination message to the client
260
+ termination_data = {
261
+ "type": "termination",
262
+ "content": "Conversation ended",
263
+ "reason": tool_call.get("args", {}).get("reason", "Unknown reason")
264
+ }
265
+ yield f"data: {json.dumps(termination_data)}\n\n"
266
+ conversation_ended = True
267
+ break
268
+
269
+ if content and not conversation_ended:
270
  # Accumulate content for TTS
271
  accumulated_content += content
272
+
273
  # Create SSE-formatted response
274
  response_data = {
275
  "type": "message_chunk",
276
  "content": content,
277
  "metadata": {
278
+ "agent": getattr(actual_message, "name", "unknown"),
279
+ "id": getattr(actual_message, "id", ""),
280
+ "usage_metadata": getattr(
281
+ actual_message, "usage_metadata", {}
282
+ ),
283
+ },
284
  }
285
  yield f"data: {json.dumps(response_data)}\n\n"
286
+
287
  # Small delay to prevent overwhelming the client
288
  await asyncio.sleep(0.01)
289
+
290
+ # Only send completion signal if conversation wasn't ended by tool call
291
+ if not conversation_ended:
292
+ # Generate TTS audio if requested
293
+ audio_data = None
294
+ if audio and accumulated_content.strip():
295
+ try:
296
+ logger.info(
297
+ f"Generating TTS for accumulated content: {len(accumulated_content)} chars"
298
+ )
299
+ audio_result = await tts_service.text_to_speech(accumulated_content)
300
+ if audio_result:
301
+ audio_data = {
302
+ "audio_data": audio_result["audio_data"],
303
+ "mime_type": audio_result["mime_type"],
304
+ "format": audio_result["format"],
305
+ }
306
+ logger.info("TTS audio generated successfully")
307
+ else:
308
+ logger.warning("TTS generation failed")
309
+ except Exception as tts_error:
310
+ logger.error(f"TTS generation error: {str(tts_error)}")
311
+
312
+ # Send completion signal with optional audio
313
+ completion_data = {"type": "completion", "content": "", "audio": audio_data}
314
+ yield f"data: {json.dumps(completion_data)}\n\n"
315
+
316
  except Exception as e:
317
  logger.error(f"Error in streaming roleplay: {str(e)}")
318
  error_data = {"type": "error", "content": str(e)}
src/apis/routes/evaluation_route.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, status, HTTPException
2
+ from pydantic import BaseModel
3
+ from src.agents.evaluation.agent import evaluate_conversation
4
+ from src.utils.logger import logger
5
+ from typing import Optional
6
+
7
+ router = APIRouter(prefix="/evaluation", tags=["Evaluation"])
8
+
9
+
10
+ class EvaluationRequest(BaseModel):
11
+ session_id: str
12
+ learner_level: str = "beginner"
13
+ scenario_title: Optional[str] = ""
14
+ scenario_description: Optional[str] = ""
15
+ key_vocabulary: Optional[str] = ""
16
+
17
+
18
+ class EvaluationResponse(BaseModel):
19
+ score: int
20
+ feedback: str
21
+ strengths: list
22
+ improvements: list
23
+ suggestions: list
24
+ next_steps: list
25
+
26
+
27
+ @router.post("/conversation", response_model=EvaluationResponse, status_code=status.HTTP_200_OK)
28
+ async def evaluate_conversation_endpoint(request: EvaluationRequest):
29
+ """Evaluate a conversation and provide feedback"""
30
+ logger.info(f"Received evaluation request for session: {request.session_id}")
31
+
32
+ try:
33
+ result = await evaluate_conversation(
34
+ session_id=request.session_id,
35
+ learner_level=request.learner_level,
36
+ scenario_title=request.scenario_title or "",
37
+ scenario_description=request.scenario_description or "",
38
+ key_vocabulary=request.key_vocabulary or ""
39
+ )
40
+
41
+ return EvaluationResponse(**result)
42
+
43
+ except Exception as e:
44
+ logger.error(f"Error in conversation evaluation: {str(e)}")
45
+ raise HTTPException(
46
+ status_code=500,
47
+ detail=f"Failed to evaluate conversation: {str(e)}"
48
+ )
src/apis/routes/lesson_route.py CHANGED
@@ -13,7 +13,6 @@ from src.utils.logger import logger
13
  from src.services.tts_service import tts_service
14
  from pydantic import BaseModel, Field
15
  from typing import List, Dict, Any, Optional
16
- from src.agents.lesson_practice.flow import lesson_practice_agent
17
  from src.agents.lesson_practice_2.flow import lesson_practice_2_agent
18
  from src.apis.models.lesson_models import Lesson, LessonResponse, LessonDetailResponse
19
  import json
@@ -42,15 +41,17 @@ class LessonPracticeRequest(BaseModel):
42
  def load_lessons_from_file() -> List[Lesson]:
43
  """Load lessons from the JSON file"""
44
  try:
45
- lessons_file_path = os.path.join(os.path.dirname(__file__), "..", "..", "data", "lessons.json")
46
-
 
 
47
  if not os.path.exists(lessons_file_path):
48
  logger.warning(f"Lessons file not found at {lessons_file_path}")
49
  return []
50
-
51
- with open(lessons_file_path, 'r', encoding='utf-8') as file:
52
  lessons_data = json.load(file)
53
-
54
  # Convert to Lesson objects
55
  lessons = []
56
  for lesson_data in lessons_data:
@@ -58,9 +59,11 @@ def load_lessons_from_file() -> List[Lesson]:
58
  lesson = Lesson(**lesson_data)
59
  lessons.append(lesson)
60
  except Exception as e:
61
- logger.error(f"Error parsing lesson {lesson_data.get('id', 'unknown')}: {str(e)}")
 
 
62
  continue
63
-
64
  return lessons
65
  except Exception as e:
66
  logger.error(f"Error loading lessons: {str(e)}")
@@ -71,22 +74,19 @@ def load_lessons_from_file() -> List[Lesson]:
71
  async def get_all_lessons():
72
  """
73
  Get all available lessons
74
-
75
  Returns:
76
  LessonResponse: Contains list of all lessons and total count
77
  """
78
  try:
79
  lessons = load_lessons_from_file()
80
-
81
- return LessonResponse(
82
- lessons=lessons,
83
- total=len(lessons)
84
- )
85
  except Exception as e:
86
  logger.error(f"Error retrieving lessons: {str(e)}")
87
  raise HTTPException(
88
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
89
- detail="Failed to retrieve lessons"
90
  )
91
 
92
 
@@ -94,25 +94,25 @@ async def get_all_lessons():
94
  async def get_lesson_by_id(lesson_id: str):
95
  """
96
  Get a specific lesson by ID
97
-
98
  Args:
99
  lesson_id (str): The unique identifier of the lesson
100
-
101
  Returns:
102
  LessonDetailResponse: Contains the lesson details
103
  """
104
  try:
105
  lessons = load_lessons_from_file()
106
-
107
  # Find the lesson with the specified ID
108
  lesson = next((l for l in lessons if l.id == lesson_id), None)
109
-
110
  if not lesson:
111
  raise HTTPException(
112
  status_code=status.HTTP_404_NOT_FOUND,
113
- detail=f"Lesson with ID '{lesson_id}' not found"
114
  )
115
-
116
  return LessonDetailResponse(lesson=lesson)
117
  except HTTPException:
118
  raise
@@ -120,7 +120,7 @@ async def get_lesson_by_id(lesson_id: str):
120
  logger.error(f"Error retrieving lesson {lesson_id}: {str(e)}")
121
  raise HTTPException(
122
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
123
- detail="Failed to retrieve lesson"
124
  )
125
 
126
 
@@ -128,31 +128,27 @@ async def get_lesson_by_id(lesson_id: str):
128
  async def search_lessons_by_unit(unit_name: str):
129
  """
130
  Search lessons by unit name (case-insensitive partial match)
131
-
132
  Args:
133
  unit_name (str): Part of the unit name to search for
134
-
135
  Returns:
136
  LessonResponse: Contains list of matching lessons
137
  """
138
  try:
139
  lessons = load_lessons_from_file()
140
-
141
  # Filter lessons by unit name (case-insensitive partial match)
142
  matching_lessons = [
143
- lesson for lesson in lessons
144
- if unit_name.lower() in lesson.unit.lower()
145
  ]
146
-
147
- return LessonResponse(
148
- lessons=matching_lessons,
149
- total=len(matching_lessons)
150
- )
151
  except Exception as e:
152
  logger.error(f"Error searching lessons by unit '{unit_name}': {str(e)}")
153
  raise HTTPException(
154
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
155
- detail="Failed to search lessons"
156
  )
157
 
158
 
@@ -161,110 +157,7 @@ async def chat(
161
  session_id: str = Form(
162
  ..., description="Session ID for tracking user interactions"
163
  ),
164
- lesson_data: str = Form(
165
- ..., description="The lesson data as JSON string"
166
- ),
167
- text_message: Optional[str] = Form(None, description="Text message from user"),
168
- audio_file: Optional[UploadFile] = File(None, description="Audio file from user"),
169
- ):
170
- """Send a message (text or audio) to the lesson practice agent"""
171
-
172
- # Validate that at least one input is provided
173
- if not text_message and not audio_file:
174
- raise HTTPException(
175
- status_code=400, detail="Either text_message or audio_file must be provided"
176
- )
177
-
178
- # Parse lesson data from JSON string
179
- try:
180
- lesson_dict = json.loads(lesson_data)
181
- except json.JSONDecodeError:
182
- raise HTTPException(status_code=400, detail="Invalid lesson_data JSON format")
183
-
184
- if not lesson_dict:
185
- raise HTTPException(status_code=400, detail="Lesson data not provided")
186
-
187
- # Prepare message content
188
- message_content = []
189
-
190
- # Handle text input
191
- if text_message:
192
- message_content.append({"type": "text", "text": text_message})
193
-
194
- # Handle audio input
195
- if audio_file:
196
- try:
197
- # Read audio file content
198
- audio_data = await audio_file.read()
199
-
200
- # Convert to base64
201
- audio_base64 = base64.b64encode(audio_data).decode("utf-8")
202
-
203
- # Determine mime type based on file extension
204
- file_extension = (
205
- audio_file.filename.split(".")[-1].lower()
206
- if audio_file.filename
207
- else "wav"
208
- )
209
- mime_type_map = {
210
- "wav": "audio/wav",
211
- "mp3": "audio/mpeg",
212
- "ogg": "audio/ogg",
213
- "webm": "audio/webm",
214
- "m4a": "audio/mp4",
215
- }
216
- mime_type = mime_type_map.get(file_extension, "audio/wav")
217
-
218
- message_content.append(
219
- {
220
- "type": "audio",
221
- "source_type": "base64",
222
- "data": audio_base64,
223
- "mime_type": mime_type,
224
- }
225
- )
226
-
227
- except Exception as e:
228
- logger.error(f"Error processing audio file: {str(e)}")
229
- raise HTTPException(
230
- status_code=400, detail=f"Error processing audio file: {str(e)}"
231
- )
232
-
233
- # Create message in the required format
234
- message = {"role": "user", "content": message_content}
235
-
236
- try:
237
- response = await lesson_practice_agent().ainvoke(
238
- {
239
- "messages": [message],
240
- "unit": lesson_dict.get("unit", ""),
241
- "vocabulary": lesson_dict.get("vocabulary", []),
242
- "key_structures": lesson_dict.get("key_structures", []),
243
- "practice_questions": lesson_dict.get("practice_questions", []),
244
- "student_level": lesson_dict.get("student_level", "beginner"),
245
- },
246
- {"configurable": {"thread_id": session_id}},
247
- )
248
-
249
- # Extract AI response content
250
- ai_response = response["messages"][-1].content
251
- logger.info(f"AI response: {ai_response}")
252
-
253
- return JSONResponse(content={"response": ai_response})
254
-
255
- except Exception as e:
256
- logger.error(f"Error in lesson practice: {str(e)}")
257
- raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
258
-
259
-
260
- @router.post("/chat_v2")
261
- async def chat_v2(
262
- session_id: str = Form(
263
- ..., description="Session ID for tracking user interactions"
264
- ),
265
- lesson_data: str = Form(
266
- ..., description="The lesson data as JSON string"
267
- ),
268
  text_message: Optional[str] = Form(None, description="Text message from user"),
269
  audio_file: Optional[UploadFile] = File(None, description="Audio file from user"),
270
  ):
@@ -346,7 +239,7 @@ async def chat_v2(
346
  },
347
  {"configurable": {"thread_id": session_id}},
348
  )
349
-
350
  # Extract AI response content
351
  ai_response = response["messages"][-1].content
352
  logger.info(f"AI response (v2): {ai_response}")
@@ -358,21 +251,19 @@ async def chat_v2(
358
  raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
359
 
360
 
361
- @router.post("/chat_v2/stream", status_code=status.HTTP_200_OK)
362
- async def chat_v2_stream(
363
  session_id: str = Form(
364
  ..., description="Session ID for tracking user interactions"
365
  ),
366
- lesson_data: str = Form(
367
- ..., description="The lesson data as JSON string"
368
- ),
369
  text_message: Optional[str] = Form(None, description="Text message from user"),
370
  audio_file: Optional[UploadFile] = File(None, description="Audio file from user"),
371
  audio: bool = Form(False, description="Whether to return TTS audio response"),
372
  ):
373
  """Send a message (text or audio) to the lesson practice v2 agent with streaming response"""
374
  logger.info(f"Received streaming lesson practice v2 request: {session_id}")
375
-
376
  # Validate that at least one input is provided
377
  if not text_message and not audio_file:
378
  raise HTTPException(
@@ -462,56 +353,56 @@ async def chat_v2_stream(
462
  # message_chunk is a tuple, get the first element which is the actual AIMessageChunk
463
  if isinstance(message_chunk, tuple) and len(message_chunk) > 0:
464
  actual_message = message_chunk[0]
465
- content = getattr(actual_message, 'content', '')
466
  else:
467
  actual_message = message_chunk
468
- content = getattr(message_chunk, 'content', '')
469
-
470
  if content:
471
  # Accumulate content for TTS
472
  accumulated_content += content
473
-
474
  # Create SSE-formatted response
475
  response_data = {
476
  "type": "message_chunk",
477
  "content": content,
478
  "metadata": {
479
- "agent": getattr(actual_message, 'name', 'unknown'),
480
- "id": getattr(actual_message, 'id', ''),
481
- "usage_metadata": getattr(actual_message, 'usage_metadata', {})
482
- }
 
 
483
  }
484
  yield f"data: {json.dumps(response_data)}\n\n"
485
-
486
  # Small delay to prevent overwhelming the client
487
  await asyncio.sleep(0.01)
488
-
489
  # Generate TTS audio if requested
490
  audio_data = None
491
  if audio and accumulated_content.strip():
492
  try:
493
- logger.info(f"Generating TTS for lesson v2 content: {len(accumulated_content)} chars")
 
 
494
  audio_result = await tts_service.text_to_speech(accumulated_content)
495
  if audio_result:
496
  audio_data = {
497
  "audio_data": audio_result["audio_data"],
498
  "mime_type": audio_result["mime_type"],
499
- "format": audio_result["format"]
500
  }
501
  logger.info("Lesson v2 TTS audio generated successfully")
502
  else:
503
  logger.warning("Lesson v2 TTS generation failed")
504
  except Exception as tts_error:
505
  logger.error(f"Lesson v2 TTS generation error: {str(tts_error)}")
506
-
507
  # Send completion signal with optional audio
508
- completion_data = {
509
- "type": "completion",
510
- "content": "",
511
- "audio": audio_data
512
- }
513
  yield f"data: {json.dumps(completion_data)}\n\n"
514
-
515
  except Exception as e:
516
  logger.error(f"Error in streaming lesson practice v2: {str(e)}")
517
  error_data = {"type": "error", "content": str(e)}
 
13
  from src.services.tts_service import tts_service
14
  from pydantic import BaseModel, Field
15
  from typing import List, Dict, Any, Optional
 
16
  from src.agents.lesson_practice_2.flow import lesson_practice_2_agent
17
  from src.apis.models.lesson_models import Lesson, LessonResponse, LessonDetailResponse
18
  import json
 
41
  def load_lessons_from_file() -> List[Lesson]:
42
  """Load lessons from the JSON file"""
43
  try:
44
+ lessons_file_path = os.path.join(
45
+ os.path.dirname(__file__), "..", "..", "data", "lessons.json"
46
+ )
47
+
48
  if not os.path.exists(lessons_file_path):
49
  logger.warning(f"Lessons file not found at {lessons_file_path}")
50
  return []
51
+
52
+ with open(lessons_file_path, "r", encoding="utf-8") as file:
53
  lessons_data = json.load(file)
54
+
55
  # Convert to Lesson objects
56
  lessons = []
57
  for lesson_data in lessons_data:
 
59
  lesson = Lesson(**lesson_data)
60
  lessons.append(lesson)
61
  except Exception as e:
62
+ logger.error(
63
+ f"Error parsing lesson {lesson_data.get('id', 'unknown')}: {str(e)}"
64
+ )
65
  continue
66
+
67
  return lessons
68
  except Exception as e:
69
  logger.error(f"Error loading lessons: {str(e)}")
 
74
  async def get_all_lessons():
75
  """
76
  Get all available lessons
77
+
78
  Returns:
79
  LessonResponse: Contains list of all lessons and total count
80
  """
81
  try:
82
  lessons = load_lessons_from_file()
83
+
84
+ return LessonResponse(lessons=lessons, total=len(lessons))
 
 
 
85
  except Exception as e:
86
  logger.error(f"Error retrieving lessons: {str(e)}")
87
  raise HTTPException(
88
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
89
+ detail="Failed to retrieve lessons",
90
  )
91
 
92
 
 
94
  async def get_lesson_by_id(lesson_id: str):
95
  """
96
  Get a specific lesson by ID
97
+
98
  Args:
99
  lesson_id (str): The unique identifier of the lesson
100
+
101
  Returns:
102
  LessonDetailResponse: Contains the lesson details
103
  """
104
  try:
105
  lessons = load_lessons_from_file()
106
+
107
  # Find the lesson with the specified ID
108
  lesson = next((l for l in lessons if l.id == lesson_id), None)
109
+
110
  if not lesson:
111
  raise HTTPException(
112
  status_code=status.HTTP_404_NOT_FOUND,
113
+ detail=f"Lesson with ID '{lesson_id}' not found",
114
  )
115
+
116
  return LessonDetailResponse(lesson=lesson)
117
  except HTTPException:
118
  raise
 
120
  logger.error(f"Error retrieving lesson {lesson_id}: {str(e)}")
121
  raise HTTPException(
122
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
123
+ detail="Failed to retrieve lesson",
124
  )
125
 
126
 
 
128
  async def search_lessons_by_unit(unit_name: str):
129
  """
130
  Search lessons by unit name (case-insensitive partial match)
131
+
132
  Args:
133
  unit_name (str): Part of the unit name to search for
134
+
135
  Returns:
136
  LessonResponse: Contains list of matching lessons
137
  """
138
  try:
139
  lessons = load_lessons_from_file()
140
+
141
  # Filter lessons by unit name (case-insensitive partial match)
142
  matching_lessons = [
143
+ lesson for lesson in lessons if unit_name.lower() in lesson.unit.lower()
 
144
  ]
145
+
146
+ return LessonResponse(lessons=matching_lessons, total=len(matching_lessons))
 
 
 
147
  except Exception as e:
148
  logger.error(f"Error searching lessons by unit '{unit_name}': {str(e)}")
149
  raise HTTPException(
150
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
151
+ detail="Failed to search lessons",
152
  )
153
 
154
 
 
157
  session_id: str = Form(
158
  ..., description="Session ID for tracking user interactions"
159
  ),
160
+ lesson_data: str = Form(..., description="The lesson data as JSON string"),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
  text_message: Optional[str] = Form(None, description="Text message from user"),
162
  audio_file: Optional[UploadFile] = File(None, description="Audio file from user"),
163
  ):
 
239
  },
240
  {"configurable": {"thread_id": session_id}},
241
  )
242
+
243
  # Extract AI response content
244
  ai_response = response["messages"][-1].content
245
  logger.info(f"AI response (v2): {ai_response}")
 
251
  raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
252
 
253
 
254
+ @router.post("/chat/stream", status_code=status.HTTP_200_OK)
255
+ async def chat_stream(
256
  session_id: str = Form(
257
  ..., description="Session ID for tracking user interactions"
258
  ),
259
+ lesson_data: str = Form(..., description="The lesson data as JSON string"),
 
 
260
  text_message: Optional[str] = Form(None, description="Text message from user"),
261
  audio_file: Optional[UploadFile] = File(None, description="Audio file from user"),
262
  audio: bool = Form(False, description="Whether to return TTS audio response"),
263
  ):
264
  """Send a message (text or audio) to the lesson practice v2 agent with streaming response"""
265
  logger.info(f"Received streaming lesson practice v2 request: {session_id}")
266
+
267
  # Validate that at least one input is provided
268
  if not text_message and not audio_file:
269
  raise HTTPException(
 
353
  # message_chunk is a tuple, get the first element which is the actual AIMessageChunk
354
  if isinstance(message_chunk, tuple) and len(message_chunk) > 0:
355
  actual_message = message_chunk[0]
356
+ content = getattr(actual_message, "content", "")
357
  else:
358
  actual_message = message_chunk
359
+ content = getattr(message_chunk, "content", "")
360
+
361
  if content:
362
  # Accumulate content for TTS
363
  accumulated_content += content
364
+
365
  # Create SSE-formatted response
366
  response_data = {
367
  "type": "message_chunk",
368
  "content": content,
369
  "metadata": {
370
+ "agent": getattr(actual_message, "name", "unknown"),
371
+ "id": getattr(actual_message, "id", ""),
372
+ "usage_metadata": getattr(
373
+ actual_message, "usage_metadata", {}
374
+ ),
375
+ },
376
  }
377
  yield f"data: {json.dumps(response_data)}\n\n"
378
+
379
  # Small delay to prevent overwhelming the client
380
  await asyncio.sleep(0.01)
381
+
382
  # Generate TTS audio if requested
383
  audio_data = None
384
  if audio and accumulated_content.strip():
385
  try:
386
+ logger.info(
387
+ f"Generating TTS for lesson v2 content: {len(accumulated_content)} chars"
388
+ )
389
  audio_result = await tts_service.text_to_speech(accumulated_content)
390
  if audio_result:
391
  audio_data = {
392
  "audio_data": audio_result["audio_data"],
393
  "mime_type": audio_result["mime_type"],
394
+ "format": audio_result["format"],
395
  }
396
  logger.info("Lesson v2 TTS audio generated successfully")
397
  else:
398
  logger.warning("Lesson v2 TTS generation failed")
399
  except Exception as tts_error:
400
  logger.error(f"Lesson v2 TTS generation error: {str(tts_error)}")
401
+
402
  # Send completion signal with optional audio
403
+ completion_data = {"type": "completion", "content": "", "audio": audio_data}
 
 
 
 
404
  yield f"data: {json.dumps(completion_data)}\n\n"
405
+
406
  except Exception as e:
407
  logger.error(f"Error in streaming lesson practice v2: {str(e)}")
408
  error_data = {"type": "error", "content": str(e)}