import gradio as gr
import re
import uuid  # NOTE(review): appears unused in this file — confirm before removing

from models import get_model_response

# Optimized System Prompts
MAIN_AGENT_PROMPT = "You are a calm and methodical assistant. Your sole purpose is to help the user solve their problem step-by-step. Maintain a neutral and professional tone. Break down the task by asking questions. Do not deviate from the user's stated goal."

SUB_AGENT_PROMPT = """You are a silent observer Agent.\n\n**Rules:**\n1. Your output must be a **single** Markdown list item.\n2. The step must describe a high-level interaction **process** (e.g., "- Assistant: Clarify the user's goal.").\n3. The step **must not** contain specific details from the conversation (e.g., place names like 'Hangzhou', numbers like '3000 yuan').\n4. The step must be strictly based on the **assistant's last sentence**, do not assume or create actions.\n\n**Task:**\nAnalyze only the latest turn of the conversation between the 'Assistant' and the 'User' and, strictly following the rules above, output a single pseudo-code step describing the assistant's action.\n\n**Example:**\nUser: 'I want to go to Beijing by car'\nAssistant: 'Okay, so the destination is Beijing, and the mode of transport is by car, is that correct?'\nYour output should be:\n- Assistant: Confirm the destination and mode of transport with the user."""


def _convert_text_to_mermaid(workflow_text):
    """Convert a markdown list of steps into Mermaid flowchart syntax.

    Args:
        workflow_text: Newline-separated markdown list items ("- step" / "* step"),
            or the placeholder sentinel used before any step exists.

    Returns:
        A ``graph TD`` Mermaid definition with one node per line, chained
        with ``-->`` edges in order.
    """
    # Empty/placeholder input renders a single "waiting" node.
    if not workflow_text or workflow_text == "*Waiting for task to start...*":
        return "graph TD;\n A[\"Waiting for task to start...\"];"
    lines = workflow_text.strip().split('\n')
    mermaid_lines = ["graph TD;"]
    node_ids = []
    for i, line in enumerate(lines):
        # FIX: the original chr(65 + i) scheme ("A", "B", ...) produced
        # invalid Mermaid ids past 26 steps ('[', '\\', ...); "N0", "N1", ...
        # is valid for any number of steps.
        node_id = f"N{i}"
        # Clean up the text: remove markdown list markers and escape quotes
        # (Mermaid uses the "#quot;" entity for double quotes inside labels).
        clean_text = re.sub(r'^\s*[-*]\s*', '', line).strip()
        clean_text = clean_text.replace('"', '#quot;')
        mermaid_lines.append(f' {node_id}["{clean_text}"];')
        node_ids.append(node_id)
    # A single node needs no edge line; two or more are chained in order.
    if len(node_ids) > 1:
        mermaid_lines.append(" " + " --> ".join(node_ids) + ";")
    return '\n'.join(mermaid_lines)


def _render_mermaid_html(mermaid_code):
    """Wrap Mermaid code in a <pre class="mermaid"> tag for rendering.

    FIX: the original returned the bare Mermaid source with no wrapping
    element (the tag was evidently lost), contradicting its own docstring
    and leaving the front-end Mermaid runtime nothing to pick up; restored
    the standard Mermaid container element.
    """
    return f'<pre class="mermaid">{mermaid_code}</pre>'
def create_workflow_tab():
    """
    Build the Workflow tab UI and return its components in a dictionary.

    Returns:
        dict: component references keyed by role ("chatbot", "chat_input",
        "send_button", "examples", "topic_output", "workflow_output",
        "mermaid_output") for wiring up event handlers elsewhere.
    """
    with gr.Blocks() as tab:
        with gr.Row():
            # Left column: the conversation with the main agent.
            with gr.Column(scale=1):
                gr.Markdown("### Chat Interface (Main Agent)")
                chat_display = gr.Chatbot(
                    label="Chat with Ling-1t",
                    height=600,
                    elem_id="workflow_chatbot",
                )
                user_entry = gr.Textbox(
                    label="Enter your task...",
                    lines=3,
                    elem_id="workflow_chat_input",
                )
                send_btn = gr.Button("Send", elem_id="workflow_send_button")
                example_picker = gr.Examples(
                    examples=[
                        "Write a Python Flask application that displays 'Hello, World!' on a webpage",
                        "Help me plan a three-day travel itinerary for Beijing",
                    ],
                    inputs=user_entry,
                    label="Example Topics",
                )
            # Right column: the sub-agent's live workflow view.
            with gr.Column(scale=1):
                gr.Markdown("### Real-time Workflow (Sub-agent)")
                topic_box = gr.Textbox(
                    label="Current Topic",
                    interactive=False,
                    elem_id="workflow_topic_output",
                )
                workflow_md = gr.Markdown(
                    label="Current Workflow Description",
                    value="*Waiting for task to start...*",
                    elem_id="workflow_output",
                )
                chart_html = gr.HTML(label="Flowchart Visualization")
    return {
        "chatbot": chat_display,
        "chat_input": user_entry,
        "send_button": send_btn,
        "examples": example_picker,
        "topic_output": topic_box,
        "workflow_output": workflow_md,
        "mermaid_output": chart_html,
    }
def handle_workflow_chat(user_input, history, current_topic, current_workflow):
    """
    Handle one chat turn: stream the main agent's reply, then ask the
    sub-agent for a single abstract workflow step and refresh the chart.

    Args:
        user_input: The user's latest message.
        history: Gradio chat history as a list of (user, assistant) tuples;
            mutated in place (a new turn is appended).
        current_topic: The topic string shown so far ("" if not yet set).
        current_workflow: Markdown list of workflow steps so far, or the
            "*Waiting for task to start...*" placeholder.

    Yields:
        (history, topic, workflow_text, mermaid_html, cleared_input) tuples
        so Gradio updates the chatbot, topic box, workflow markdown,
        flowchart HTML, and input textbox in lockstep. The last item is
        always "" to clear the input field.
    """
    print(f"[handle_workflow_chat] User Input: {user_input}")
    print(f"[handle_workflow_chat] History before append: {history}")
    history.append((user_input, None))
    print(f"[handle_workflow_chat] History after append: {history}")

    # 1. Main Agent Call (streamed).
    # NOTE(review): get_model_response is assumed to yield text chunks —
    # defined in models.py; confirm its contract there.
    main_agent_model_id = "inclusionai/ling-1t"
    main_agent_temp = 0.7
    assistant_response = ""
    response_generator = get_model_response(
        model_id=main_agent_model_id, history=history, system_prompt=MAIN_AGENT_PROMPT, temperature=main_agent_temp
    )
    # PERF FIX: the workflow pane does not change while the reply streams,
    # so render its Mermaid HTML once instead of recomputing it per chunk
    # (the original rebuilt the whole chart on every streamed token).
    unchanged_mermaid_html = _render_mermaid_html(_convert_text_to_mermaid(current_workflow))
    for chunk in response_generator:
        assistant_response += chunk
        # Replace the pending (user, None) turn with the partial reply.
        history[-1] = (user_input, assistant_response)
        yield history, current_topic, current_workflow, unchanged_mermaid_html, ""
    print(f"[handle_workflow_chat] Assistant Response: {assistant_response}")

    # 2. Sub-Agent Call to get ONLY the new step.
    sub_agent_model_id = "inclusionai/ling-mini-2.0"
    sub_agent_temp = 0.3
    sub_agent_user_prompt = f"""
    Analyze ONLY the following conversation turn and extract the single, abstract process step.
    User: "{user_input}"
    Assistant: "{assistant_response}"
    """
    # A one-turn synthetic history carrying just this exchange.
    sub_agent_history_for_call = [(sub_agent_user_prompt, None)]
    new_step_generator = get_model_response(
        model_id=sub_agent_model_id, history=sub_agent_history_for_call, system_prompt=SUB_AGENT_PROMPT, temperature=sub_agent_temp
    )
    new_step = ""
    for chunk in new_step_generator:
        new_step += chunk
    # NOTE(review): the prompt asks for a single list item but nothing here
    # enforces it — a multi-line reply becomes multiple chart nodes; verify
    # whether that is acceptable.
    new_step = new_step.strip()
    print(f"[handle_workflow_chat] New Step: {new_step}")

    # 3. Append the new step to the workflow (replacing the placeholder on
    # the first real step; keeping the old workflow if the model was silent).
    if new_step:
        if current_workflow == "*Waiting for task to start...*":
            new_workflow = new_step
        else:
            new_workflow = current_workflow + "\n" + new_step
    else:
        new_workflow = current_workflow

    # 4. Topic Logic: set once, from the first 30 chars of the first task.
    new_topic = current_topic
    if not current_topic and user_input:
        new_topic = f"Task Topic: {user_input[:30]}..."

    # 5. Generate and render the updated Mermaid chart.
    mermaid_code = _convert_text_to_mermaid(new_workflow)
    mermaid_html = _render_mermaid_html(mermaid_code)
    yield history, new_topic, new_workflow, mermaid_html, ""