Khoi1234210 committed on
Commit 6296ee3 · verified
1 Parent(s): cde748d

Update app.py

Files changed (1)
  1. app.py +14 -17
app.py CHANGED
@@ -128,10 +128,11 @@ def render_latex(text):
     return text
 
 def respond(message, history, system_message, max_tokens, temperature, top_p):
-    """Streaming response with yield"""
+    """Non-streaming response for stability"""
     client = InferenceClient(model="Qwen/Qwen2.5-Math-7B-Instruct")
 
     messages = [{"role": "system", "content": system_message}]
+    # Iterate over history dicts and add user/assistant pairs
     for msg in history:
         if msg["role"] == "user":
             messages.append({"role": "user", "content": msg["content"]})
@@ -139,21 +140,17 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
         messages.append({"role": "assistant", "content": msg["content"]})
     messages.append({"role": "user", "content": message})
 
-    response = ""
     try:
-        for chunk in client.chat_completion(
+        completion = client.chat_completion(
             messages,
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
-            stream=True  # Enable streaming
-        ):
-            if chunk.choices[0].delta.content:
-                response += chunk.choices[0].delta.content
-                yield render_latex(response)  # Yield progressively
-
+        )
+        response = completion.choices[0].message.content
+        return render_latex(response)
     except Exception as e:
-        yield f"❌ Error: {str(e)[:100]}..."
+        return f"❌ Error: {str(e)[:100]}... Try a simpler problem."
 
 def get_random_sample():
     """Get a random sample problem - loads datasets if needed"""
@@ -202,12 +199,12 @@ with gr.Blocks(title="🧮 Mathetics AI") as demo:
     )
 
     def chat_response(message, history):
-        history.append({"role": "user", "content": message})
-        history.append({"role": "assistant", "content": ""})
-
-        for partial_response in respond(message, history[:-1], create_math_system_message(), 1024, 0.3, 0.85):
-            history[-1]["content"] = partial_response
-            yield history, ""
+        """Updated to use dict-based history for type='messages'."""
+        bot_response = respond(message, history, create_math_system_message(), 1024, 0.3, 0.85)
+        # Append as dicts, not tuples
+        history.append({"role": "user", "content": message})
+        history.append({"role": "assistant", "content": bot_response})
+        return history, ""
 
     def clear_chat():
         """Clear the chat history and textbox."""
@@ -221,4 +218,4 @@ with gr.Blocks(title="🧮 Mathetics AI") as demo:
         lambda: gr.update(visible=False), outputs=help_text
     )
 
-    demo.launch()
+demo.launch()
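
For context, here is a minimal self-contained sketch of how the revised non-streaming handler fits together end to end. It is a reconstruction under assumptions: the widget names (chatbot, msg_box), the body of create_math_system_message(), and the omission of render_latex() stand in for parts of app.py this diff does not show.

import gradio as gr
from huggingface_hub import InferenceClient


def create_math_system_message():
    # Hypothetical stand-in for the app's real system prompt (not in the diff).
    return "You are a careful math tutor. Solve step by step."


def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Non-streaming response, mirroring the committed version
    (render_latex post-processing omitted here)."""
    client = InferenceClient(model="Qwen/Qwen2.5-Math-7B-Instruct")
    messages = [{"role": "system", "content": system_message}]
    # History arrives as {"role": ..., "content": ...} dicts, matching the
    # loop in the committed respond().
    for msg in history:
        if msg["role"] == "user":
            messages.append({"role": "user", "content": msg["content"]})
        elif msg["role"] == "assistant":
            messages.append({"role": "assistant", "content": msg["content"]})
    messages.append({"role": "user", "content": message})
    try:
        # Single blocking call; the whole reply comes back at once.
        completion = client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {str(e)[:100]}..."


with gr.Blocks(title="🧮 Mathetics AI") as demo:
    # type="messages" makes the Chatbot consume the same dict format the
    # handler appends, which is what "Append as dicts, not tuples" refers to.
    chatbot = gr.Chatbot(type="messages")
    msg_box = gr.Textbox(label="Problem")

    def chat_response(message, history):
        bot_response = respond(
            message, history, create_math_system_message(), 1024, 0.3, 0.85
        )
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": bot_response})
        return history, ""

    msg_box.submit(chat_response, [msg_box, chatbot], [chatbot, msg_box])

demo.launch()

The trade-off in this commit: dropping stream=True means the chat updates once per turn instead of token by token, and chat_response becomes a plain function returning a single (history, "") pair rather than a generator yielding partial states, which is the stability the new docstring refers to.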