yaya-sy committed
Commit 1ab8818 · verified · 1 Parent(s): 9e290b2

Update app.py

Files changed (1)
  1. app.py +16 -25
app.py CHANGED
@@ -182,21 +182,7 @@ def model_inference(input_dict, history):
     audio_path = tts(buffer)
     return audio_path  # Return the audio file path
 
-# Alternative approach: Use regular Interface instead of ChatInterface
-def combined_inference(input_dict, history):
-    """Modified function that returns both text and audio"""
-    text_response = ""
-
-    # Get the streaming response
-    for response in model_inference(input_dict, history):
-        text_response = response
-
-    # Generate audio from final text
-    audio_path = tts(text_response)
-
-    return text_response, audio_path
-
-# Option 1: Use regular Interface (recommended)
+# Option 1: Use regular Interface with streaming (recommended)
 with gr.Blocks() as demo:
     gr.Markdown("# oolel-vision-experimental `@video-infer for video understanding`")
 
@@ -210,18 +196,23 @@ with gr.Blocks() as demo:
     clear = gr.Button("Clear")
 
     def respond(message, chat_history):
-        # Get text response through streaming
-        text_response = ""
-        for response in model_inference(message, chat_history):
-            text_response = response
-
-        # Add to chat history
-        chat_history.append([message["text"], text_response])
+        # Add user message to chat history
+        bot_message = ""
+        chat_history.append([message["text"], ""])
 
-        # Generate audio
-        audio_path = tts(text_response)
+        # Stream the response
+        for response in model_inference(message, chat_history):
+            bot_message = response
+            chat_history[-1][1] = bot_message
+            yield "", chat_history, None
 
-        return "", chat_history, audio_path
+        # Generate audio after streaming is complete
+        try:
+            audio_path = tts(bot_message)
+            yield "", chat_history, audio_path
+        except Exception as e:
+            print(f"TTS Error: {e}")
+            yield "", chat_history, None
 
     msg.submit(respond, [msg, chatbot], [msg, chatbot, audio_output])
     clear.click(lambda: ([], None), outputs=[chatbot, audio_output])
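
For reviewers unfamiliar with the pattern: the new respond is a generator, and Gradio re-renders the outputs on every yield, which is what produces the incremental streaming in the Chatbot before the audio is attached. Below is a minimal, self-contained sketch of the same wiring under simplified assumptions; the fake_stream helper and the plain gr.Textbox are stand-ins for this app's model_inference and multimodal input, not code from the repo.

import time

import gradio as gr


def fake_stream(message):
    """Stand-in generator that yields a progressively longer reply."""
    text = ""
    for word in ["streaming", "replies", "word", "by", "word"]:
        text += word + " "
        time.sleep(0.1)
        yield text.strip()


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    audio_output = gr.Audio()

    def respond(message, chat_history):
        # Append the user turn with an empty bot slot, then fill it in as we stream.
        chat_history.append([message, ""])
        for partial in fake_stream(message):
            chat_history[-1][1] = partial
            # Each yield pushes an update to (msg, chatbot, audio_output).
            yield "", chat_history, None
        # A final yield is where a tts(bot_message) result would attach an audio path.
        yield "", chat_history, None

    msg.submit(respond, [msg, chatbot], [msg, chatbot, audio_output])

if __name__ == "__main__":
    demo.launch()

The try/except around tts in the actual change keeps a TTS failure from discarding the already-streamed text, since the handler still yields the final chat history with a None audio output.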