Spaces:
Sleeping
Sleeping
ts startup
Browse files
app.py
CHANGED
|
@@ -1,11 +1,38 @@
|
|
| 1 |
import gradio as gr
|
|
|
|
| 2 |
from utils.generator import generate, generate_streaming
|
| 3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
# ---------------------------------------------------------------------
|
| 5 |
# Gradio Interface with MCP support and streaming
|
| 6 |
# ---------------------------------------------------------------------
|
| 7 |
ui = gr.Interface(
|
| 8 |
-
fn=
|
| 9 |
inputs=[
|
| 10 |
gr.Textbox(
|
| 11 |
label="Query",
|
|
@@ -23,8 +50,7 @@ ui = gr.Interface(
|
|
| 23 |
outputs=gr.Textbox(
|
| 24 |
label="Generated Answer",
|
| 25 |
lines=6,
|
| 26 |
-
show_copy_button=True
|
| 27 |
-
streaming=True # Enable streaming in the output
|
| 28 |
),
|
| 29 |
title="ChatFed Generation Module",
|
| 30 |
description="Ask questions based on provided context. Intended for use in RAG pipelines as an MCP server with other ChatFed modules (i.e. context supplied by semantic retriever service).",
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
import asyncio
|
| 3 |
from utils.generator import generate, generate_streaming
|
| 4 |
|
| 5 |
+
# ---------------------------------------------------------------------
|
| 6 |
+
# Wrapper function to handle async streaming for Gradio
|
| 7 |
+
# ---------------------------------------------------------------------
|
| 8 |
+
def generate_streaming_wrapper(query: str, context: str):
    """Bridge Gradio's synchronous generator protocol to the async
    ``generate_streaming`` generator.

    Gradio streams output by iterating a plain (sync) generator, while
    ``generate_streaming`` is an async generator.  This wrapper drives the
    async generator to completion on a dedicated event loop, yielding each
    chunk synchronously as it arrives.

    Args:
        query: The user's question.
        context: Supporting context for the answer (e.g. retrieved passages
            in a RAG pipeline).

    Yields:
        Successive chunks produced by ``generate_streaming(query, context)``.
    """
    # Create a fresh, private loop per call instead of relying on
    # asyncio.get_event_loop() (deprecated when no loop is running, and the
    # original fallback loop was installed globally and never closed).
    loop = asyncio.new_event_loop()
    agen = generate_streaming(query, context)
    try:
        while True:
            try:
                # Pull the next chunk from the async generator synchronously.
                yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        # Always finalize the async generator and close the loop, even when
        # the consumer abandons iteration early (GeneratorExit) — otherwise
        # both the generator's pending cleanup and the loop leak.
        loop.run_until_complete(agen.aclose())
        loop.close()
|
| 30 |
+
|
| 31 |
# ---------------------------------------------------------------------
|
| 32 |
# Gradio Interface with MCP support and streaming
|
| 33 |
# ---------------------------------------------------------------------
|
| 34 |
ui = gr.Interface(
|
| 35 |
+
fn=generate_streaming_wrapper, # Use streaming wrapper function
|
| 36 |
inputs=[
|
| 37 |
gr.Textbox(
|
| 38 |
label="Query",
|
|
|
|
| 50 |
outputs=gr.Textbox(
|
| 51 |
label="Generated Answer",
|
| 52 |
lines=6,
|
| 53 |
+
show_copy_button=True
|
|
|
|
| 54 |
),
|
| 55 |
title="ChatFed Generation Module",
|
| 56 |
description="Ask questions based on provided context. Intended for use in RAG pipelines as an MCP server with other ChatFed modules (i.e. context supplied by semantic retriever service).",
|