Commit 07d7cbc · 1 Parent(s): 7229992
Update main.py
main.py CHANGED
@@ -32,6 +32,45 @@ async def index():
 class ChatCompletionRequest(BaseModel):
     prompt: str
 
+
+@app.get("/demo")
+async def demo():
+    html_content = """
+    <!DOCTYPE html>
+    <html>
+    <head>
+    <style>
+    #logs {
+        background-color: black;
+        color:white;
+        height:600px;
+        overflow-x: hidden;
+        overflow-y: auto;
+        text-align: left;
+        padding-left:10px;
+    }
+    </style>
+    </head>
+
+    <body>
+
+    <h1>StoryWriter Demo</h1>
+    <div id="logs">
+    </div>
+
+    <script>
+        var source = new EventSource("http://localhost:8000/stream");
+        source.onmessage = function(event) {
+            document.getElementById("logs").innerHTML += event.data + "<br>";
+        };
+    </script>
+
+    </body>
+    </html>
+    """
+    return HTMLResponse(content=html_content, status_code=200)
+
+
 @app.get("/stream")
 async def chat(prompt = "Once upon a time there was a "):
     completion = llm(prompt)
@@ -39,7 +78,7 @@ async def chat(prompt = "Once upon a time there was a "):
         yield prompt
         for chat_chunk in chat_chunks:
             yield chat_chunk
-        yield "
+        yield ""
 
     return StreamingResponse(server_sent_events(completion))
 
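
The hunks above only show fragments of main.py: the llm callable, the imports for HTMLResponse and StreamingResponse, and the server_sent_events generator that feeds StreamingResponse are all defined outside the visible context. The sketch below is one plausible way those pieces fit together so that the EventSource client added in /demo actually receives messages; the llm stub, the "data: ...\n\n" framing, and the text/event-stream media type are assumptions for illustration, not part of this commit.

# Minimal sketch, not the Space's actual main.py. `llm` is stubbed out, and the
# SSE framing ("data: ...\n\n") plus media_type="text/event-stream" are assumed,
# since neither is visible in the hunks above.
from fastapi import FastAPI
from fastapi.responses import HTMLResponse, StreamingResponse
from pydantic import BaseModel

app = FastAPI()


def llm(prompt: str):
    # Stand-in for the real model call; yields a handful of text chunks.
    for chunk in ["little ", "robot ", "who ", "told ", "stories."]:
        yield chunk


class ChatCompletionRequest(BaseModel):
    prompt: str


@app.get("/stream")
async def chat(prompt = "Once upon a time there was a "):
    completion = llm(prompt)

    async def server_sent_events(chat_chunks):
        # EventSource only fires onmessage for payloads framed as "data: ...\n\n";
        # the diff yields raw strings, so the framing here is an assumption.
        yield f"data: {prompt}\n\n"
        for chat_chunk in chat_chunks:
            yield f"data: {chat_chunk}\n\n"
        yield "data: \n\n"

    return StreamingResponse(server_sent_events(completion),
                             media_type="text/event-stream")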
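
The /demo page hard-codes http://localhost:8000/stream, so it only resolves when the app is run locally (for example with uvicorn main:app --port 8000). To watch the stream outside the browser, a small client like the following works; httpx is an assumption here and is not added by this commit.

# Hypothetical smoke test for the /stream endpoint; start the app first.
import httpx

with httpx.stream("GET", "http://localhost:8000/stream") as response:
    for line in response.iter_lines():
        if line:            # blank lines separate SSE events
            print(line)     # e.g. "data: Once upon a time there was a "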