Update chat_demo.py
chat_demo.py (+7 -7)
@@ -7,8 +7,8 @@ import tempfile
 import subprocess
 import threading
 
-BASE_URL = "http://localhost:
-MODEL_NAME = "
+BASE_URL = "http://localhost:5100/v1"
+MODEL_NAME = "placeholder-model-id"
 
 def read_output(process):
     """Reads the output from the subprocess and prints it to the console."""
@@ -33,7 +33,7 @@ def start_server(command):
 
     return process
 
-server_process = start_server(["./
+server_process = start_server(["./llama.cpp/build/bin/llama-server", "-m" ,"./llama.cpp/build/ERNIE-4.5-0.3B-PT-UD-Q8_K_XL.gguf", "-c", "32000", "--jinja", "--no-mmap", "--port", "5100"])
 
 
 cli = OpenAI(api_key="sk-nokey", base_url=BASE_URL)
@@ -52,7 +52,7 @@ def openai_call(message, history, system_prompt, max_new_tokens):
         model=MODEL_NAME,
         messages=history,
         max_tokens=max_new_tokens,
-        stop=["<|im_end|>", "</s>"],
+        #stop=["<|im_end|>", "</s>"],
         stream=True
     )
     reply = ""
@@ -97,11 +97,11 @@ with gr.Blocks() as demo:
         type="messages",
         additional_inputs=[
             gr.Textbox("You are a helpful AI assistant.", label="System Prompt"),
-            gr.Slider(30,
+            gr.Slider(30, 8192, label="Max new tokens"),
         ],
         additional_outputs=[conv_state],
-        title="
-        description="
+        title="Edge level LLM Chat demo",
+        description="In this demo, you can chat with sub-1B param range LLM - they are small enough to run with reasonable speed on most end user device. **Warning:** Do not input sensitive info - assume everything is public!"
     )
     download_file = gr.File()
     download_btn = gr.Button("Export Conversation for Download") \
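
For context, the hunk headers reference read_output and start_server, which the diff shows only in part. A minimal sketch of how these helpers likely fit together, based on the visible imports, the "return process" line, and the threading import (the exact Popen arguments are assumptions):

    import subprocess
    import threading

    def read_output(process):
        """Reads the output from the subprocess and prints it to the console."""
        for line in iter(process.stdout.readline, b""):
            print(line.decode(errors="replace"), end="")

    def start_server(command):
        # Launch llama-server as a child process; stream its logs from a
        # daemon thread so they don't block the Gradio app.
        process = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        threading.Thread(target=read_output, args=(process,), daemon=True).start()
        return process

With the commit's arguments, this launches the ERNIE-4.5-0.3B GGUF on port 5100, matching the new BASE_URL.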
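The third hunk comments out the hard-coded stop strings, which are likely redundant once the server runs with --jinja, since llama-server then applies the model's own chat template and stop tokens. A sketch of the surrounding openai_call generator, assuming the usual Gradio streaming pattern (the history handling is an assumption, not the commit's code):

    def openai_call(message, history, system_prompt, max_new_tokens):
        # Assumed wrapper: prepend the system prompt, append the user turn,
        # then stream the completion so Gradio can render partial replies.
        history = [{"role": "system", "content": system_prompt}] + history
        history.append({"role": "user", "content": message})
        stream = cli.chat.completions.create(
            model=MODEL_NAME,
            messages=history,
            max_tokens=max_new_tokens,
            # stop=["<|im_end|>", "</s>"],  # disabled in this commit
            stream=True
        )
        reply = ""
        for chunk in stream:
            reply += chunk.choices[0].delta.content or ""
            yield reply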
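The last hunk fills in the ChatInterface metadata and the max-token slider. Not shown in the diff is how the export button connects conv_state to the gr.File output; a hypothetical wiring consistent with the visible widgets and the tempfile import (export_conversation and its JSON format are assumptions, not the commit's code):

    import json
    import tempfile

    def export_conversation(history):
        # Hypothetical helper: dump the tracked conversation to a JSON file
        # that gr.File can serve for download.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json",
                                         delete=False) as f:
            json.dump(history, f, indent=2)
            return f.name

    download_btn.click(export_conversation, inputs=conv_state, outputs=download_file)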