lemonteaa committed
Commit 7977967 · verified · 1 Parent(s): e3f0a4b

Create chat_demo.py

Files changed (1)
  1. chat_demo.py +119 -0
chat_demo.py ADDED
@@ -0,0 +1,119 @@
+ import gradio as gr
+ from openai import OpenAI
+ import uuid
+ import json
+ import os
+ import tempfile
+ import subprocess
+ import threading
+
+ BASE_URL = "http://localhost:8080/v1"
+ MODEL_NAME = "bn"
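+ # NOTE: llama-server is assumed to listen on localhost:8080 with an
+ # OpenAI-compatible /v1 API; the model name is effectively a label here,
+ # since the server only serves the single GGUF passed on its command line.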
+
+ def read_output(process):
+     """Reads the output from the subprocess and prints it to the console."""
+     for line in iter(process.stdout.readline, ""):
+         print(line.rstrip())
+     process.stdout.close()
+
+ def start_server(command):
+     """Starts the server as a subprocess and captures its stdout."""
+     # Start the server process
+     process = subprocess.Popen(
+         command,
+         stdout=subprocess.PIPE,
+         stderr=subprocess.STDOUT,  # Redirect stderr to stdout
+         text=True  # Automatically decode the output to text
+     )
+
+     # Start a thread to read the output
+     output_thread = threading.Thread(target=read_output, args=(process,))
+     output_thread.daemon = True  # Daemonize the thread so it exits when the main program does
+     output_thread.start()
+
+     return process
+
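+ # Draining stdout in a thread matters: with stdout=PIPE, a server whose
+ # output is never read can fill the OS pipe buffer and block. The paths
+ # below assume ik_llama.cpp was cloned and built under the working
+ # directory, with the converted model at ./ik_llama.cpp/build/model-out.gguf.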
+ server_process = start_server(["./ik_llama.cpp/build/bin/llama-server", "-m", "./ik_llama.cpp/build/model-out.gguf", "--chat-template", "vicuna"])
+
+
+ cli = OpenAI(api_key="sk-nokey", base_url=BASE_URL)
+
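+ # Called by gr.ChatInterface (type="messages"): receives the new user message
+ # plus the prior history as a list of {"role": ..., "content": ...} dicts.
+ # Yielding partial strings streams the reply into the UI; the final yield
+ # also emits the full transcript for the conv_state additional output.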
+ def openai_call(message, history, system_prompt, max_new_tokens):
+     #print(history) # DEBUG
+     history.insert(0, {
+         "role": "system",
+         "content": system_prompt
+     })
+     history.append({
+         "role": "user",
+         "content": message
+     })
+     response = cli.chat.completions.create(
+         model=MODEL_NAME,
+         messages=history,
+         max_tokens=max_new_tokens,
+         stop=["<|im_end|>", "</s>"],
+         stream=True
+     )
+     reply = ""
+     for chunk in response:
+         delta = chunk.choices[0].delta.content
+         if delta is not None:
+             reply = reply + delta
+             yield reply, None
+     history.append({ "role": "assistant", "content": reply })
+     yield reply, gr.State(history)
+
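+ # Export helper: dump the transcript to a uuid-named JSON file for gr.File.
+ # conv_state arrives as the gr.State wrapper yielded above, hence .value.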
+ def gen_file(conv_state):
+     #print(conv_state) # DEBUG
+     if conv_state is None:
+         # Export clicked before any conversation took place
+         raise gr.Error("No conversation to export yet.")
+     fname = f"{str(uuid.uuid4())}.json"
+     #with tempfile.NamedTemporaryFile(prefix=str(uuid.uuid4()), suffix=".json", mode="w", encoding="utf-8", delete_on_close=False) as f:
+     with open(fname, mode="w", encoding="utf-8") as f:
+         json.dump(conv_state.value, f, indent=4, ensure_ascii=False)
+     return gr.File(fname), gr.State(fname)
+
+ def rm_file_wrap(path: str):
+     # Try to delete the file.
+     try:
+         os.remove(path)
+     except OSError as e:
+         # If it fails, inform the user.
+         print("Error: %s - %s." % (e.filename, e.strerror))
+
+ def on_download(download_data: gr.DownloadData):
+     print(f"deleting {download_data.file.path}")
+     rm_file_wrap(download_data.file.path)
+
+ def clean_file(orig_path):
+     # orig_path is the gr.State wrapper returned by gen_file, hence .value
+     print(f"Deleting {orig_path.value}")
+     rm_file_wrap(orig_path.value)
+
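+ # UI wiring: a ChatInterface for the conversation plus an export button that
+ # snapshots the transcript held in conv_state into a downloadable JSON file.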
+ with gr.Blocks() as demo:
+     #download=gr.DownloadButton(label="Download Conversation", value=None)
+     conv_state = gr.State()
+     orig_path = gr.State()
+     chat = gr.ChatInterface(
+         openai_call,
+         type="messages",
+         additional_inputs=[
+             gr.Textbox("You are a helpful AI assistant.", label="System Prompt"),
+             gr.Slider(30, 2048, label="Max new tokens"),
+         ],
+         additional_outputs=[conv_state],
+         title="Chat with BitNet using ik_llama",
+         description="Warning: Do not input sensitive info - assume everything is public! Also note this is experimental, and the ik_llama server doesn't seem to support arbitrary chat templates; we're using vicuna as an approximate match, so there might be some intelligence degradation."
+     )
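+     # Export chain: click writes the JSON and hands it to gr.File (Gradio
+     # serves a cached copy), then .success deletes the on-disk original;
+     # on_download removes the cached copy once the user has fetched it.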
+     download_file = gr.File()
+     download_btn = gr.Button("Export Conversation for Download") \
+         .click(fn=gen_file, inputs=[conv_state], outputs=[download_file, orig_path]) \
+         .success(fn=clean_file, inputs=[orig_path])
+     download_file.download(on_download, None, None)
+
+ try:
+     demo.queue(max_size=10, api_open=True).launch(server_name='0.0.0.0')
+ finally:
+     # Stop the server
+     server_process.terminate()
+     server_process.wait()
+     print("Server stopped.")
+