Update to_md function for illustrating code blocks
app.py
CHANGED
@@ -1,76 +1,10 @@
-import openai
-import tiktoken
-
-import datetime
-import json
-import os
-
-openai.api_key = os.getenv('API_KEY')
-openai.request_times = 0
-
-def ask(question, history, behavior):
-    openai.request_times += 1
-    print(f"request times {openai.request_times}: {datetime.datetime.now()}: {question}")
-    try:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=forget_long_term(
-                [
-                    {"role":"system", "content":content}
-                    for content in behavior
-                ] + [
-                    {"role":"user" if i%2==0 else "assistant", "content":content}
-                    for i,content in enumerate(history + [question])
-                ]
-            )
-        )["choices"][0]["message"]["content"]
-        while response.startswith("\n"):
-            response = response[1:]
-    except Exception as e:
-        print(e)
-        response = 'Timeout! Please wait a few minutes and retry'
-    history = history + [question, response]
-    return history
-
-def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
-    """Returns the number of tokens used by a list of messages."""
-    try:
-        encoding = tiktoken.encoding_for_model(model)
-    except KeyError:
-        encoding = tiktoken.get_encoding("cl100k_base")
-    if model == "gpt-3.5-turbo":  # note: future models may deviate from this
-        num_tokens = 0
-        for message in messages:
-            num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
-            for key, value in message.items():
-                num_tokens += len(encoding.encode(value))
-                if key == "name":  # if there's a name, the role is omitted
-                    num_tokens += -1  # role is always required and always 1 token
-        num_tokens += 2  # every reply is primed with <im_start>assistant
-        return num_tokens
-    else:
-        raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
-See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
-
-def forget_long_term(messages, max_num_tokens=4000):
-    while num_tokens_from_messages(messages)>max_num_tokens:
-        if messages[0]["role"]=="system" and not len(messages[0]["content"])>=max_num_tokens:
-            messages = messages[:1] + messages[2:]
-        else:
-            messages = messages[1:]
-    return messages
-
-
-import gradio as gr
-
-
 def to_md(content):
     is_inside_code_block = False
     output_spans = []
     for i in range(len(content)):
         if content[i]=="\n" and not is_inside_code_block:
-            if len(output_spans)>0 and output_spans[-1]
-
+            if len(output_spans)>0 and output_spans[-1].endswith("```"):
+                output_spans.append("\n")
         else:
             output_spans.append("<br>")
         elif content[i]=="`":
@@ -78,70 +12,27 @@ def to_md(content):
             if len(output_spans)>=3 and all([output_spans[j]=="`" for j in [-3,-2,-1]]):
                 is_inside_code_block = not is_inside_code_block
                 output_spans = output_spans[:-3]
-                if
-                output_spans
-
+                if is_inside_code_block:
+                    if len(output_spans)==0:
+                        output_spans.append("```")
+                    elif output_spans[-1]=="<br>":
+                        output_spans[-1] = "\n"
+                        output_spans.append("```")
+                    elif output_spans[-1].endswith("\n"):
+                        output_spans.append("```")
+                    else:
+                        output_spans.append("\n```")
+
+                    if i+1<len(content) and content[i+1]!="\n":
+                        output_spans.append("\n")
+                else:
+                    if output_spans[-1].endswith("\n"):
+                        output_spans.append("```")
+                    else:
+                        output_spans.append("\n```")
+
+                    if i+1<len(content) and content[i+1]!="\n":
+                        output_spans.append("\n")
         else:
             output_spans.append(content[i])
-    return "".join(output_spans)
-
-
-def predict(question, history=[], behavior=[]):
-    history = ask(question, history, behavior)
-    response = [(to_md(history[i]),to_md(history[i+1])) for i in range(0,len(history)-1,2)]
-    return "", history, response
-
-
-with gr.Blocks() as demo:
-
-    examples_txt = [
-        ['帮我写一个python脚本实现快排'],
-        ['如何用numpy提取数组的分位数?'],
-        ['how to match the code block in markdown such like ```def foo():\n    pass``` through regex in python?'],
-        ['how to load a pre-trained language model and generate sentences?'],
-    ]
-
-    examples_bhv = [
-        f"You are a helpful assistant. You will answer all the questions step-by-step.",
-        f"You are a helpful assistant. Today is {datetime.date.today()}.",
-    ]
-
-    gr.Markdown(
-        """
-        朋友你好,
-
-        这是我利用[gradio](https://gradio.app/creating-a-chatbot/)编写的一个小网页,用于以网页的形式给大家分享ChatGPT请求服务,希望你玩的开心。关于使用技巧或学术研讨,欢迎在[Community](https://huggingface.co/spaces/zhangjf/chatbot/discussions)中和我交流。
-
-        这一版相比于原版的[chatbot](https://huggingface.co/spaces/zhangjf/chatbot),用了较低版本的gradio==3.16.2,因而能更好地展示markdown中的源代码
-
-        p.s. 响应时间和聊天内容长度正相关,一般能在5秒~30秒内响应。
-        """)
-
-    behavior = gr.State([])
-
-    with gr.Column(variant="panel"):
-        with gr.Row().style(equal_height=True):
-            with gr.Column(scale=0.85):
-                bhv = gr.Textbox(show_label=False, placeholder="输入你想让ChatGPT扮演的人设").style(container=False)
-            with gr.Column(scale=0.15, min_width=0):
-                button_set = gr.Button("Set")
-        bhv.submit(fn=lambda x:(x,[x]), inputs=[bhv], outputs=[bhv, behavior])
-        button_set.click(fn=lambda x:(x,[x]), inputs=[bhv], outputs=[bhv, behavior])
-
-
-    state = gr.State([])
-
-    with gr.Column(variant="panel"):
-        chatbot = gr.Chatbot()
-        txt = gr.Textbox(show_label=False, placeholder="输入你想让ChatGPT回答的问题").style(container=False)
-        with gr.Row():
-            button_gen = gr.Button("Submit")
-            button_clr = gr.Button("Clear")
-
-    gr.Examples(examples=examples_bhv, inputs=bhv, label="Examples for setting behavior")
-    gr.Examples(examples=examples_txt, inputs=txt, label="Examples for asking question")
-    txt.submit(predict, [txt, state, behavior], [txt, state, chatbot])
-    button_gen.click(fn=predict, inputs=[txt, state, behavior], outputs=[txt, state, chatbot])
-    button_clr.click(fn=lambda :([],[]), inputs=None, outputs=[chatbot, state])
-
-demo.launch()
+    return "".join(output_spans)
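
The effect of the change, in short: to_md still turns plain newlines into <br> so that single line breaks render in gradio's Chatbot, but newlines adjacent to a ``` fence are now emitted as real newlines, so the Markdown parser still recognizes the code block. A quick sketch of that behavior, assuming the updated to_md from the diff above is defined in the same module (the sample strings here are illustrative, not from the Space):

    # Illustrative check of the updated to_md; assumes the function from
    # the diff above is in scope. Sample inputs are made up.
    print(to_md("line one\nline two"))
    # -> "line one<br>line two"   (prose newlines become <br>)

    print(to_md("Sort:\n```\nx = 1\n```\nDone"))
    # -> "Sort:\n```\nx = 1\n```\nDone"
    # The <br> that would precede the opening fence is rewritten back to a
    # real newline, and the newline after the closing fence is kept as "\n"
    # (the new endswith("```") branch), so the fence still parses.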
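For context on the deleted half of the file: forget_long_term kept each request inside the model's context window by dropping the oldest non-system messages until num_tokens_from_messages fell under the budget. A toy rerun of that policy with a stand-in, character-based counter (fake_num_tokens is hypothetical; the deleted code counted real BPE tokens with tiktoken):

    def fake_num_tokens(messages):
        # Stand-in for num_tokens_from_messages: counts characters,
        # not real tokens; only for illustrating the trimming policy.
        return sum(len(m["content"]) for m in messages)

    def forget_long_term(messages, max_num_tokens=30):
        # Same policy as the deleted function: keep a short leading
        # system message when possible, drop the oldest turns first.
        while fake_num_tokens(messages) > max_num_tokens:
            if messages[0]["role"] == "system" and len(messages[0]["content"]) < max_num_tokens:
                messages = messages[:1] + messages[2:]
            else:
                messages = messages[1:]
        return messages

    msgs = [{"role": "system", "content": "Be brief."},
            {"role": "user", "content": "first question ..."},
            {"role": "assistant", "content": "first answer ..."},
            {"role": "user", "content": "second question ..."}]
    print(forget_long_term(msgs))
    # The system message survives; the oldest user/assistant turns go first.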