Update app.py

app.py CHANGED
@@ -4,15 +4,17 @@ import uuid
 import copy
 import pandas as pd
 import openai
+import re
 from requests.models import ChunkedEncodingError
 
-st.set_page_config(page_title='ChatGPT Assistant', layout='wide')
+st.set_page_config(page_title='ChatGPT Assistant', layout='wide', page_icon='🤖')
+
 # 自定义元素样式
 # 第一个是减少侧边栏顶部空白,不同版本的st存在区别(此处适用1.19.0)
 st.markdown("""
 <style>
 div.css-1vq4p4l.e1fqkh3o4 {
-    padding-top:
+    padding-top: 2rem !important;
 }
 .avatar {
     display: flex;
@@ -44,10 +46,8 @@ st.markdown("""
     margin : 2px;
 }
 #chat-window{
-    color: black;
     padding: 10px 0px;
     text-decoration: none;
-    font-size: 25px;
 }
 #chat-window:hover{
     color: blue;
@@ -56,7 +56,8 @@ st.markdown("""
 """, unsafe_allow_html=True)
 if "initial_settings" not in st.session_state:
     # 历史聊天窗口
-    st.session_state[
+    st.session_state["path"] = set_chats_path()
+    st.session_state['history_chats'] = get_history_chats(st.session_state["path"])
     # ss参数初始化
     st.session_state['pre_chat'] = None
     st.session_state['if_chat_change'] = False
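The new `set_chats_path()` and `get_history_chats()` helpers are defined elsewhere in the repository and are not part of this diff. As a rough, hypothetical sketch of what such helpers usually do (a writable per-instance directory plus a listing of saved chat files; the names, file layout and fallback chat name below are assumptions, not taken from this commit):

```python
import os
import uuid

def set_chats_path():
    # Hypothetical sketch: a writable folder holding this instance's chat files.
    path = os.path.join("chat_history", str(uuid.uuid4()))
    os.makedirs(path, exist_ok=True)
    return path

def get_history_chats(path):
    # Hypothetical sketch: one saved JSON file per chat window.
    chats = [f[:-len(".json")] for f in os.listdir(path) if f.endswith(".json")]
    return chats or ["New Chat_" + str(uuid.uuid4())]
```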
@@ -68,7 +69,7 @@ if "initial_settings" not in st.session_state:
 
 with st.sidebar:
     # 此处href与下文的st.header内容相对应,跳转锚点
-    st.markdown("
+    st.markdown("# 🤖 聊天窗口")
     current_chat = st.radio(
         label='历史聊天窗口',
         format_func=lambda x: x.split('_')[0] if '_' in x else x,
@@ -100,7 +101,7 @@ with st.sidebar:
         else:
             st.session_state["current_chat_index"] = 0
         st.session_state['history_chats'].remove(current_chat)
-        remove_data(current_chat)
+        remove_data(st.session_state["path"], current_chat)
 
 
     c1, c2 = st.columns(2)
@@ -109,14 +110,18 @@ with st.sidebar:
     delete_chat_button = c2.button('删除', use_container_width=True, key='delete_chat_button',
                                    on_click=delete_chat_button_callback)
 
+    st.write("\n")
+    st.write("\n")
+    st.markdown("<a href='#chatgpt-assistant' id='chat-window'>➡️ 直达输入区</a>",unsafe_allow_html=True)
+
 # 加载数据
 if ("history" + current_chat not in st.session_state) or (st.session_state['if_chat_change']):
-    for key, value in load_data(current_chat).items():
+    for key, value in load_data(st.session_state["path"], current_chat).items():
         if key == 'history':
             st.session_state[key + current_chat] = value
         else:
             for k, v in value.items():
-                st.session_state[k + current_chat] = v
+                st.session_state[k + current_chat + 'default'] = v
     st.session_state['if_chat_change'] = False
 
 # 对话展示
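Loaded settings now land in `k + current_chat + 'default'` instead of the widget key itself. The likely reason is Streamlit's rule that a key owned by a widget created with a `value=`/`index=` default cannot also be written through `st.session_state`; parking the loaded value under a separate `...default` key and feeding it back in as the widget's initial value avoids that conflict. A minimal sketch of the pattern, with illustrative names only:

```python
import streamlit as st

chat = "demo-chat"

# Value loaded from disk is parked under a separate "...default" key …
if "temperature" + chat + "default" not in st.session_state:
    st.session_state["temperature" + chat + "default"] = 1.0

# … and handed to the widget as its initial value; the widget itself
# owns the plain "temperature" + chat key.
st.slider("Temperature", 0.0, 2.0,
          st.session_state["temperature" + chat + "default"], 0.1,
          key="temperature" + chat)
```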
@@ -136,8 +141,8 @@ def write_data(new_chat_name=current_chat):
         "context_input": st.session_state["context_input" + current_chat],
         "context_level": st.session_state["context_level" + current_chat],
     }
-    save_data(
-        st.session_state["contexts"])
+    save_data(st.session_state["path"], new_chat_name, st.session_state["history" + current_chat],
+              st.session_state["paras"], st.session_state["contexts"])
 
 
 # 输入内容展示
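`save_data` now takes the storage path and chat name explicitly, alongside the history, parameters and context settings. Its implementation (and that of `load_data` / `remove_data`) is outside this diff; a plausible JSON-based sketch, with the file layout and field names assumed rather than taken from the commit:

```python
import json
import os

def save_data(path, chat_name, history, paras, contexts):
    # Hypothetical sketch: one JSON file per chat window.
    with open(os.path.join(path, chat_name + ".json"), "w", encoding="utf-8") as f:
        json.dump({"history": history, "paras": paras, "contexts": contexts},
                  f, ensure_ascii=False)

def load_data(path, chat_name):
    # Counterpart used in the load hunk above; returns the same structure.
    with open(os.path.join(path, chat_name + ".json"), encoding="utf-8") as f:
        return json.load(f)

def remove_data(path, chat_name):
    # Counterpart of the remove_data(path, current_chat) calls in this diff.
    file = os.path.join(path, chat_name + ".json")
    if os.path.exists(file):
        os.remove(file)
```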
@@ -155,12 +160,12 @@ tap_input, tap_context, tap_set = st.tabs(['💬 聊天', '🗒️ 预设', '⚙
 
 with tap_context:
     set_context_list = list(set_context_all.keys())
-    context_select_index = set_context_list.index(st.session_state['context_select' + current_chat])
+    context_select_index = set_context_list.index(st.session_state['context_select' + current_chat + "default"])
     st.selectbox(label='选择上下文', options=set_context_list, key='context_select' + current_chat,
                  index=context_select_index, on_change=write_data)
     st.caption(set_context_all[st.session_state['context_select' + current_chat]])
     st.text_area(label='补充或自定义上下文:', key="context_input" + current_chat,
-                 value=st.session_state['context_input' + current_chat],
+                 value=st.session_state['context_input' + current_chat + "default"],
                  on_change=write_data)
 
 with tap_set:
@@ -168,28 +173,31 @@ with tap_set:
         st.session_state['history' + current_chat] = copy.deepcopy(initial_content_history)
         write_data()
 
-
     st.button("清空聊天记录", use_container_width=True, on_click=clear_button_callback)
 
-    st.
-    st.
-
+    st.markdown("OpenAI API Key (可选)")
+    st.text_input("OpenAI API Key (可选)", type='password', key='apikey_input', label_visibility='collapsed')
+    st.caption("此Key仅在当前网页有效,且优先级高于Secrets中的配置,仅自己可用,他人无法共享。[官网获取](https://platform.openai.com/account/api-keys)")
+
+    st.markdown("包含对话次数:")
+    st.slider("Context Level", 0, 10, st.session_state['context_level' + current_chat + "default"], 1, on_change=write_data,
+              key='context_level' + current_chat, help="表示每次会话中包含的历史对话次数,预设内容不计算在内。")
 
-    st.
-    st.slider("Temperature", 0.0, 2.0, st.session_state["temperature" + current_chat], 0.1,
+    st.markdown("模型参数:")
+    st.slider("Temperature", 0.0, 2.0, st.session_state["temperature" + current_chat + "default"], 0.1,
               help="""在0和2之间,应该使用什么样的采样温度?较高的值(如0.8)会使输出更随机,而较低的值(如0.2)则会使其更加集中和确定性。
               我们一般建议只更改这个参数或top_p参数中的一个,而不要同时更改两个。""",
               on_change=write_data, key='temperature' + current_chat)
-    st.slider("Top P", 0.1, 1.0, st.session_state["top_p" + current_chat], 0.1,
+    st.slider("Top P", 0.1, 1.0, st.session_state["top_p" + current_chat + "default"], 0.1,
              help="""一种替代采用温度进行采样的方法,称为“基于核心概率”的采样。在该方法中,模型会考虑概率最高的top_p个标记的预测结果。
              因此,当该参数为0.1时,只有包括前10%概率质量的标记将被考虑。我们一般建议只更改这个参数或采样温度参数中的一个,而不要同时更改两个。""",
              on_change=write_data, key='top_p' + current_chat)
     st.slider("Presence Penalty", -2.0, 2.0,
-              st.session_state["presence_penalty" + current_chat], 0.1,
+              st.session_state["presence_penalty" + current_chat + "default"], 0.1,
              help="""该参数的取值范围为-2.0到2.0。正值会根据新标记是否出现在当前生成的文本中对其进行惩罚,从而增加模型谈论新话题的可能性。""",
              on_change=write_data, key='presence_penalty' + current_chat)
     st.slider("Frequency Penalty", -2.0, 2.0,
-              st.session_state["frequency_penalty" + current_chat], 0.1,
+              st.session_state["frequency_penalty" + current_chat + "default"], 0.1,
              help="""该参数的取值范围为-2.0到2.0。正值会根据新标记在当前生成的文本中的已有频率对其进行惩罚,从而减少模型直接重复相同语句的可能性。""",
              on_change=write_data, key='frequency_penalty' + current_chat)
     st.caption("[官网参数说明](https://platform.openai.com/docs/api-reference/completions/create)")
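The new `apikey_input` field gives each visitor a session-local key which, as a later hunk shows, takes precedence over the key stored in the Space's Secrets. Reading the fallback through `st.secrets` also explains the `except (FileNotFoundError, KeyError)` branch further down: a missing secrets file raises `FileNotFoundError` and a missing entry raises `KeyError`. Roughly, with only the `"apikey"` entry name taken from the code and everything else illustrative:

```python
import streamlit as st

session_key = st.session_state.get("apikey_input", "")
try:
    fallback_key = st.secrets["apikey"]   # .streamlit/secrets.toml or the Space's Secrets
except (FileNotFoundError, KeyError):
    fallback_key = ""                     # nothing configured on the server side

api_key = session_key or fallback_key     # per-session key wins when provided
```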
@@ -204,6 +212,11 @@ with tap_input:
         return res
 
 
+    def remove_hashtag_space(text):
+        res = re.sub(r"(#+)\s*", r"\1", text)
+        return res
+
+
     def extract_chars(text, num):
         char_num = 0
         chars = ''
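The new `remove_hashtag_space` helper (used in the input callback below) deletes the whitespace that follows a run of `#` characters, presumably so that a leading `#` typed by the user is not rendered as an oversized Markdown heading when the message is echoed back. For example:

```python
import re

def remove_hashtag_space(text):
    # "#  heading" -> "#heading"; note that '#' runs elsewhere in the text are affected too.
    return re.sub(r"(#+)\s*", r"\1", text)

print(remove_hashtag_space("# Title one\n## two"))  # -> "#Title one\n##two"
```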
@@ -221,14 +234,15 @@ with tap_input:
 
     def user_input_area_callback():
         # 清空输入框
-        st.session_state['user_input_content'] = st.session_state['user_input_area']
+        st.session_state['user_input_content'] = (remove_hashtag_space(st.session_state['user_input_area'])
+                                                  .replace('\n', '\n\n'))
         st.session_state['user_input_area'] = ''
 
         # 修改窗口名称
         user_input_content = st.session_state['user_input_content']
         df_history = pd.DataFrame(st.session_state["history" + current_chat])
         if len(df_history.query('role!="system"')) == 0:
-            remove_data(current_chat)
+            remove_data(st.session_state["path"], current_chat)
             current_chat_index = st.session_state['history_chats'].index(current_chat)
             new_name = extract_chars(user_input_content, 18) + '_' + str(uuid.uuid4())
             st.session_state['history_chats'][current_chat_index] = new_name
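The callback keeps using the usual Streamlit clear-on-submit pattern: a widget's own key may be reassigned inside its `on_change` callback, so the text is copied out to `user_input_content` and the `user_input_area` widget is blanked before the next rerun. Stripped of the app-specific parts, the pattern looks like this:

```python
import streamlit as st

def _submit():
    # Copy the text out, then clear the widget for the next rerun
    # (reassigning a widget's key is allowed inside its own callback).
    st.session_state["submitted_text"] = st.session_state["input_box"]
    st.session_state["input_box"] = ""

st.text_area("Input", key="input_box", on_change=_submit)
st.write(st.session_state.get("submitted_text", ""))
```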
@@ -237,22 +251,20 @@ with tap_input:
             write_data(new_name)
 
 
-    st.text_area("**输入:**", key="user_input_area", on_change=user_input_area_callback
-                 help="点击侧边栏标题可直接跳转此输入区 \n"
-                      "此功能目前在Hugging Face不可用")
+    st.text_area("**输入:**", key="user_input_area", on_change=user_input_area_callback)
     if st.session_state['user_input_content'].strip() != '':
         st.session_state['pre_user_input_content'] = st.session_state['user_input_content']
         st.session_state['user_input_content'] = ''
         show_each_message(st.session_state['pre_user_input_content'], 'user',
                           [area_user_svg.markdown, area_user_content.markdown])
-        history_tem = st.session_state["history" + current_chat] + \
-                      [{"role": "user", "content": st.session_state['pre_user_input_content'].replace('\n', '\n\n')}]
         context_level_tem = st.session_state['context_level' + current_chat]
+        history_tem = get_history_input(st.session_state["history" + current_chat], context_level_tem) + \
+                      [{"role": "user", "content": st.session_state['pre_user_input_content']}]
         history_need_input = ([{"role": "system",
                                 "content": set_context_all[st.session_state['context_select' + current_chat]]}]
                               + [{"role": "system",
                                   "content": st.session_state['context_input' + current_chat]}]
-                              +
+                              + history_tem)
         paras_need_input = {
             "temperature": st.session_state["temperature" + current_chat],
             "top_p": st.session_state["top_p" + current_chat],
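`history_tem` is now built with `get_history_input`, which is defined elsewhere in the file and not shown in this diff; judging from the Context Level slider's help text, it limits how many previous exchanges are sent along with the new message. One plausible reading, an assumption rather than the commit's actual code:

```python
def get_history_input(history, context_level):
    # Hypothetical sketch: keep only the last `context_level` user/assistant
    # exchanges (two messages each); system prompts are prepended separately.
    if context_level <= 0:
        return []
    non_system = [m for m in history if m["role"] != "system"]
    return non_system[-2 * context_level:]
```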
@@ -261,11 +273,15 @@ with tap_input:
         }
         with st.spinner("🤔"):
             try:
-                openai.api_key = st.secrets["apikey"]
+                if apikey := st.session_state['apikey_input']:
+                    openai.api_key = apikey
+                else:
+                    openai.api_key = st.secrets["apikey"]
                 r = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=history_need_input, stream=True,
                                                  **paras_need_input)
             except (FileNotFoundError, KeyError):
-                area_error.error("缺失 OpenAI API Key
+                area_error.error("缺失 OpenAI API Key,请在复制项目后配置Secrets,或者在设置中进行临时配置。"
+                                 "详情见[项目仓库](https://github.com/PierXuY/ChatGPT-Assistant)。")
             except openai.error.AuthenticationError:
                 area_error.error("无效的 OpenAI API Key。")
             except openai.error.APIConnectionError as e:
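Apart from the key handling, the request is unchanged: it still uses the legacy `openai` 0.x client with `stream=True`, so `r` is an iterator of chunks whose `delta` fields carry the incremental text. The rendering loop sits outside this hunk; a self-contained sketch of how such a stream is typically consumed (function and callback names are illustrative, not the app's):

```python
def stream_answer(r, render):
    # Accumulate a streamed ChatCompletion (legacy openai 0.x API) and re-render
    # the partial answer after each chunk, e.g. into a st.empty() placeholder.
    answer = ""
    for chunk in r:
        delta = chunk["choices"][0]["delta"]   # may be empty on the final chunk
        answer += delta.get("content", "")
        render(answer)
    return answer
```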
@@ -307,4 +323,4 @@ if ("r" in st.session_state) and (current_chat == st.session_state["chat_of_r"])
     if current_chat + 'report' in st.session_state:
         st.session_state.pop(current_chat + 'report')
     if 'r' in st.session_state:
-        st.session_state.pop("r")
+        st.session_state.pop("r")