Commit cd08eb6 ("ENV")
Parent(s): 87d0d80

Files changed:
- app.py (+39 -33)
- awesome_chat.py (+7 -3)
- config.gradio.yaml (+1 -1)
app.py
CHANGED
@@ -10,22 +10,21 @@ os.makedirs("public/images", exist_ok=True)
 os.makedirs("public/audios", exist_ok=True)
 os.makedirs("public/videos", exist_ok=True)
 
+HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
+OPENAI_KEY = os.environ.get("OPENAI_KEY")
+
 class Client:
     def __init__(self) -> None:
-        self.OPENAI_KEY =
-        self.HUGGINGFACE_TOKEN =
+        self.OPENAI_KEY = OPENAI_KEY
+        self.HUGGINGFACE_TOKEN = HUGGINGFACE_TOKEN
         self.all_messages = []
 
     def set_key(self, openai_key):
         self.OPENAI_KEY = openai_key
-        if len(self.HUGGINGFACE_TOKEN)>0:
-            gr.update(visible = True)
         return self.OPENAI_KEY
 
     def set_token(self, huggingface_token):
         self.HUGGINGFACE_TOKEN = huggingface_token
-        if len(self.OPENAI_KEY)>0:
-            gr.update(visible = True)
         return self.HUGGINGFACE_TOKEN
 
     def add_message(self, content, role):
@@ -60,7 +59,7 @@ class Client:
         return urls, image_urls, audio_urls, video_urls
 
     def add_text(self, messages, message):
-        if
+        if not self.OPENAI_KEY or not self.OPENAI_KEY.startswith("sk-") or not self.HUGGINGFACE_TOKEN or not self.HUGGINGFACE_TOKEN.startswith("hf_"):
            return messages, "Please set your OpenAI API key and Hugging Face token first!!!"
         self.add_message(message, "user")
         messages = messages + [(message, None)]
@@ -94,7 +93,7 @@ class Client:
         return messages, ""
 
     def bot(self, messages):
-        if
+        if not self.OPENAI_KEY or not self.OPENAI_KEY.startswith("sk-") or not self.HUGGINGFACE_TOKEN or not self.HUGGINGFACE_TOKEN.startswith("hf_"):
            return messages, {}
         message, results = chat_huggingface(self.all_messages, self.OPENAI_KEY, self.HUGGINGFACE_TOKEN)
         urls, image_urls, audio_urls, video_urls = self.extract_medias(message)
@@ -129,27 +128,29 @@ with gr.Blocks(css=css) as demo:
     gr.Markdown("<p align='center'><img src='https://i.ibb.co/qNH3Jym/logo.png' height='25' width='95'></p>")
     gr.Markdown("<p align='center' style='font-size: 20px;'>A system to connect LLMs with ML community. See our <a href='https://github.com/microsoft/JARVIS'>Project</a> and <a href='http://arxiv.org/abs/2303.17580'>Paper</a>.</p>")
     gr.HTML('''<center><a href="https://huggingface.co/spaces/microsoft/HuggingGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key and Hugging Face Token</center>''')
-
-    with gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if not OPENAI_KEY:
+        with gr.Row().style():
+            with gr.Column(scale=0.85):
+                openai_api_key = gr.Textbox(
+                    show_label=False,
+                    placeholder="Set your OpenAI API key here and press Enter",
+                    lines=1,
+                    type="password"
+                ).style(container=False)
+            with gr.Column(scale=0.15, min_width=0):
+                btn1 = gr.Button("Submit").style(full_height=True)
+
+    if not HUGGINGFACE_TOKEN:
+        with gr.Row().style():
+            with gr.Column(scale=0.85):
+                hugging_face_token = gr.Textbox(
+                    show_label=False,
+                    placeholder="Set your Hugging Face Token here and press Enter",
+                    lines=1,
+                    type="password"
+                ).style(container=False)
+            with gr.Column(scale=0.15, min_width=0):
+                btn3 = gr.Button("Submit").style(full_height=True)
 
 
     with gr.Row().style():
@@ -181,12 +182,17 @@ with gr.Blocks(css=css) as demo:
     def bot(state, chatbot):
         return state["client"].bot(chatbot)
 
-
+    if not OPENAI_KEY:
+        openai_api_key.submit(set_key, [state, openai_api_key], [openai_api_key])
+        btn1.click(set_key, [state, openai_api_key], [openai_api_key])
+
+    if not HUGGINGFACE_TOKEN:
+        hugging_face_token.submit(set_token, [state, hugging_face_token], [hugging_face_token])
+        btn3.click(set_token, [state, hugging_face_token], [hugging_face_token])
+
     txt.submit(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], [chatbot, results])
-    hugging_face_token.submit(set_token, [state, hugging_face_token], [hugging_face_token])
-    btn1.click(set_key, [state, openai_api_key], [openai_api_key])
     btn2.click(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], [chatbot, results])
-
+
 
     gr.Examples(
         examples=["Given a collection of image A: /examples/a.jpg, B: /examples/b.jpg, C: /examples/c.jpg, please tell me how many zebras in these picture?",
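Net effect of the app.py diff above: both credentials are read from environment variables at startup, the key and token textboxes are only rendered when the corresponding variable is missing, and add_text/bot refuse to run until the stored values look like a real OpenAI key ("sk-...") and Hugging Face token ("hf_..."). The following is a minimal, self-contained sketch of that pattern, not the Space's actual code; the helper names (keys_look_valid, set_openai_key, respond) and the simplified layout are illustrative assumptions.

import os
import gradio as gr   # sketch assumes Gradio is installed (the Space uses gr.Blocks)

# Read both credentials from the environment first, as the commit does.
OPENAI_KEY = os.environ.get("OPENAI_KEY")
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")

def keys_look_valid(openai_key, hf_token):
    # Same shape check as the diff: OpenAI keys start with "sk-", HF tokens with "hf_".
    return bool(openai_key) and openai_key.startswith("sk-") \
        and bool(hf_token) and hf_token.startswith("hf_")

def set_openai_key(state, key):
    # Store the key typed into the fallback textbox, then clear the textbox.
    state["openai_key"] = key
    return state, ""

def respond(state, message):
    if not keys_look_valid(state["openai_key"], state["hf_token"]):
        return "Please set your OpenAI API key and Hugging Face token first!!!"
    return f"(would call chat_huggingface with: {message!r})"

with gr.Blocks() as demo:
    state = gr.State({"openai_key": OPENAI_KEY, "hf_token": HUGGINGFACE_TOKEN})

    # Render an input box only for a credential that was NOT supplied via ENV;
    # the real app does the same for the Hugging Face token with a second row.
    if not OPENAI_KEY:
        openai_box = gr.Textbox(show_label=False, type="password",
                                placeholder="Set your OpenAI API key here and press Enter")
        openai_box.submit(set_openai_key, [state, openai_box], [state, openai_box])

    txt = gr.Textbox(show_label=False, placeholder="Ask HuggingGPT something")
    out = gr.Textbox(label="Response")
    txt.submit(respond, [state, txt], [out])

demo.launch()

Reading the keys from ENV first is what lets a duplicated Space run without ever showing a key field in the UI, which matches the "Duplicate the Space and run securely" banner kept in the page header above.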
awesome_chat.py
CHANGED
@@ -40,6 +40,8 @@ config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
 if not os.path.exists("logs"):
     os.mkdir("logs")
 
+now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+
 DATASET_REPO_URL = "https://huggingface.co/datasets/tricktreat/HuggingGPT_logs"
 LOG_HF_TOKEN = os.environ.get("LOG_HF_TOKEN")
 repo = Repository(
@@ -58,6 +60,7 @@ logger.addHandler(handler)
 
 log_file = config["log_file"]
 if log_file:
+    log_file = log_file.replace("TIMESTAMP", now)
     filehandler = logging.FileHandler(log_file)
     filehandler.setLevel(logging.DEBUG)
     filehandler.setFormatter(formatter)
@@ -200,14 +203,15 @@ def get_id_reason(choose_str):
 
 def record_case(success, **args):
     # time format
+    global now
     if success:
-        f = open(f"logs/
+        f = open(f"logs/log_success_{now}.jsonl", "a")
     else:
-        f = open(f"logs/
+        f = open(f"logs/log_fail_{now}.jsonl", "a")
     log = args
     f.write(json.dumps(log) + "\n")
     f.close()
-    commit_url = repo.push_to_hub()
+    commit_url = repo.push_to_hub(blocking=True)
 
 def image_to_bytes(img_url):
     img_byte = io.BytesIO()
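In awesome_chat.py the commit computes one timestamp per process and reuses it for both the debug log and the success/failure case logs, so each run of the Space writes its own files into the cloned logging dataset before repo.push_to_hub(blocking=True) uploads them. A minimal sketch of that naming scheme (the example record fields are made up, and the dataset push is left out):

import datetime
import json
import os

os.makedirs("logs", exist_ok=True)

# One timestamp per process: every run gets its own pair of JSONL case logs.
now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

def record_case(success, **args):
    # Append one JSON object per handled request, split into success/fail files.
    name = "log_success" if success else "log_fail"
    with open(f"logs/{name}_{now}.jsonl", "a") as f:
        f.write(json.dumps(args) + "\n")
    # The Space additionally calls repo.push_to_hub(blocking=True) here so the
    # commit to the logging dataset finishes before the function returns.

record_case(True, user_input="show me a cat image", task="text-to-image")
record_case(False, user_input="broken request", reason="model endpoint unavailable")

Switching to blocking=True trades a little latency for the certainty that the push has completed before the handler returns, which matters on Spaces where the container can be recycled at any time.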
config.gradio.yaml
CHANGED
@@ -4,7 +4,7 @@ huggingface:
   token: # required: huggingface token @ https://huggingface.co/settings/tokens
 dev: false
 debug: true
-log_file: logs/
+log_file: logs/debug_TIMESTAMP.log
 model: text-davinci-003 # text-davinci-003
 use_completion: true
 inference_mode: hybrid # local, huggingface or hybrid
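config.gradio.yaml now stores a log-file template rather than a fixed path; at startup awesome_chat.py substitutes the run timestamp for the TIMESTAMP placeholder before attaching the file handler (the log_file.replace("TIMESTAMP", now) line in the diff above). A short sketch of that startup flow, assuming it runs from the Space's root so config.gradio.yaml is on disk; the logger name is a placeholder for this sketch:

import datetime
import logging
import os

import yaml  # PyYAML, same loader call the Space uses

now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

config = yaml.load(open("config.gradio.yaml", "r"), Loader=yaml.FullLoader)

logger = logging.getLogger("hugginggpt")   # placeholder name
logger.setLevel(logging.DEBUG)

log_file = config["log_file"]
if log_file:
    # "logs/debug_TIMESTAMP.log" -> e.g. "logs/debug_2023-04-10-12-00-00.log"
    log_file = log_file.replace("TIMESTAMP", now)
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    filehandler = logging.FileHandler(log_file)
    filehandler.setLevel(logging.DEBUG)
    logger.addHandler(filehandler)

logger.debug("each run of the Space now writes to its own debug log")

Keeping TIMESTAMP in the YAML, rather than a concrete filename, lets the same config serve every run without being rewritten.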