Commit 0431f29
Parent(s): bff5b69

duplicate space

Files changed:
- app.py +12 -5
- awesome_chat.py +1 -1
- models_server.py +4 -4
app.py
CHANGED

@@ -33,6 +33,12 @@ class Client:
         self.all_messages.append(message)
 
     def extract_medias(self, message):
+        # url_pattern = re.compile(r"(http(s?):|\/)?([\.\/_\w:-])*?")
+        urls = []
+        # for match in url_pattern.finditer(message):
+        #     if match.group(0) not in urls:
+        #         urls.append(match.group(0))
+
         image_pattern = re.compile(r"(http(s?):|\/)?([\.\/_\w:-])*?\.(jpg|jpeg|tiff|gif|png)")
         image_urls = []
         for match in image_pattern.finditer(message):
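Note: a minimal standalone sketch of what extract_medias does after this hunk; the sample message below is illustrative, not from the Space. The generic url_pattern stays commented out upstream, so only the per-media patterns populate their lists.

import re

# Same image pattern as in the diff above.
image_pattern = re.compile(r"(http(s?):|\/)?([\.\/_\w:-])*?\.(jpg|jpeg|tiff|gif|png)")

def extract_image_urls(message):
    image_urls = []
    for match in image_pattern.finditer(message):
        if match.group(0) not in image_urls:   # de-duplicate while keeping order
            image_urls.append(match.group(0))
    return image_urls

print(extract_image_urls("see /examples/a.jpg and https://example.com/b.png"))
# ['/examples/a.jpg', 'https://example.com/b.png']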
@@ -51,14 +57,14 @@ class Client:
             if match.group(0) not in video_urls:
                 video_urls.append(match.group(0))
 
-        return image_urls, audio_urls, video_urls
+        return urls, image_urls, audio_urls, video_urls
 
     def add_text(self, messages, message):
         if len(self.OPENAI_KEY) == 0 or not self.OPENAI_KEY.startswith("sk-") or len(self.HUGGINGFACE_TOKEN) == 0 or not self.HUGGINGFACE_TOKEN.startswith("hf_"):
-            return messages, "Please set your OpenAI API key first!!!"
+            return messages, "Please set your OpenAI API key and Hugging Face token first!!!"
         self.add_message(message, "user")
         messages = messages + [(message, None)]
-        image_urls, audio_urls, video_urls = self.extract_medias(message)
+        urls, image_urls, audio_urls, video_urls = self.extract_medias(message)
 
         for image_url in image_urls:
            if not image_url.startswith("http") and not image_url.startswith("public"):
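Note: the guard above only checks credential format, not validity. A hedged sketch of the predicate it encodes (the helper name is hypothetical):

def keys_look_usable(openai_key, huggingface_token):
    # Mirrors the condition in add_text: both credentials must be non-empty
    # and carry the expected prefixes ("sk-" for OpenAI, "hf_" for Hugging Face).
    return (len(openai_key) > 0 and openai_key.startswith("sk-")
            and len(huggingface_token) > 0 and huggingface_token.startswith("hf_"))

assert keys_look_usable("sk-abc123", "hf_xyz789")
assert not keys_look_usable("", "hf_xyz789")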
@@ -91,7 +97,7 @@ class Client:
         if len(self.OPENAI_KEY) == 0 or not self.OPENAI_KEY.startswith("sk-") or len(self.HUGGINGFACE_TOKEN) == 0 or not self.HUGGINGFACE_TOKEN.startswith("hf_"):
             return messages
         message = chat_huggingface(self.all_messages, self.OPENAI_KEY, self.HUGGINGFACE_TOKEN)["message"]
-        image_urls, audio_urls, video_urls = self.extract_medias(message)
+        urls, image_urls, audio_urls, video_urls = self.extract_medias(message)
         self.add_message(message, "assistant")
         messages[-1][1] = message
         for image_url in image_urls:
@@ -119,6 +125,7 @@ with gr.Blocks() as demo:
     gr.Markdown("<h1><center>HuggingGPT</center></h1>")
     gr.Markdown("<p align='center'><img src='https://i.ibb.co/qNH3Jym/logo.png' height='25' width='95'></p>")
     gr.Markdown("<p align='center' style='font-size: 20px;'>A system to connect LLMs with ML community. See our <a href='https://github.com/microsoft/JARVIS'>Project</a> and <a href='http://arxiv.org/abs/2303.17580'>Paper</a>.</p>")
+    gr.HTML('''<center><a href="https://huggingface.co/spaces/microsoft/HuggingGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key and Hugging Face Token</center>''')
     with gr.Row().style():
         with gr.Column(scale=0.85):
             openai_api_key = gr.Textbox(
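Note: gr.HTML renders raw markup, which is how the added line injects the Duplicate Space badge; a minimal standalone sketch, outside the real layout:

import gradio as gr

with gr.Blocks() as demo:
    # The ?duplicate=true query string opens the Space-duplication dialog.
    gr.HTML('<center><a href="https://huggingface.co/spaces/microsoft/HuggingGPT?duplicate=true">Duplicate Space</a></center>')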
@@ -167,7 +174,7 @@ with gr.Blocks() as demo:
 
     openai_api_key.submit(set_key, [state, openai_api_key], [openai_api_key])
     txt.submit(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], chatbot)
-    hugging_face_token.submit(set_token, [hugging_face_token], [hugging_face_token])
+    hugging_face_token.submit(set_token, [state, hugging_face_token], [hugging_face_token])
     btn1.click(set_key, [state, openai_api_key], [openai_api_key])
     btn2.click(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], chatbot)
     btn3.click(set_token, [state, hugging_face_token], [hugging_face_token])
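Note: this fix matters because Gradio maps each component in the inputs list to one positional argument of the callback; with a two-argument set_token(state, token), the old inputs list [hugging_face_token] left the callback one argument short at call time. A hedged sketch (set_token's real body lives in app.py; this one is hypothetical):

import gradio as gr

def set_token(state, token):
    state["hf_token"] = token   # assumed: per-session state is a dict
    return token

with gr.Blocks() as demo:
    state = gr.State({})
    hugging_face_token = gr.Textbox(type="password", placeholder="hf_...")
    # Both state and the textbox must appear in the inputs list.
    hugging_face_token.submit(set_token, [state, hugging_face_token], [hugging_face_token])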
awesome_chat.py
CHANGED

@@ -839,7 +839,7 @@ def chat_huggingface(messages, openaikey = None, huggingfacetoken = None, return
     logger.info(task_str)
 
     if "error" in task_str:
-        return {"message": task_str}
+        return {"message": str(task_str)}
     else:
         task_str = task_str.strip()
 
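Note: a sketch of why the str(...) coercion helps, assuming task_str can arrive as a dict-shaped error payload rather than plain text: "error" in x is a substring test on a str but a key test on a dict, and either way the caller expects the returned message to be a string.

task_str = {"error": {"message": "The server is busy."}}   # assumed error shape
if "error" in task_str:                                    # key test on a dict
    result = {"message": str(task_str)}                    # message is plain text
print(result["message"])                                   # safe to display or log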
models_server.py
CHANGED

@@ -162,10 +162,10 @@ def load_pipes(local_deployment):
             # "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
             # "device": "cuda:0"
             # },
-
-
-
-
+            "espnet/kan-bayashi_ljspeech_vits": {
+                "model": Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits"),
+                "device": "cuda:0"
+            },
             # "lambdalabs/sd-image-variations-diffusers": {
             #     "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16
             #     "device": "cuda:0"
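Note: enabling this pipe assumes espnet2 (and soundfile, for the sketch below) are installed and that the Space has a GPU backing "cuda:0". A hedged usage sketch of the model being loaded:

from espnet2.bin.tts_inference import Text2Speech
import soundfile as sf

tts = Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits")
out = tts("Hello from HuggingGPT.")                 # returns a dict with a "wav" tensor
sf.write("speech.wav", out["wav"].numpy(), tts.fs)  # tts.fs is the model's sampling rate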