Update app.py
app.py (CHANGED)
@@ -3,11 +3,15 @@ import os
 import sys
 import json
 import requests
+import random
+
 
 MODEL = "gpt-4o-mini"
 API_URL = os.getenv("API_URL")
 DISABLED = os.getenv("DISABLED") == 'True'
-
+OPENAI_API_KEYS = os.getenv("OPENAI_API_KEYS").split(',')
+print (API_URL)
+print (OPENAI_API_KEYS)
 NUM_THREADS = int(os.getenv("NUM_THREADS"))
 
 print (NUM_THREADS)
@@ -16,20 +20,6 @@ def exception_handler(exception_type, exception, traceback):
     print("%s: %s" % (exception_type.__name__, exception))
 sys.excepthook = exception_handler
 sys.tracebacklimit = 0
-
-#https://github.com/gradio-app/gradio/issues/3531#issuecomment-1484029099
-def parse_codeblock(text):
-    lines = text.split("\n")
-    for i, line in enumerate(lines):
-        if "```" in line:
-            if line != "```":
-                lines[i] = f'<pre><code class="{lines[i][3:]}">'
-            else:
-                lines[i] = '</code></pre>'
-        else:
-            if i > 0:
-                lines[i] = "<br/>" + line.replace("<", "&lt;").replace(">", "&gt;")
-    return "".join(lines)
 
 def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request:gr.Request):
     payload = {
@@ -42,11 +32,13 @@ def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request:
         "presence_penalty":0,
         "frequency_penalty":0,
     }
-
+    OPENAI_API_KEY = random.choice(OPENAI_API_KEYS)
+    print (OPENAI_API_KEY)
+    headers_dict = {key.decode('utf-8'): value.decode('utf-8') for key, value in request.headers.raw}
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {OPENAI_API_KEY}",
-        "Headers": f"{
+        "Headers": f"{headers_dict}"
     }
 
     # print(f"chat_counter - {chat_counter}")
@@ -93,6 +85,8 @@ def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request:
         # raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}")
 
         for chunk in response.iter_lines():
+            print (chunk)
+            sys.stdout.flush()
             #Skipping first chunk
             if counter == 0:
                 counter += 1
@@ -109,19 +103,19 @@ def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request:
                     else:
                         history[-1] = partial_words
                     token_counter += 1
-                    yield [(
+                    yield [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=False), gr.update(interactive=False)  # resembles {chatbot: chat, state: history}
     except Exception as e:
         print (f'error found: {e}')
-    yield [(
+    yield [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=True), gr.update(interactive=True)
     print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter}))
 
 
 def reset_textbox():
     return gr.update(value='', interactive=False), gr.update(interactive=False)
 
-title = """<h1 align="center">GPT-4o
+title = """<h1 align="center">GPT-4o: Research Preview (Short-Term Availability)</h1>"""
 if DISABLED:
-    title = """<h1 align="center" style="color:red">This app has reached OpenAI's usage limit.
+    title = """<h1 align="center" style="color:red">This app has reached OpenAI's usage limit. Please check back tomorrow.</h1>"""
 description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
 ```
 User: <utterance>
@@ -130,7 +124,7 @@ User: <utterance>
 Assistant: <utterance>
 ...
 ```
-In this app, you can explore the outputs of a gpt-
+In this app, you can explore the outputs of a gpt-4 turbo LLM.
 """
 
 theme = gr.themes.Default(primary_hue="green")
@@ -139,17 +133,18 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
                 #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
     gr.HTML(title)
-    gr.HTML("""<h3 align="center">This app provides you full access to GPT-4o mini (128K token limit). You don't need any OPENAI API key.</
-
+    gr.HTML("""<h3 align="center">This app provides you full access to GPT-4o mini (128K token limit). You don't need any OPENAI API key.</h3>""")
+
+    #gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
    with gr.Column(elem_id = "col_container", visible=False) as main_block:
-        #API Key is provided by
-        #openai_api_key = gr.Textbox(type='password', label="Enter only your OpenAI API key here")
+        #GPT4 API Key is provided by Huggingface
+        #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here")
         chatbot = gr.Chatbot(elem_id='chatbot') #c
         inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t
         state = gr.State([]) #s
         with gr.Row():
             with gr.Column(scale=7):
-                b1 = gr.Button(visible=not DISABLED)
+                b1 = gr.Button(visible=not DISABLED) #.style(full_width=True)
             with gr.Column(scale=3):
                 server_status_code = gr.Textbox(label="Status code from OpenAI server", )
 
@@ -181,9 +176,9 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
     accept_button = gr.Button("I Agree")
 
     def enable_inputs():
-        return
+        return gr.update(visible=False), gr.update(visible=True)
 
-    accept_button.click(None, None, accept_checkbox,
+    accept_button.click(None, None, accept_checkbox, js=js, queue=False)
     accept_checkbox.change(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block], queue=False)
 
     inputs.submit(reset_textbox, [], [inputs, b1], queue=False)
@@ -191,4 +186,4 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
     b1.click(reset_textbox, [], [inputs, b1], queue=False)
     b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key
 
-demo.queue(max_size=
+demo.queue(max_size=10, default_concurrency_limit=NUM_THREADS, api_open=False).launch(share=False)
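The functional core of this change is per-request rotation over several OpenAI API keys plus forwarding the caller's request headers to the upstream API. Below is a minimal sketch of that pattern as the diff applies it; the `build_headers` helper name and the example environment value are illustrative, not part of the app:

```python
import os
import random

# Assumed format per the diff: a comma-separated list, e.g. OPENAI_API_KEYS="sk-aaa,sk-bbb"
OPENAI_API_KEYS = os.getenv("OPENAI_API_KEYS", "").split(',')

def build_headers(raw_request_headers):
    # Pick one key at random for this request, as the updated predict() does.
    api_key = random.choice(OPENAI_API_KEYS)
    # Gradio's gr.Request exposes Starlette headers; .raw is a list of (bytes, bytes)
    # pairs, so decode them before serializing into the outgoing "Headers" field.
    headers_dict = {key.decode('utf-8'): value.decode('utf-8') for key, value in raw_request_headers}
    return {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
        "Headers": f"{headers_dict}",
    }
```

Choosing a key with `random.choice` spreads traffic across keys without any shared counter or lock, which fits the concurrent request handling configured by `default_concurrency_limit=NUM_THREADS` in the new `demo.queue(...)` call.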