Update app.py
app.py CHANGED
@@ -1,27 +1,7 @@
-
-import gradio as gr
+import kminterface
 import requests
 import json
 
-client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
-def google_search(query, **kwargs):
-    api_key = 'AIzaSyDseKKQCAUBmPidu_QapnpJCGLueDWYJbE'
-    cse_id = '001ae9bf840514e61'
-
-    service_url = 'https://www.googleapis.com/customsearch/v1'
-    params = {
-        'key': api_key,
-        'cx': cse_id,
-        'q': query,
-        **kwargs
-    }
-    response = requests.get(service_url, params=params)
-    if response.status_code == 200:
-        return json.loads(response.text)['items']
-    else:
-        print(f'Error: {response.status_code}')
-        return []
 
 def tokenize(text):
     return text
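The deleted helper wrapped the Google Custom Search JSON API, forwarding any extra keyword arguments straight into the request's query parameters. For illustration only (the helper no longer exists after this commit; num is a standard CSE parameter and the query string is made up), a call would have looked like:

# Hypothetical usage of the removed helper: **kwargs becomes &num=5 in the URL
results = google_search("mixtral 8x7b", num=5)
for item in results:
    print(item['title'], item['link'])  # each CSE result item carries title/link/snippet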
@@ -36,6 +16,7 @@ def format_prompt(message, history):
     return prompt
 
 def generate(prompt, history, system_prompt, temperature=0.2, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
+    print(type(history), history)
     temperature = float(temperature)
     if temperature < 1e-2:
         temperature = 1e-2
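The only in-function change is the added debug print. With gr.ChatInterface's tuple-style history in the Gradio versions this app targets, history arrives as a list of [user, assistant] pairs for the completed turns, so the new line would print something like (illustrative values):

# <class 'list'> [['Hi there', 'Hello! How can I help you today?']]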
@@ -52,7 +33,7 @@ def generate(prompt, history, system_prompt, temperature=0.2, max_new_tokens=512
 
     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
 
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    stream = kminterface.client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
     for response in stream:
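Only the call site changes here; the loop body between `for response in stream:` and `yield output` sits outside the diff context. With huggingface_hub's InferenceClient, passing stream=True and details=True makes text_generation yield one event per generated token, so the elided lines are presumably a token-accumulation loop along these lines (a sketch, not the file's verbatim contents):

for response in stream:
    output += response.token.text  # append the newly streamed token
    yield output                   # yield the growing string so the UI updates live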
@@ -61,66 +42,11 @@ def generate(prompt, history, system_prompt, temperature=0.2, max_new_tokens=512
         yield output
     return output
 
-def generateS(prompt, history, system_prompt, temperature=0.2, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
-
-    stream = google_search(prompt)
-    output = ""
-
-    for response in stream:
-        output += json.dumps(response)
-        yield output
-    return output
 
-additional_inputs=[
-    gr.Textbox(
-        label="System Prompt",
-        max_lines=1,
-        interactive=True,
-    ),
-    gr.Slider(
-        label="Temperature",
-        value=0.2,
-        minimum=0.0,
-        maximum=1.0,
-        step=0.05,
-        interactive=True,
-        info="Higher values produce more diverse outputs",
-    ),
-    gr.Slider(
-        label="Max new tokens",
-        value=512,
-        minimum=0,
-        maximum=1048,
-        step=64,
-        interactive=True,
-        info="The maximum numbers of new tokens",
-    ),
-    gr.Slider(
-        label="Top-p (nucleus sampling)",
-        value=0.95,
-        minimum=0.0,
-        maximum=1,
-        step=0.05,
-        interactive=True,
-        info="Higher values sample more low-probability tokens",
-    ),
-    gr.Slider(
-        label="Repetition penalty",
-        value=1,
-        minimum=1.0,
-        maximum=2.0,
-        step=0.05,
-        interactive=True,
-        info="Penalize repeated tokens",
-    )
-]
-
-mychatbot = gr.Chatbot(
-    avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=False)
 
 demo = gr.ChatInterface(fn=generate,
-                        chatbot=mychatbot,
-                        additional_inputs=additional_inputs,
+                        chatbot=kminterface.mychatbot,
+                        additional_inputs=kminterface.additional_inputs,
                         title="Kamran's Mixtral 8x7b Chat",
                         retry_btn=None,
                         undo_btn=None
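The new app.py leans on a companion module named kminterface that is not part of this diff. Reconstructing it from the definitions deleted above (the module layout and the huggingface_hub import are assumptions; the values are copied verbatim from the old app.py), it would need to export at least:

# kminterface.py -- hypothetical reconstruction, not shown in this commit
import gradio as gr
from huggingface_hub import InferenceClient  # presumed import for the client below

# Consumed by app.py as kminterface.client.text_generation(...)
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Extra ChatInterface controls, moved out of the old app.py
additional_inputs = [
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=0.2, minimum=0.0, maximum=1.0, step=0.05,
              interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=512, minimum=0, maximum=1048, step=64,
              interactive=True, info="The maximum numbers of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.95, minimum=0.0, maximum=1,
              step=0.05, interactive=True,
              info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1, minimum=1.0, maximum=2.0, step=0.05,
              interactive=True, info="Penalize repeated tokens"),
]

# Chatbot widget, also moved out of the old app.py
mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./botm.png"], bubble_full_width=False,
    show_label=False, show_copy_button=True, likeable=False)

One loose end: the diff removes `import gradio as gr` from app.py while the unchanged tail still calls gr.ChatInterface, so unless gradio is imported somewhere in the lines this diff does not show, app.py would still need its own `import gradio as gr` to run.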