Commit e228074
1 Parent(s): d353085
Update run.py
run.py CHANGED
@@ -1,82 +1,50 @@
 #############################################################################
-# Title: Gradio Interface to AI hosted
+# Title: Gradio Interface to AI hosted by Huggingface
 # Author: Andreas Fischer
 # Date: October 7th, 2023
-# Last update: December
+# Last update: December 19th, 2023
 #############################################################################
 
 import gradio as gr
 import requests
-import os
+import time
 import json
 
-def specifications(message, model, prompt_type, verbose=False):
-  … (old lines 14-19 truncated in this view)
-  if(model=="SauerkrautLM-7B"):
-    url="https://SauerkrautLM-GGUF-API.hf.space/v1/completions"
-    if(prompt_type=="Default"): prompt_type="Vicuna (German)"
-  if(model=="WizardLM-13B"):
-    url="https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
-    if(prompt_type=="Default"): prompt_type="Vicuna"
-  if(model=="OpenHermes2-7B"):
-    url="https://AFischer1985-OpenHermes-2-GGUF-API.hf.space/v1/completions"
-    if(prompt_type=="Default"): prompt_type="ChatML"
-  if(model=="CollectiveCognition-7B"):
-    url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
-    if(prompt_type=="Default"): prompt_type="ChatML"
-
-  if(prompt_type=="ChatML"):
-    body={"prompt":"<|im_start|>system\nYou are a helpful AI-Assistant.<|im_end|>\n<|im_start|>user\n"+message+"<|im_end|>\n<|im_start|>assistant\n","max_tokens":1000,"stop":"<|im_end|>","echo":"False","stream":True}
-  if(prompt_type=="ChatML (German)"):
-    body={"prompt":"<|im_start|>system\nDu bist ein großes Sprachmodell, das höflich und kompetent antwortet. Schreibe deine Gedanken Schritt für Schritt auf, um Probleme sinnvoll zu lösen.<|im_end|>\n<|im_start|>user\n"+message+"<|im_end|>\n<|im_start|>assistant\n","max_tokens":1000,"stop":"User:","echo":"False","stream":True}
-  if(prompt_type=="Alpaca"):
-    body={"prompt":"###Instruction:\n"+message+"\n\n###Response:\n","max_tokens":1000,"stop":"###","echo":"False","stream":True}
-  if(prompt_type=="Vicuna"):
-    body={"prompt":"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: "+message+" ASSISTANT:","max_tokens":1000,"stop":"USER:","echo":"False","stream":"True"}
-  if(prompt_type=="Vicuna (German)"):
-    body={"prompt":"Ein Chat zwischen einem Benutzer und einem KI-Assistenten. Der KI-Assistent gibt hilfreiche, detaillierte und höfliche Antworten.\nUser: "+message+"\nAssistant: ","max_tokens":1000,"stop":"User:","echo":"False","stream":True}
-  if(verbose==True):
-    print("model: "+model+"\n"+"URL: "+url+"\n"+"prompt_type: "+prompt_type+"\n"+"message: "+message+"\n"+"body: "+str(body)+"\n")
-  return([url,body,model,prompt_type])
-
-def response(message, history, model, prompt_type):
-  print(model)
-  [url,body,model,prompt_type]=specifications(message,model,prompt_type,verbose=True)
-  response=""
-  buffer=""
+def response(message, history, model):
+  if(model=="Default"): model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+  model_id = model
+  params={"max_length":500, "return_full_text":False} #, "stream":True
+  url = f"https://api-inference.huggingface.co/models/{model_id}"
+  correction=1
+  prompt=f"[INST] {message} [/INST]"
   print("URL: "+url)
+  print(params)
   print("User: "+message+"\nAI: ")
-  … (old lines 54-78 truncated in this view)
-#
-#os.system('python3 -m llama_cpp.server --model "/home/af/gguf/models/SauerkrautLM-7b-HerO-q8_0.gguf" --host 0.0.0.0 --port 2600')
+  response=""
+  for text in requests.post(url, json={"inputs":prompt, "parameters":params}, stream=True):
+    text=text.decode('UTF-8')
+    print(text)
+    if(correction==3):
+      text='"}]'+text
+      correction=2
+    if(correction==1):
+      text=text.lstrip('[{"generated_text":"')
+      correction=2
+    if(text.endswith('"}]')):
+      text=text.rstrip('"}]')
+      correction=3
+    response=response+text
+    print(response)
+    time.sleep(0.2)
+    yield response
+
+x=requests.get(f"https://api-inference.huggingface.co/framework/text-generation-inference")
+x=[i["model_id"] for i in x.json()]
+x.insert(0,"Default")
+print(x)
+
+gr.ChatInterface(
+  response,
+  additional_inputs=[gr.Dropdown(x,value="Default",label="Model")]).queue().launch(share=True) #False, server_name="0.0.0.0", server_port=7864)
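A note on the correction bookkeeping in the new response(): the request is sent without server-side streaming, so iterating the requests response with stream=True only yields raw byte chunks of the JSON body [{"generated_text":"..."}] as it downloads, and the three correction states strip that framing by hand so partial text can be yielded early. One caveat: str.lstrip and str.rstrip remove any run of the listed characters, not an exact prefix or suffix, so a reply that happens to start or end with characters from those sets gets clipped. Below is a minimal prefix-safe sketch of the same cleanup, assuming Python 3.9+ for str.removeprefix/str.removesuffix; the helper name clean_chunks is hypothetical and not part of run.py.

    def clean_chunks(http_response):
        # Yield generated text piece by piece, removing the JSON framing
        # [{"generated_text":" ... "}] that wraps the whole response body.
        first = True   # only the first chunk carries the opening frame
        held = ""      # a trailing '"}]' is held back until we know it is final
        for chunk in http_response:   # iterating a requests.Response yields bytes
            text = held + chunk.decode("utf-8")
            held = ""
            if first:
                text = text.removeprefix('[{"generated_text":"')  # exact prefix only
                first = False
            if text.endswith('"}]'):
                held = '"}]'          # restore it if more data follows
                text = text.removesuffix('"}]')
            yield text

Like the original, this does not handle a frame split across chunk boundaries or JSON string escapes; it only makes the prefix/suffix removal exact.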
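Run against the same endpoint run.py targets, a throwaway driver might look like the sketch below (illustrative only; gated or cold models may additionally need an Authorization header with an HF token, and a "model is loading" error body is not handled here).

    import requests

    url = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"
    payload = {"inputs": "[INST] Say hello. [/INST]",
               "parameters": {"max_length": 500, "return_full_text": False}}
    partial = ""
    with requests.post(url, json=payload, stream=True) as r:
        for piece in clean_chunks(r):
            partial += piece
            print(partial)   # grows as the body downloads, mirroring the Gradio yield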