Update app.py
app.py
CHANGED
@@ -110,8 +110,8 @@ def chat_inf_a(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
     if len(client_choice)>=hid_val:
         if system_prompt:
             system_prompt=f'{system_prompt}, '
-
-        client1=gr.load("models/" + models[0])
+        client1=client_z[int(hid_val)-1]
+        #client1=gr.load("models/" + models[0])
         if not history:
             history = []
             hist_len=0
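The first hunk stops hard-coding the client to the first entry in models via gr.load and instead indexes client_z with the slot number hid_val, so each chat_inf_* handler uses the client that was loaded for its own slot. Below is a minimal sketch of how client_z might be populated elsewhere in app.py; the load_models helper and the use of huggingface_hub.InferenceClient are assumptions (only client_z and models appear in this diff), chosen because the later hunks call client1 with text_generation-style keyword arguments.

from huggingface_hub import InferenceClient

# Hypothetical model list; the real app.py defines its own.
models = [
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "HuggingFaceH4/zephyr-7b-beta",
]
client_z = []

def load_models(chosen):
    # Assumption: store one callable per slot that accepts text_generation-style
    # keyword arguments, matching how client1 is invoked in the hunks below.
    client_z.clear()
    for name in chosen:
        client_z.append(InferenceClient(name).text_generation)
    return client_z

Under that assumption, client_z[int(hid_val)-1] returns the streaming text-generation callable for slot hid_val.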
@@ -125,7 +125,7 @@ def chat_inf_a(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         )
         #formatted_prompt=prompt
         formatted_prompt = format_prompt_choose(f"{system_prompt}{prompt}", history, client_choice[0])
-        stream1 = client1
+        stream1 = client1(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream1:
             output += response.token.text
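This and the remaining hunks make the same fix in all four handlers: stream1 = client1 only aliased the client and never ran it, while the new line actually calls it with the formatted prompt and the sampling settings in generate_kwargs, streaming the result. The call signature and the response.token.text accumulation match huggingface_hub's InferenceClient.text_generation; a self-contained sketch with a hypothetical model and settings:

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")  # hypothetical model
generate_kwargs = dict(  # stand-ins for the values app.py builds from its sliders
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.0,
    do_sample=True,
    seed=42,
)

stream = client.text_generation(
    "Write one sentence about llamas.",
    **generate_kwargs,
    stream=True,             # yield tokens as they are generated
    details=True,            # wrap each chunk in an object exposing .token
    return_full_text=False,  # do not prepend the prompt to the output
)

output = ""
for response in stream:
    output += response.token.text  # append each newly generated token's text
print(output)

In the app, formatted_prompt takes the place of the literal prompt and generate_kwargs carries the seed, temp, tokens and top_p values visible in the handler signatures.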
@@ -155,7 +155,7 @@ def chat_inf_b(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         )
         #formatted_prompt=prompt
         formatted_prompt = format_prompt_choose(f"{system_prompt}{prompt}", history, client_choice[1])
-        stream2 = client2
+        stream2 = client2(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream2:
             output += response.token.text
@@ -184,7 +184,7 @@ def chat_inf_c(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         )
         #formatted_prompt=prompt
         formatted_prompt = format_prompt_choose(f"{system_prompt}{prompt}", history, client_choice[2])
-        stream3 = client3
+        stream3 = client3(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        output = ""
         for response in stream3:
             output += response.token.text
@@ -213,7 +213,7 @@ def chat_inf_d(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         )
         #formatted_prompt=prompt
         formatted_prompt = format_prompt_choose(f"{system_prompt}{prompt}", history, client_choice[3])
-        stream4 = client4
+        stream4 = client4(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream4:
             output += response.token.text