Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -12,6 +12,7 @@ import datetime
 #from query import tasks
 from prompts import (
     FINDER,
+    SAVE_MEMORY,
     COMPRESS_HISTORY_PROMPT,
     COMPRESS_DATA_PROMPT,
     COMPRESS_DATA_PROMPT_SMALL,
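Note: the only change in this hunk is the new SAVE_MEMORY import, which assumes prompts.py now exports that constant. If it does not, app.py fails at import time with an ImportError, which is one possible explanation for the runtime-error status above (the page does not show the traceback, so this is only a guess). A minimal defensive sketch, not part of this commit:

# Hypothetical guard (assumption, not in the commit): fall back gracefully if
# prompts.py has not been updated to export SAVE_MEMORY yet.
try:
    from prompts import SAVE_MEMORY
except ImportError:
    SAVE_MEMORY = None  # placeholder; the real template should live in prompts.py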
@@ -164,6 +165,71 @@ def compress_data(c,purpose, task, history, result):
     history = "result: {}\n".format(resp)
     return history
 
+
+def save_memory(purpose, history):
+    history=str(history)
+    c=0
+    out = str(out)
+    rl = len(out)
+    print(f'rl:: {rl}')
+    for i in str(out):
+        if i == " " or i=="," or i=="\n" or i=="/" or i=="." or i=="<":
+            c +=1
+    print (f'c:: {c}')
+
+    seed=random.randint(1,1000000000)
+
+    print (c)
+    #tot=len(purpose)
+    #print(tot)
+    divr=int(c)/MAX_DATA
+    divi=int(divr)+1 if divr != int(divr) else int(divr)
+    chunk = int(int(c)/divr)
+    print(f'chunk:: {chunk}')
+    print(f'divr:: {divr}')
+    print (f'divi:: {divi}')
+    out = []
+    #out=""
+    s=0
+    e=chunk
+    print(f'e:: {e}')
+    new_history=""
+    task = f'Compile this data to fulfill the task: {task}, and complete the purpose: {purpose}\n'
+    for z in range(divi):
+        print(f's:e :: {s}:{e}')
+
+        hist = history[s:e]
+
+        resp = run_gpt(
+            SAVE_MEMORY,
+            stop_tokens=["observation:", "task:", "action:", "thought:"],
+            max_tokens=2048,
+            seed=seed,
+            purpose=purpose,
+            task=task,
+            knowledge=new_history,
+            history=hist,
+        ).strip('\n')
+        new_history = resp
+        print (resp)
+        out+=resp
+        e=e+chunk
+        s=s+chunk
+    '''
+    resp = run_gpt(
+        COMPRESS_DATA_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        max_tokens=2048,
+        seed=seed,
+        purpose=purpose,
+        task=task,
+        knowledge=new_history,
+        history=result,
+    )
+    '''
+    print ("final" + resp)
+    #history = "result: {}\n".format(resp)
+    return history
 
 
 
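Note on the save_memory hunk above: as committed, the function references out and task before they are assigned (out = str(out) and the {task} placeholder in the f-string), divides by divr even when the separator count is zero, extends the out list character by character with out += resp, and finally returns the unmodified history rather than the collected summaries. The sketch below keeps the same chunk-and-summarize flow (run_gpt with the SAVE_MEMORY prompt over roughly MAX_DATA-sized slices of the chat history), but the specific fixes and the task wording are assumptions about the intent, not part of this commit.

import random

def save_memory(purpose, history):
    # Sketch only: run_gpt, MAX_DATA and SAVE_MEMORY are assumed to come from the
    # surrounding app.py / prompts.py, exactly as in the committed version.
    history = str(history)

    # Rough size estimate: count separator characters, mirroring compress_data.
    c = sum(1 for ch in history if ch in ' ,\n/.<')
    print(f'c:: {c}')

    seed = random.randint(1, 1000000000)

    # Number of chunks needed so each slice stays near MAX_DATA separators.
    divr = c / MAX_DATA
    divi = int(divr) + 1 if divr != int(divr) else int(divr)
    divi = max(divi, 1)                  # avoid zero chunks on short histories
    chunk = -(-len(history) // divi)     # ceiling division covers the full text
    print(f'chunk:: {chunk} divr:: {divr} divi:: {divi}')

    out = []                             # one summary per chunk
    new_history = ""
    task = f'Summarize this chat history for long-term memory. Purpose: {purpose}\n'
    s, e = 0, chunk
    for _ in range(divi):
        hist = history[s:e]
        resp = run_gpt(
            SAVE_MEMORY,
            stop_tokens=["observation:", "task:", "action:", "thought:"],
            max_tokens=2048,
            seed=seed,
            purpose=purpose,
            task=task,
            knowledge=new_history,
            history=hist,
        ).strip('\n')
        new_history = resp
        out.append(resp)                 # append whole summaries, not characters
        s, e = s + chunk, e + chunk

    # Return the collected summaries so the gr.JSON output has something to show.
    return out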
@@ -392,6 +458,7 @@ with gr.Blocks() as app:
             opt=gr.Dropdown(label="Choices",choices=examples,allow_custom_value=True, value="Start a new game", interactive=True)
             #prompt=gr.Textbox(label = "Prompt", value="Start a new game")
         with gr.Column(scale=2):
+
             rand = gr.Checkbox(label="Random", value=True)
             seed=gr.Slider(label="Seed", minimum=1, maximum=99999999999, value=rand_val)
             #models_dd=gr.Dropdown(choices=[m for m in return_list],interactive=True)
@@ -402,11 +469,15 @@ with gr.Blocks() as app:
     with gr.Row():
         tokens = gr.Slider(label="Max new tokens",value=2096,minimum=0,maximum=1048*10,step=64,interactive=False, visible=False,info="The maximum numbers of new tokens")
         with gr.Column(scale=1):
+            save_btn=gr.Button("Save Memory")
+            snap_btn=gr.Button("Take Screenshot")
             char_stats=gr.Textbox()
-
+            json_out=gr.JSON()
             #text=gr.JSON()
     #inp_query.change(search_models,inp_query,models_dd)
     #test_b=test_btn.click(itt,url,e_box)
+
+    save_btn.click(save_memory,[opt,chatbot],json_out)
     clear_btn.click(clear_fn,None,[opt,chatbot])
     #go=button.click(check_rand,[rand,seed],seed).then(run,[opt,chatbot,tokens,char_stats,seed],[chatbot,char_stats,json_out,opt])
     go=button.click(check_rand,[rand,seed],seed).then(run,[opt,chatbot],[chatbot])
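This last hunk wires the new Save Memory button so the Choices dropdown is passed as purpose, the Chatbot history as history, and the return value is rendered in the new gr.JSON component. The Take Screenshot button is created but never connected to a handler in this commit. A minimal sketch of the wiring, with a purely hypothetical stub for the unwired button:

# Component names (save_btn, snap_btn, opt, chatbot, json_out) come from this diff;
# snap_stub is a hypothetical placeholder, since the commit adds the button only.
def snap_stub():
    return {"note": "screenshot handler not implemented in this commit"}

save_btn.click(save_memory, [opt, chatbot], json_out)   # Dropdown -> purpose, Chatbot -> history
snap_btn.click(snap_stub, None, json_out)               # hypothetical wiring for the inert button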