Create agent.py
agent.py
ADDED
import os
import subprocess
import random
from datetime import datetime

import gradio as gr
from huggingface_hub import InferenceClient

# Local helper modules shipped alongside this Space.
from safe_search import safe_search
from i_search import google
from i_search import i_search as i_s
from utils import parse_action, parse_file_content, read_python_module_structure

now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

VERBOSE = True
MAX_HISTORY = 100

# Prompts
ACTION_PROMPT = "action prompt"
ADD_PROMPT = "add prompt"
COMPRESS_HISTORY_PROMPT = "compress history prompt"
LOG_PROMPT = "log prompt"
LOG_RESPONSE = "log response"
MODIFY_PROMPT = "modify prompt"
PREFIX = "prefix"
SEARCH_QUERY = "search query"
READ_PROMPT = "read prompt"
TASK_PROMPT = "task prompt"
UNDERSTAND_TEST_RESULTS_PROMPT = "understand test results prompt"


def format_prompt(message, history):
    # History entries may be plain strings or (user, bot) pairs from
    # gr.Chatbot; flatten both forms into one block of text.
    turns = []
    for item in history:
        if isinstance(item, (list, tuple)):
            turns.extend(str(part) for part in item if part)
        else:
            turns.append(str(item))
    return "\n### Instruction:\n{}\n### History:\n{}".format(message, "\n".join(turns))


def run_agent(instruction, history):
    # generate() formats the prompt itself and yields the *cumulative* text,
    # so keep the latest chunk instead of concatenating chunks.
    response = ""
    for chunk in generate(instruction, history[-MAX_HISTORY:], temperature=0.7):
        response = chunk
        if "\n\n### Instruction:" in response:
            break

    # Collect any "action: ..." directives the model emitted.
    response_actions = []
    for line in response.strip().split("\n"):
        if line.startswith("action:"):
            response_actions.append(line[len("action:"):].strip())

    return response, response_actions
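
# Example (assumed shapes): a model reply containing the line
# "action: create_file index.html" would come back as
#     response, actions = run_agent("Add a landing page", [])
#     # actions -> ["create_file index.html"]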


def generate(prompt, history, temperature):
    # Fresh sampling seed on every call so retries can differ.
    seed = random.randint(1, 1111111111111111)
    generate_kwargs = {
        "temperature": temperature,
        "max_new_tokens": 256,
        "top_p": 0.95,
        "repetition_penalty": 1.0,
        "do_sample": True,
        "seed": seed,
    }
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""

    # Each stream event carries one new token; yield the accumulated text.
    for response in stream:
        output += response.token.text
        yield output
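
# Assumed standalone usage: the cumulative yields let generate() stream
# straight into a UI component, e.g.
#     for partial in generate("Say hello", [], temperature=0.7):
#         print(partial)  # text grows by one token per iteration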


def create_interface():
    with gr.Blocks(title="Expert Web Developer Assistant") as block:
        with gr.Tab("Conversation"):
            chatbot = gr.Chatbot()
            txt = gr.Textbox(show_label=False, placeholder="Type something...")
            btn = gr.Button("Send", variant="primary")
            clear_btn = gr.Button("Clear")

            # Submit or Send runs the agent and appends the exchange;
            # Clear resets the conversation.
            txt.submit(_chat, inputs=[txt, chatbot], outputs=[txt, chatbot])
            btn.click(_chat, inputs=[txt, chatbot], outputs=[txt, chatbot])
            clear_btn.click(_clear_history, None, chatbot)

        with gr.Tab("Settings"):
            MAX_HISTORY_slider = gr.Slider(
                minimum=1, maximum=100, step=1, label="Max history", value=MAX_HISTORY
            )
            MAX_HISTORY_slider.change(_set_max_history, inputs=MAX_HISTORY_slider, outputs=None)

    return block


def _chat(message, chat_history):
    # Run the agent on the new message and append the (user, bot) pair.
    chat_history = chat_history or []
    response, _actions = run_agent(message, chat_history)
    return "", chat_history + [(message, response)]


def _set_max_history(value):
    # The Settings slider caps how much history is replayed into the prompt.
    global MAX_HISTORY
    MAX_HISTORY = int(value)


def _count_messages(history):
    # Number of (user, bot) exchanges currently in the chat.
    return sum(1 for item in history if isinstance(item, tuple))


def _clear_history():
    # Reset the conversation pane.
    return []


# Exportable functions and variables
__all__ = [
    "run_agent",
    "create_interface",
    "format_prompt",
    "generate",
    "MAX_HISTORY",
    "client",
    "VERBOSE",
    "date_time_str",
]
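

# Assumed entry point (not part of the original commit): launch the app
# with `python agent.py`.
if __name__ == "__main__":
    create_interface().launch()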