Update app.py
app.py CHANGED
@@ -1,480 +1,304 @@
 import os
-import sys
 import subprocess
-import
-import json
-from io import StringIO
-from typing import Dict, List
-import streamlit as st
-from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
-from pylint import lint
 from huggingface_hub import InferenceClient
 import gradio as gr
-import
-import
-
 )

-
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-
-# Replace st.secrets with os.environ
-hf_token = os.environ.get("huggingface_token")
-
-if not hf_token:
-    st.error("Hugging Face API key not found. Please set the HUGGINGFACE_API_KEY environment variable.")
-    st.stop()
-
-# Global state to manage communication between Tool Box and Workspace Chat App
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = []
-if "terminal_history" not in st.session_state:
-    st.session_state.terminal_history = []
-if "workspace_projects" not in st.session_state:
-    st.session_state.workspace_projects = {}
-
-# Load pre-trained RAG retriever
-rag_retriever = pipeline("text-generation", model="mistralai/Mixtral-8x7B-v0.1")
-
-# Load pre-trained chat model
-chat_model = AutoModelForSeq2SeqLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
-
-# Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained("gpt2")
-
-def process_input(user_input: str) -> str:
-    # Input pipeline: Tokenize and preprocess user input
-    input_ids = tokenizer(user_input, return_tensors="pt").input_ids
-    attention_mask = tokenizer(user_input, return_tensors="pt").attention_mask
-
-    # RAG model: Generate response
-    with torch.no_grad():
-        output = rag_retriever(input_ids, attention_mask=attention_mask)
-        response = output.generator_outputs[0].sequences[0]
-
-    # Chat model: Refine response
-    chat_input = tokenizer(response, return_tensors="pt")
-    chat_input["input_ids"] = chat_input["input_ids"].unsqueeze(0)
-    chat_input["attention_mask"] = chat_input["attention_mask"].unsqueeze(0)
-    with torch.no_grad():
-        chat_output = chat_model(**chat_input)
-        refined_response = chat_output.sequences[0]
-
-    # Output pipeline: Return final response
-    return refined_response
-
-class AIAgent:
-    def __init__(self, name: str, description: str, skills: List[str], hf_api=None):
-        self.name = name
-        self.description = description
-        self.skills = skills
-        self._hf_api = hf_api
-        self._hf_token = hf_token
-
-    @property
-    def hf_api(self):
-        if not self._hf_api and self.has_valid_hf_token():
-            self._hf_api = HfApi(token=self._hf_token)
-        return self._hf_api
-
-    def has_valid_hf_token(self):
-        return bool(self._hf_token)
-
-    async def autonomous_build(self, chat_history: List[str], workspace_projects: Dict[str, str], project_name: str, selected_model: str):
-        # Continuation of previous methods
-        summary = "Chat History:\n" + "\n".join(chat_history)
-        summary += "\n\nWorkspace Projects:\n" + "\n".join(workspace_projects.items())
-
-        # Analyze chat history and workspace projects to suggest actions
-        # Example:
-        # - Check if the user has requested to create a new file
-        # - Check if the user has requested to install a package
-        # - Check if the user has requested to run a command
-        # - Check if the user has requested to generate code
-        # - Check if the user has requested to translate code
-        # - Check if the user has requested to summarize text
-
-        # Generate a response based on the analysis
-        next_step = "Based on the current state, the next logical step is to implement the main application logic."

-
-        project_path = os.path.join(PROJECT_ROOT, project_name)
-        if not os.path.exists(project_path):
-            os.makedirs(project_path)

-
-        if not os.path.exists(requirements_file):
-            with open(requirements_file, "w") as f:
-                f.write("# Add your project's dependencies here\n")

-
-        return None
-
-def create_agent_from_text(name: str, text: str) -> str:
-    skills = text.split("\n")
-    agent = AIAgent(name, "AI agent created from text input.", skills)
-    save_agent_to_file(agent)
-    return agent.create_agent_prompt()
-
-def chat_interface_with_agent(input_text: str, agent_name: str) -> str:
-    agent_prompt = load_agent_prompt(agent_name)
-    if agent_prompt is None:
-        return f"Agent {agent_name} not found."
-
-    model_name = ""
     try:
-
     except Exception as e:
-
     try:
-
-    summarizer = pipeline("summarization")
-    summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
-    return summary[0]['summary_text']
-
-def translate_code(code: str, source_language: str, target_language: str) -> str:
-    # Use a Hugging Face translation model instead of OpenAI
-    # Example: English to Spanish
-    translator = pipeline(
-        "translation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")
-    translated_code = translator(code, target_lang=target_language)[0]['translation_text']
-    return translated_code
-
-def generate_code(code_idea: str, model_name: str) -> str:
-    """Generates code using the selected model."""
-    try:
-        generator = pipeline('text-generation', model=model_name)
-        generated_code = generator(code_idea, max_length=1000, num_return_sequences=1)[0]['generated_text']
-        return generated_code
     except Exception as e:
-
-if
-
-        "gitignore_template": "web",
-        "default_branch": "main",
-        "archived": False,
-        "files": []
-    }
-    for filename, contents in files.items():
-        data = {
-            "content": contents,
-            "path": filename,
-            "encoding": "utf-8",
-            "mode": "overwrite"
-        }
-        payload["files"].append(data)
-    response = requests.post(url, json=payload, headers=headers)
-    response.raise_for_status()
-    location = response.headers.get("Location")
-    # wait_for_processing(location, api)  # You might need to implement this if it's not already defined
-
-    return Repository(name=name, api=api)
-
-# Streamlit App
-st.title("AI Agent Creator")
-
-# Sidebar navigation
-st.sidebar.title("Navigation")
-app_mode = st.sidebar.selectbox(
-    "Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
-
-if app_mode == "AI Agent Creator":
-    # AI Agent Creator
-    st.header("Create an AI Agent from Text")
-
-    st.subheader("From Text")
-    agent_name = st.text_input("Enter agent name:")
-    text_input = st.text_area("Enter skills (one per line):")
-    if st.button("Create Agent"):
-        agent_prompt = create_agent_from_text(agent_name, text_input)
-        st.success(f"Agent '{agent_name}' created and saved successfully.")
-        st.session_state.available_agents.append(agent_name)
-
-elif app_mode == "Tool Box":
-    # Tool Box
-    st.header("AI-Powered Tools")
-
-    # Chat Interface
-    st.subheader("Chat with CodeCraft")
-    chat_input = st.text_area("Enter your message:")
-    if st.button("Send"):
-        chat_response = chat_interface(chat_input)
-        st.session_state.chat_history.append((chat_input, chat_response))
-        st.write(f"CodeCraft: {chat_response}")
-
-    # Terminal Interface
-    st.subheader("Terminal")
-    terminal_input = st.text_input("Enter a command:")
-    if st.button("Run"):
-        terminal_output = terminal_interface(terminal_input)
-        st.session_state.terminal_history.append(
-            (terminal_input, terminal_output))
-        st.code(terminal_output, language="bash")
-
-    # Code Editor Interface
-    st.subheader("Code Editor")
-    code_editor = st.text_area("Write your code:", height=300)
-    if st.button("Format & Lint"):
-        formatted_code, lint_message = code_editor_interface(code_editor)
-        st.code(formatted_code, language="python")
-        st.info(lint_message)
-
-    # Text Summarization Tool
-    st.subheader("Summarize Text")
-    text_to_summarize = st.text_area("Enter text to summarize:")
-    if st.button("Summarize"):
-        summary = summarize_text(text_to_summarize)
-        st.write(f"Summary: {summary}")

-
-        st.write(f"{selected_agent}: {agent_chat_response}")
-
-    # Code Generation
-    st.subheader("Code Generation")
-    code_idea = st.text_input("Enter your code idea:")
-
-    # Model Selection Menu
-    selected_model = st.selectbox(
-        "Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
-
-    if st.button("Generate Code"):
-        generated_code = generate_code(code_idea, selected_model)
-        st.code(generated_code, language="python")
-
-    # Automate Build Process
-    st.subheader("Automate Build Process")
-    if st.button("Automate"):
-        # Load the agent without skills for now
-        agent = AIAgent(selected_agent, "", [])
-        summary, next_step = agent.autonomous_build(
-            st.session_state.chat_history, st.session_state.workspace_projects, project_name, selected_model)
-        st.write("Autonomous Build Summary:")
-        st.write(summary)
-        st.write("Next Step:")
-        st.write(next_step)
-
-        # If everything went well, proceed to deploy the Space
-        if agent._hf_api and agent.has_valid_hf_token():
-            agent.deploy_built_space_to_hf()
-        # Use the hf_token to interact with the Hugging Face API
-        api = HfApi(token="hf_token")  # Function to create a Space on Hugging Face
-        create_space_on_hugging_face(api, agent.name, agent.description, True, get_built_space_files())
 import os
 import subprocess
+import random
 from huggingface_hub import InferenceClient
 import gradio as gr
+from safe_search import safe_search
+from i_search import google
+from i_search import i_search as i_s
+from agent import (
+    ACTION_PROMPT,
+    ADD_PROMPT,
+    COMPRESS_HISTORY_PROMPT,
+    LOG_PROMPT,
+    LOG_RESPONSE,
+    MODIFY_PROMPT,
+    PREFIX,
+    SEARCH_QUERY,
+    READ_PROMPT,
+    TASK_PROMPT,
+    UNDERSTAND_TEST_RESULTS_PROMPT,
 )
+from utils import parse_action, parse_file_content, read_python_module_structure
+from datetime import datetime
+import prompts  # missing from the commit; generate() below references prompts.WEB_DEV etc.

+now = datetime.now()
+date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

+VERBOSE = True
+MAX_HISTORY = 100

+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+def run_gpt(prompt_template, stop_tokens, max_tokens, purpose, **prompt_kwargs):
+    seed = random.randint(1, 1111111111111111)
+    print(seed)
+    generate_kwargs = dict(
+        temperature=1.0,
+        max_new_tokens=2096,
+        top_p=0.99,
+        repetition_penalty=1.0,
+        do_sample=True,
+        seed=seed,
+    )
+
+    content = PREFIX.format(
+        date_time_str=date_time_str,
+        purpose=purpose,
+        safe_search=safe_search,
+    ) + prompt_template.format(**prompt_kwargs)
+    if VERBOSE:
+        print(LOG_PROMPT.format(content))
+
+    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    resp = ""
+    for response in stream:
+        resp += response.token.text
+
+    if VERBOSE:
+        print(LOG_RESPONSE.format(resp))
+    return resp
+
+def compress_history(purpose, task, history, directory):
+    resp = run_gpt(
+        COMPRESS_HISTORY_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        max_tokens=512,
+        purpose=purpose,
+        task=task,
+        history=history,
+    )
+    history = "observation: {}\n".format(resp)
+    return history
+
+def call_search(purpose, task, history, directory, action_input):
+    print("CALLING SEARCH")
     try:
+        if "http" in action_input:
+            if "<" in action_input:
+                action_input = action_input.strip("<")
+            if ">" in action_input:
+                action_input = action_input.strip(">")
+            response = i_s(action_input)
+            print(response)
+            history += "observation: search result is: {}\n".format(response)
+        else:
+            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
     except Exception as e:
+        history += "observation: {}'\n".format(e)
+    return "MAIN", None, history, task
+
+def call_main(purpose, task, history, directory, action_input):
+    resp = run_gpt(
+        ACTION_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        max_tokens=2096,
+        purpose=purpose,
+        task=task,
+        history=history,
+    )
+    lines = resp.strip().strip("\n").split("\n")
+    for line in lines:
+        if line == "":
+            continue
+        if line.startswith("thought: "):
+            history += "{}\n".format(line)
+        elif line.startswith("action: "):
+            action_name, action_input = parse_action(line)
+            print(f'ACTION_NAME :: {action_name}')
+            print(f'ACTION_INPUT :: {action_input}')
+            history += "{}\n".format(line)
+            if "COMPLETE" in action_name or "COMPLETE" in action_input:
+                task = "END"
+                return action_name, action_input, history, task
+            else:
+                return action_name, action_input, history, task
+        else:
+            history += "{}\n".format(line)
+    return "MAIN", None, history, task
+
+def call_set_task(purpose, task, history, directory, action_input):
+    task = run_gpt(
+        TASK_PROMPT,
+        stop_tokens=[],
+        max_tokens=64,
+        purpose=purpose,
+        task=task,
+        history=history,
+    ).strip("\n")
+    history += "observation: task has been updated to: {}\n".format(task)
+    return "MAIN", None, history, task
+
+def end_fn(purpose, task, history, directory, action_input):
+    task = "END"
+    return "COMPLETE", "COMPLETE", history, task
+
+NAME_TO_FUNC = {
+    "MAIN": call_main,
+    "UPDATE-TASK": call_set_task,
+    "SEARCH": call_search,
+    "COMPLETE": end_fn,
+}
+
+def run_action(purpose, task, history, directory, action_name, action_input):
+    print(f'action_name::{action_name}')
     try:
+        if "RESPONSE" in action_name or "COMPLETE" in action_name:
+            action_name = "COMPLETE"
+            task = "END"
+            return action_name, "COMPLETE", history, task
+
+        if len(history.split("\n")) > MAX_HISTORY:
+            if VERBOSE:
+                print("COMPRESSING HISTORY")
+            history = compress_history(purpose, task, history, directory)
+        if not action_name in NAME_TO_FUNC:
+            action_name = "MAIN"
+        if action_name == "" or action_name is None:
+            action_name = "MAIN"
+        assert action_name in NAME_TO_FUNC
+
+        print("RUN: ", action_name, action_input)
+        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
     except Exception as e:
+        history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
+        return "MAIN", None, history, task
+
+def run(purpose, history):
+    task = None
+    directory = "./"
+    if history:
+        history = str(history).strip("[]")
+    if not history:
+        history = ""
+
+    action_name = "UPDATE-TASK" if task is None else "MAIN"
+    action_input = None
+    while True:
+        print("")
+        print("")
+        print("---")
+        print("purpose:", purpose)
+        print("task:", task)
+        print("---")
+        print(history)
+        print("---")
+
+        action_name, action_input, history, task = run_action(
+            purpose,
+            task,
+            history,
+            directory,
+            action_name,
+            action_input,
+        )
+        yield (history)
+        if task == "END":
+            return (history)

+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+agents = [
+    "WEB_DEV",
+    "AI_SYSTEM_PROMPT",
+    "PYTHON_CODE_DEV",
+]
+
+def generate(
+    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+):
+    seed = random.randint(1, 1111111111111111)
+
+    agent = prompts.WEB_DEV
+    if agent_name == "WEB_DEV":
+        agent = prompts.WEB_DEV
+    if agent_name == "AI_SYSTEM_PROMPT":
+        agent = prompts.AI_SYSTEM_PROMPT
+    if agent_name == "PYTHON_CODE_DEV":
+        agent = prompts.PYTHON_CODE_DEV
+    system_prompt = agent
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=seed,
+    )
+
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+    return output
+
+additional_inputs = [
+    gr.Dropdown(
+        label="Agents",
+        choices=[s for s in agents],
+        value=agents[0],
+        interactive=True,
+    ),
+    gr.Textbox(
+        label="System Prompt",
+        max_lines=1,
+        interactive=True,
+    ),
+    gr.Slider(
+        label="Temperature",
+        value=0.9,
+        minimum=0.0,
+        maximum=1.0,
+        step=0.05,
+        interactive=True,
+        info="Higher values produce more diverse outputs",
+    ),
+    gr.Slider(
+        label="Max new tokens",
+        value=1048 * 10,
+        minimum=0,
+        maximum=1048 * 10,
+        step=64,
+        interactive=True,
+        info="The maximum number of new tokens",
+    ),
+    gr.Slider(
+        label="Top-p (nucleus sampling)",
+        value=0.90,
+        minimum=0.0,
+        maximum=1,
+        step=0.05,
+        interactive=True,
+        info="Higher values sample more low-probability tokens",
+    ),
+    gr.Slider(
+        label="Repetition penalty",
+        value=1.2,
+        minimum=1.0,
+        maximum=2.0,
+        step=0.05,
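
The diff is truncated here, partway through the repetition-penalty slider. For orientation, a minimal sketch of how `generate` and `additional_inputs` are typically wired into a chat UI in a Gradio Space of this shape; the `gr.ChatInterface` call below is an assumption, not part of the commit:

# Assumed wiring, not shown in the diff: stream the `generate` generator
# through a chat UI and expose the agent dropdown and sampling sliders.
demo = gr.ChatInterface(
    fn=generate,
    additional_inputs=additional_inputs,
)

if __name__ == "__main__":
    demo.queue().launch()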
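
The `run` generator added by this commit can also be driven directly: each yield is the growing transcript of `thought:`, `action:`, and `observation:` lines, and iteration stops once an action containing COMPLETE sets the task to END. A hypothetical driver, assuming the Space's `agent`, `utils`, `safe_search`, and `i_search` modules are importable:

# Hypothetical driver for the agent loop; the purpose string is free-form.
for transcript in run("build a small TODO web app", history=""):
    print(transcript)  # task updates, actions, and search observations accumulate here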