MegaTronX committed on
Commit
c1a6fab
·
verified ·
1 Parent(s): 8361a08

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -82
app.py CHANGED
@@ -5,90 +5,80 @@ import spaces
5
 
6
@spaces.GPU(duration=120)
def process_code_generation(ai_response: str):
    """Split a pasted AI response containing multiple code files into real files.

    A local GGUF model (via llama.cpp) converts the pasted response into a JSON
    object mapping filename -> code. Each entry is written under a fresh temp
    directory and the whole tree is zipped.

    Args:
        ai_response: Raw text of an AI response embedding one or more code files.

    Returns:
        Path (str) to a zip archive containing the extracted files. If nothing
        parseable is found, the archive contains a single ``output.txt`` with
        the model's raw response.
    """
    import json
    import os
    import re
    import uuid
    import zipfile

    from huggingface_hub import hf_hub_download
    from llama_cpp import Llama
    from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
    from llama_cpp_agent.providers import LlamaCppPythonProvider

    # Download the model (hf_hub_download caches; a no-op after the first call).
    model_path = hf_hub_download(
        repo_id="tHottie/NeuralDaredevil-8B-abliterated-Q4_K_M-GGUF",
        filename="neuraldaredevil-8b-abliterated-q4_k_m-imat.gguf",
        local_dir="./models",
    )

    # n_gpu_layers=0: ZeroGPU runs llama.cpp on CPU under the hood.
    llm = Llama(
        model_path=model_path,
        n_gpu_layers=0,
        n_batch=512,
        n_ctx=4096,
        verbose=True,
    )
    provider = LlamaCppPythonProvider(llm)
    agent = LlamaCppAgent(
        provider,
        system_prompt="You are an AI code parser. Given a pasted AI response with multiple code files, extract the individual files and output a JSON object like {\"file.py\": \"...\", \"utils.py\": \"...\"}.",
        predefined_messages_formatter_type=MessagesFormatterType.GEMMA_2,
        debug_output=False,
    )

    # Prompt to interpret the response.
    user_prompt = f"""
Given the following AI output that includes multiple code files, extract and convert it into a JSON object that maps filenames to code contents.

Example output:
{{
  "app.py": "print('Hello World')",
  "utils/helper.py": "def greet(): return 'hi'"
}}

AI Output:
{ai_response}
"""

    full_response = ""
    for chunk in agent.get_chat_response(user_prompt, returns_streaming_generator=True):
        full_response += chunk

    # Models frequently wrap their JSON in ```json fences; strip them before
    # parsing so we don't needlessly fall back to regex scraping.
    candidate = full_response.strip()
    fence = re.match(r"^```(?:json)?\s*\n(?P<body>[\s\S]*?)\n?```\s*$", candidate)
    if fence:
        candidate = fence.group("body")

    try:
        parsed = json.loads(candidate)
        if not isinstance(parsed, dict):
            raise ValueError("model did not return a JSON object")
    except (ValueError, TypeError):
        # Fallback: scrape "Filename: x" headers + fenced code blocks from the
        # raw output when the model didn't emit pure JSON.
        pattern = r"Filename:\s*(?P<fname>[\w\.\-/]+)\s*```(?:[a-zA-Z0-9]*)\n(?P<code>[\s\S]*?)```"
        parsed = {}
        for m in re.finditer(pattern, full_response):
            parsed[m.group("fname").strip()] = m.group("code")

    if not parsed:
        # Nothing parseable: ship the raw response so the user still gets output.
        parsed = {"output.txt": full_response}

    # Write files and zip them.
    temp_dir = f"/tmp/code_files_{uuid.uuid4().hex}"
    os.makedirs(temp_dir, exist_ok=True)

    for filename, content in parsed.items():
        # Filenames come from untrusted model output: normalize and refuse any
        # path that would escape temp_dir (absolute paths, ".." components).
        file_path = os.path.normpath(os.path.join(temp_dir, filename.lstrip("/\\")))
        if not file_path.startswith(temp_dir + os.sep):
            continue
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        if not isinstance(content, str):
            # Nested JSON values (lists/dicts) are serialized instead of crashing.
            content = json.dumps(content, ensure_ascii=False, indent=2)
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(content)

    zip_path = f"/tmp/code_bundle_{uuid.uuid4().hex}.zip"
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
        for root, _, files in os.walk(temp_dir):
            for file in files:
                full_path = os.path.join(root, file)
                arcname = os.path.relpath(full_path, start=temp_dir)
                zipf.write(full_path, arcname=arcname)

    return zip_path
92
 
93
 
94
  with gr.Blocks() as demo:
 
5
 
6
@spaces.GPU(duration=120)
def process_code_generation(ai_response: str):
    """Split a pasted AI response containing multiple code files into real files.

    A local GGUF model (via llama.cpp) converts the pasted response into a JSON
    object mapping filename -> code. Each entry is written under a fresh temp
    directory and the whole tree is zipped. Any unexpected failure is logged
    and re-raised so the Gradio UI surfaces the error.

    Args:
        ai_response: Raw text of an AI response embedding one or more code files.

    Returns:
        Path (str) to a zip archive containing the extracted files. If nothing
        parseable is found, the archive contains a single ``output.txt`` with
        the model's raw response.
    """
    try:
        import json
        import os
        import re
        import uuid
        import zipfile

        from huggingface_hub import hf_hub_download
        from llama_cpp import Llama
        from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
        from llama_cpp_agent.providers import LlamaCppPythonProvider

        # Download the model (hf_hub_download caches; a no-op after first call).
        model_path = hf_hub_download(
            repo_id="tHottie/NeuralDaredevil-8B-abliterated-Q4_K_M-GGUF",
            filename="neuraldaredevil-8b-abliterated-q4_k_m-imat.gguf",
            local_dir="./models"
        )

        # n_gpu_layers=0: ZeroGPU runs llama.cpp on CPU under the hood.
        llm = Llama(
            model_path=model_path,
            n_gpu_layers=0,
            n_batch=512,
            n_ctx=4096,
            verbose=True
        )

        provider = LlamaCppPythonProvider(llm)
        agent = LlamaCppAgent(
            provider,
            system_prompt="You are an AI code parser. Given a pasted AI response with multiple code files, output a JSON like {\"file.py\": \"code\"}.",
            predefined_messages_formatter_type=MessagesFormatterType.GEMMA_2,
            debug_output=False
        )

        prompt = f"""Here is an AI output with multiple files. Convert it to a JSON object mapping filenames to code.

{ai_response}
"""
        full_response = ""
        for chunk in agent.get_chat_response(prompt, returns_streaming_generator=True):
            full_response += chunk

        # Models frequently wrap their JSON in ```json fences; strip them
        # before parsing so we don't needlessly fall back to regex scraping.
        candidate = full_response.strip()
        fence = re.match(r"^```(?:json)?\s*\n(?P<body>[\s\S]*?)\n?```\s*$", candidate)
        if fence:
            candidate = fence.group("body")

        try:
            parsed = json.loads(candidate)
            if not isinstance(parsed, dict):
                raise ValueError("model did not return a JSON object")
        except (ValueError, TypeError) as e:
            # Narrow catch: only parse failures trigger the regex fallback.
            print("JSON parse failed:", e)
            parsed = {}
            pattern = r"Filename:\s*(?P<fname>[\w\.\-/]+)\s*```(?:[a-zA-Z0-9]*)\n(?P<code>[\s\S]*?)```"
            for m in re.finditer(pattern, full_response):
                parsed[m.group("fname").strip()] = m.group("code")
        if not parsed:
            # Nothing parseable: ship the raw response so the user gets output.
            parsed = {"output.txt": full_response}

        temp_dir = f"/tmp/code_files_{uuid.uuid4().hex}"
        os.makedirs(temp_dir, exist_ok=True)

        for fname, content in parsed.items():
            # Filenames come from untrusted model output: normalize and refuse
            # any path that would escape temp_dir (absolute, ".." components).
            file_path = os.path.normpath(os.path.join(temp_dir, fname.lstrip("/\\")))
            if not file_path.startswith(temp_dir + os.sep):
                continue
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            if not isinstance(content, str):
                # Nested JSON values are serialized instead of crashing write().
                content = json.dumps(content, ensure_ascii=False, indent=2)
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(content)

        zip_path = f"/tmp/code_bundle_{uuid.uuid4().hex}.zip"
        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
            for root, _, files in os.walk(temp_dir):
                for file in files:
                    full_path = os.path.join(root, file)
                    arcname = os.path.relpath(full_path, temp_dir)
                    zf.write(full_path, arcname)

        return zip_path

    except Exception as err:
        # Top-level boundary: log for the Space console, then re-raise so the
        # UI reports the failure. Bare `raise` preserves the full traceback.
        print("🔥 Error in process_code_generation():", err)
        raise
 
 
 
 
82
 
83
 
84
  with gr.Blocks() as demo: