import os
import json
import datetime
# Import Path for robust file handling
from pathlib import Path 
from openai import OpenAI
from dotenv import load_dotenv

# --- Configuration & Initialization ---
# Load environment variables from .env file
load_dotenv()
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
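
# load_dotenv() above picks the key up from a .env file, typically at the
# project root. A minimal sketch (placeholder value, not a real key):
#
#     OPENROUTER_API_KEY=<your-openrouter-key>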

# OpenRouter client setup
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY,
)
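# Note: OpenRouter exposes an OpenAI-compatible API, which is why the stock
# OpenAI SDK works here with only base_url swapped to the OpenRouter endpoint.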

# --- Path Handling ---
# The project root is two levels up from this file (app/core_logic.py)
PROJECT_ROOT = Path(__file__).resolve().parent.parent

# Use Path objects for reliable access
MASTER_PROMPT_PATH = PROJECT_ROOT / "data" / "master_prompt.json"
FEEDBACK_LOG_PATH = PROJECT_ROOT / "data" / "feedback_log.json"
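
# Illustrative shape of data/master_prompt.json. The fields mirror the
# initial_data fallback in load_master_prompt below; the values here are
# examples only:
#
#     {
#         "system_message": "You are a precise, helpful assistant.",
#         "model_name": "x-ai/grok-4-fast",
#         "version": "1.0.0",
#         "last_updated": "2025-01-01T00:00:00"
#     }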

# --- LLM Model IDs ---
# Task-LLM: a free model ID available on OpenRouter
TASK_LLM_MODEL = "x-ai/grok-4-fast"
# Meta-LLM: reserved for later use (currently the same model)
META_LLM_MODEL = "x-ai/grok-4-fast"


def load_master_prompt():
    """Loads the current system message from the master configuration file."""
    try:
        # Path.read_text() keeps the happy path to a single expression
        return json.loads(MASTER_PROMPT_PATH.read_text())
    except FileNotFoundError:
        # Create the file with initial data if it doesn't exist
        initial_data = {
            "system_message": "Error: Master prompt file not found.",
            "model_name": TASK_LLM_MODEL,
            "version": "0.0.0",
            "last_updated": datetime.datetime.now().isoformat()
        }
        # Make sure the data/ directory exists before writing
        MASTER_PROMPT_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(MASTER_PROMPT_PATH, 'w') as f:
            json.dump(initial_data, f, indent=4)
        return initial_data
    except Exception as e:
        print(f"Error loading master prompt: {e}")
        return {"system_message": f"CRITICAL ERROR: {e}"}


def rewrite_and_generate(user_input: str) -> tuple[str, str]:
    """
    1. Loads the system prompt.
    2. Sends the combined prompt to the Task-LLM via OpenRouter.
    3. Returns the rewritten prompt (for user visibility) and the final response.
    """
    if not user_input or not user_input.strip():
        return "Please enter a question.", "---"

    config = load_master_prompt()
    system_message = config.get("system_message", "")
    
    # The Meta-Prompt for the LLM to structure its own output
    rewrite_instruction = (
        "TASK: First, rewrite the following VAGUE_INPUT into a highly specific and professional instruction (the 'OPTIMIZED PROMPT'). "
        "The optimized prompt MUST include a clear Persona, the desired Output Format, and clear Constraints. "
        "Second, provide the final answer based on the OPTIMIZED PROMPT. "
        "Format your output strictly as: \n\n<OPTIMIZED_PROMPT>\n\n<FINAL_RESPONSE>"
    )
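
    # Illustrative well-formed reply (an assumption about model behavior, not
    # a guarantee): the first blank line separates the two parts, which is
    # what the parsing further down relies on.
    #
    #     Persona: You are an astronomer. Output Format: ... Constraints: ...
    #
    #     Jupiter is the largest planet in the solar system...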

    full_prompt = f"VAGUE_INPUT: {user_input}"

    messages = [
        {"role": "system", "content": system_message + " " + rewrite_instruction},
        {"role": "user", "content": full_prompt}
    ]

    try:
        response = client.chat.completions.create(
            model=TASK_LLM_MODEL,
            messages=messages,
            temperature=0.7,
            max_tokens=2048,
        )
        
        raw_output = response.choices[0].message.content.strip()
        
        # Split on the first blank line only: the text before it is the
        # optimized prompt, everything after it is the final response. This
        # assumes the optimized prompt itself contains no blank lines.
        if "\n\n" in raw_output:
            optimized_prompt, final_response = raw_output.split("\n\n", 1)
        else:
            optimized_prompt = "--- (Parsing Error: could not separate prompt and response; the LLM output may not follow the required format.)"
            final_response = raw_output

        return optimized_prompt.strip(), final_response.strip()

    except Exception as e:
        return (
            f"ERROR: Failed to connect to OpenRouter API. Check key/internet. Details: {e}",
            "---"
        )


def log_feedback(
    original_prompt: str,
    optimized_prompt: str,
    final_response: str,
    rating: int
):
    """Logs the interaction data and user feedback to the JSON log file."""
    
    new_entry = {
        "timestamp": datetime.datetime.now().isoformat(),
        "original_prompt": original_prompt,
        "optimized_prompt": optimized_prompt,
        "final_response": final_response,
        "rating": rating
    }
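
    # For reference, the log file itself is a JSON array of such entries,
    # e.g. (illustrative values):
    #
    #     [
    #         {
    #             "timestamp": "2025-01-01T00:00:00",
    #             "original_prompt": "...",
    #             "optimized_prompt": "...",
    #             "final_response": "...",
    #             "rating": 1
    #         }
    #     ]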

    try:
        # 1. Load existing data
        if FEEDBACK_LOG_PATH.exists():
            with open(FEEDBACK_LOG_PATH, 'r') as f:
                # Guard against an empty file, which json.loads would reject
                content = f.read().strip()
                data = json.loads(content) if content else []
        else:
            data = []
        
        # 2. Append new entry and write back
        data.append(new_entry)
        
        # Make sure the data/ directory exists before writing
        FEEDBACK_LOG_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(FEEDBACK_LOG_PATH, 'w') as f:
            json.dump(data, f, indent=4)
        
        return "Feedback logged successfully! Thank you."
    
    except Exception as e:
        return f"ERROR: Could not log feedback to JSON file. Details: {e}"

# Running this file directly performs a quick smoke test:
if __name__ == "__main__":
    print("--- Testing Core Logic ---")
    
    # Check loading
    config = load_master_prompt()
    print(f"Current System Message: {config.get('system_message', 'N/A')}")
    
    # Test generation
    opt_prompt, response = rewrite_and_generate("Tell me about the biggest planet in our solar system.")
    print("\n[Optimized Prompt]:\n", opt_prompt)
    print("\n[Final Response]:\n", response)

    # Test logging
    log_status = log_feedback("Vague Test", opt_prompt, response, 1)
    print("\n[Log Status]:", log_status)