Commit 2b6608e
Parent(s): 68ec808
Feat: Added status_log.txt for MLOps observability and tracking.

Files changed:
- data/status_log.txt (+0 -0)
- scripts/optimize_prompt.py (+33 -9)
data/status_log.txt
ADDED (new empty file)
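Once the optimizer runs, the update_status_log helper added in the diff below overwrites this file with a single timestamped status line. An illustrative entry, with a placeholder timestamp and version rather than real output:

[2025-01-01 06:00:00 IST] MLOps Prompt Optimization deployed successfully. New version: 1.1.0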
scripts/optimize_prompt.py
CHANGED

@@ -14,7 +14,7 @@ PROJECT_ROOT = Path(__file__).resolve().parent.parent
 # File Paths
 MASTER_PROMPT_PATH = PROJECT_ROOT / "data" / "master_prompt.json"
 FEEDBACK_LOG_PATH = PROJECT_ROOT / "data" / "feedback_log.json"
-
+STATUS_LOG_PATH = PROJECT_ROOT / "data" / "status_log.txt"
 # LLM Model ID for Rewriting (Meta-LLM)
 META_LLM_MODEL = "x-ai/grok-4-fast"
 # The minimum number of negative feedback entries required to trigger an update

@@ -26,19 +26,21 @@ def load_data(path: Path):
     try:
         if path.exists():
             content = path.read_text().strip()
-
+
+            # Handle empty content
             if not content:
                 return []
 
             data = json.loads(content)
-
+
+            # Ensure correct type based on file (MASTER_PROMPT is dict, FEEDBACK_LOG is list)
             if path == MASTER_PROMPT_PATH and not isinstance(data, dict):
                 return {}
             if path == FEEDBACK_LOG_PATH and not isinstance(data, list):
                 return []
             return data
 
-        # Create initial
+        # Create initial state if file doesn't exist
         if path == MASTER_PROMPT_PATH:
             return {"system_message": "A critical error occurred.", "version": "1.0.0", "last_updated": datetime.datetime.now().isoformat()}
         return []

@@ -127,18 +129,21 @@ def optimize_system_prompt(current_system_message: str, feedback_summary: str) -
 def increment_version(version_str: str) -> str:
     """Safely increments the minor version (Y in X.Y.Z) of a version string."""
     try:
+        # Assumes format X.Y.Z
         parts = version_str.split('.')
         if len(parts) < 2:
-        return "1.0.0"
+            return "1.0.0"
 
         # Increment the second part (the minor version)
         new_minor_version = int(parts[1]) + 1
 
-        #
-
+        # Rebuild the version string
+        # Uses parts[2:] for safety, handles missing Z value if needed
+        new_parts = [parts[0], str(new_minor_version)] + parts[2:]
+        return ".".join(new_parts)
 
     except Exception:
-        # If any part fails (
+        # If any part fails (non-integer, etc.), reset to a safe, known state.
         return "1.0.0"
 
 

@@ -153,6 +158,8 @@ def run_optimization():
 
     if not feedback_data:
         print("INFO: Feedback log is empty. Exiting optimization.")
+        # Update the status log to reflect that the scheduled job ran but was skipped.
+        update_status_log("MLOps Optimization skipped (No new feedback).")
         return
 
     # 2. Aggregate Feedback

@@ -160,6 +167,7 @@ def run_optimization():
 
     if feedback_summary is None:
         # Optimization was skipped because not enough negative feedback was found
+        update_status_log("MLOps Optimization skipped (Insufficient negative feedback).")
         return
 
     # 3. Optimize Prompt

@@ -182,9 +190,25 @@ def run_optimization():
         with open(FEEDBACK_LOG_PATH, 'w') as f:
             json.dump([], f)
         print("Feedback log cleared.")
-
+
+        # 7. Update the Status Log (Rewrites the file with new timestamp)
+        update_status_log(f"MLOps Prompt Optimization deployed successfully. New version: {current_config['version']}")
+
     else:
         print("\nINFO: No significant change or API error. Master prompt remains the same.")
+        update_status_log("MLOps Optimization failed to deploy (API error or no meaningful change detected).")
+
+def update_status_log(status_message: str):
+    """Writes the current status to the status log file, overwriting the previous entry."""
+    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S IST")
+    log_content = f"[{current_time}] {status_message}"
+
+    try:
+        with open(STATUS_LOG_PATH, 'w') as f:
+            f.write(log_content)
+        print(f"Status log updated: {log_content}")
+    except Exception as e:
+        print(f"CRITICAL ERROR: Failed to write to status log: {e}")
 
 
 if __name__ == '__main__':
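
Since the point of this commit is observability, a scheduled health check can consume data/status_log.txt to flag a stale or failed optimizer run. Below is a minimal sketch, not part of this commit: it assumes the repository layout above and the "[YYYY-MM-DD HH:MM:SS IST] message" line format written by update_status_log, it assumes the check runs on the same machine and timezone as the optimizer, and the check_optimizer_status name and the 24-hour staleness threshold are illustrative choices.

import datetime
import re
from pathlib import Path

# Hypothetical placement: alongside optimize_prompt.py in scripts/.
PROJECT_ROOT = Path(__file__).resolve().parent.parent
STATUS_LOG_PATH = PROJECT_ROOT / "data" / "status_log.txt"

# Matches the "[YYYY-MM-DD HH:MM:SS IST] message" line written by update_status_log.
STATUS_PATTERN = re.compile(r"^\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) IST\] (.+)$")


def check_optimizer_status(max_age_hours: int = 24) -> bool:
    """Returns True if the last recorded optimizer run is recent and not a failure."""
    if not STATUS_LOG_PATH.exists():
        print("WARN: status_log.txt not found; the optimizer has not run yet.")
        return False

    content = STATUS_LOG_PATH.read_text().strip()
    if not content:
        print("WARN: status_log.txt is still empty; no status has been written yet.")
        return False

    match = STATUS_PATTERN.match(content)
    if not match:
        print("WARN: status_log.txt exists but is not in the expected format.")
        return False

    timestamp_str, message = match.groups()
    last_run = datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S")
    age = datetime.datetime.now() - last_run

    if age > datetime.timedelta(hours=max_age_hours):
        print(f"WARN: last optimizer status is {age} old: {message}")
        return False
    if "failed" in message.lower():
        print(f"WARN: last optimizer run reported a failure: {message}")
        return False

    print(f"OK: {message}")
    return True


if __name__ == "__main__":
    check_optimizer_status()

Because update_status_log overwrites the file on every run, this check only ever sees the most recent status; if a history is needed, the writer would have to append instead.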