Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -254,7 +254,6 @@ async def bulk_analyze(data: BulkReviewInput, token: str = Query(None)):
|
|
| 254 |
raise HTTPException(status_code=500, detail="Failed to analyze bulk reviews")
|
| 255 |
|
| 256 |
# Already set with os.environ — nothing else needed
|
| 257 |
-
|
| 258 |
@app.post("/rootcause/")
|
| 259 |
async def root_cause_analysis(payload: dict, x_api_key: str = Header(None)):
|
| 260 |
if x_api_key and x_api_key != VALID_API_KEY:
|
|
@@ -278,25 +277,38 @@ async def root_cause_analysis(payload: dict, x_api_key: str = Header(None)):
|
|
| 278 |
Suggestion: ...
|
| 279 |
"""
|
| 280 |
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 294 |
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
"cause": extract_line("Cause"),
|
| 298 |
-
"suggestion": extract_line("Suggestion")
|
| 299 |
-
}
|
| 300 |
|
| 301 |
except Exception as e:
|
| 302 |
logging.error(f"Root cause analysis failed: {traceback.format_exc()}")
|
|
|
|
| 254 |
raise HTTPException(status_code=500, detail="Failed to analyze bulk reviews")
|
| 255 |
|
| 256 |
# Already set with os.environ — nothing else needed
|
|
|
|
| 257 |
@app.post("/rootcause/")
|
| 258 |
async def root_cause_analysis(payload: dict, x_api_key: str = Header(None)):
|
| 259 |
if x_api_key and x_api_key != VALID_API_KEY:
|
|
|
|
| 277 |
Suggestion: ...
|
| 278 |
"""
|
| 279 |
|
| 280 |
+
# Models to try in order
|
| 281 |
+
models_to_try = ["gpt-4", "gpt-4o-mini", "gpt-3.5-turbo"]
|
| 282 |
+
|
| 283 |
+
last_error = None
|
| 284 |
+
for model_name in models_to_try:
|
| 285 |
+
try:
|
| 286 |
+
response = openai.chat.completions.create(
|
| 287 |
+
model=model_name,
|
| 288 |
+
messages=[{"role": "user", "content": prompt}]
|
| 289 |
+
)
|
| 290 |
+
output = response.choices[0].message.content
|
| 291 |
+
lines = output.splitlines()
|
| 292 |
+
|
| 293 |
+
def extract_line(tag):
|
| 294 |
+
for line in lines:
|
| 295 |
+
if line.lower().startswith(tag.lower()):
|
| 296 |
+
return line.split(":", 1)[-1].strip()
|
| 297 |
+
return "—"
|
| 298 |
+
|
| 299 |
+
return {
|
| 300 |
+
"problem": extract_line("Problem"),
|
| 301 |
+
"cause": extract_line("Cause"),
|
| 302 |
+
"suggestion": extract_line("Suggestion"),
|
| 303 |
+
"model_used": model_name
|
| 304 |
+
}
|
| 305 |
+
except Exception as e:
|
| 306 |
+
last_error = str(e)
|
| 307 |
+
logging.warning(f"Model {model_name} failed: {last_error}")
|
| 308 |
+
continue
|
| 309 |
|
| 310 |
+
# If all models fail
|
| 311 |
+
raise HTTPException(status_code=500, detail=f"All model attempts failed. Last error: {last_error}")
|
|
|
|
|
|
|
|
|
|
| 312 |
|
| 313 |
except Exception as e:
|
| 314 |
logging.error(f"Root cause analysis failed: {traceback.format_exc()}")
|