fix: extract prediction label for accurate weight management in consensus calculation
Files changed: app_mcp.py (+4, -3)

app_mcp.py CHANGED
```diff
@@ -326,11 +326,12 @@ def predict_image_with_json(img, confidence_threshold, augment_methods, rotate_d
 
     for model_id, prediction in model_predictions_raw.items(): # Use raw predictions for weighting
         # Ensure the prediction label is valid for weighted_predictions
-
-
+        prediction_label = prediction.get("Label") # Extract the label
+        if prediction_label in weighted_predictions:
+            weighted_predictions[prediction_label] += adjusted_weights[model_id]
         else:
             # Handle cases where prediction might be an error or unexpected label
-            logger.warning(f"Unexpected prediction label '{
+            logger.warning(f"Unexpected prediction label '{prediction_label}' from model '{model_id}'. Skipping its weight in consensus.")
 
     final_prediction_label = "UNCERTAIN"
     if weighted_predictions["AI"] > weighted_predictions["REAL"] and weighted_predictions["AI"] > weighted_predictions["UNCERTAIN"]:
```
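For context, here is a minimal, self-contained sketch of how the patched loop fits into the surrounding consensus logic. It is illustrative only: the function name `weighted_consensus`, the input shapes (each raw prediction as a dict with a `"Label"` key, `adjusted_weights` mapping `model_id` to a float), and the symmetric `REAL` branch of the final decision are assumptions, since the diff above only shows the `AI` check.

```python
import logging

logger = logging.getLogger(__name__)

# Illustrative sketch only: assumes each raw prediction looks like
# {"Label": "AI", ...} and adjusted_weights maps model_id -> float weight.
def weighted_consensus(model_predictions_raw: dict, adjusted_weights: dict) -> str:
    weighted_predictions = {"AI": 0.0, "REAL": 0.0, "UNCERTAIN": 0.0}

    for model_id, prediction in model_predictions_raw.items():  # Use raw predictions for weighting
        prediction_label = prediction.get("Label")  # Extract the label (this commit's fix)
        if prediction_label in weighted_predictions:
            weighted_predictions[prediction_label] += adjusted_weights[model_id]
        else:
            # Unexpected or error labels contribute no weight to the consensus
            logger.warning(
                f"Unexpected prediction label '{prediction_label}' from model "
                f"'{model_id}'. Skipping its weight in consensus."
            )

    final_prediction_label = "UNCERTAIN"
    if (weighted_predictions["AI"] > weighted_predictions["REAL"]
            and weighted_predictions["AI"] > weighted_predictions["UNCERTAIN"]):
        final_prediction_label = "AI"
    # Assumed symmetric branch for REAL; the diff above only shows the AI check.
    elif (weighted_predictions["REAL"] > weighted_predictions["AI"]
            and weighted_predictions["REAL"] > weighted_predictions["UNCERTAIN"]):
        final_prediction_label = "REAL"
    return final_prediction_label


# Hypothetical usage: two of three models vote "AI" with combined weight 0.65.
if __name__ == "__main__":
    preds = {"m1": {"Label": "AI"}, "m2": {"Label": "REAL"}, "m3": {"Label": "AI"}}
    weights = {"m1": 0.4, "m2": 0.35, "m3": 0.25}
    print(weighted_consensus(preds, weights))  # -> AI
```

The point of the fix is the `prediction_label` line: weighting by the extracted label rather than by the whole prediction object keeps the keys of `weighted_predictions` ("AI", "REAL", "UNCERTAIN") consistent, so valid votes are counted instead of falling through to the warning branch.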