Test CTR calculation formula 001
app.py
CHANGED
@@ -293,48 +293,211 @@ def removePreprocess(output, info):
     return output


-def
-
-
-
-        H = landmarks[94:]
+def validate_landmarks_consistency(landmarks, original_landmarks, threshold=0.05):
+    """Validate that corrected landmarks maintain anatomical consistency"""
+    try:
+        # Check if heart is still between lungs
         RL = landmarks[0:44]
         LL = landmarks[44:94]
-
-
-
-
-
-
-
+        H = landmarks[94:]
+
+        rl_center_x = np.mean(RL[:, 0])
+        ll_center_x = np.mean(LL[:, 0])
+        h_center_x = np.mean(H[:, 0])
+
+        # Heart should be between lung centers
+        if not (min(rl_center_x, ll_center_x) <= h_center_x <= max(rl_center_x, ll_center_x)):
+            print("Warning: Heart position validation failed")
+            return False
+
+        # Check if total change is reasonable
+        total_change = np.mean(np.linalg.norm(landmarks - original_landmarks, axis=1))
+        relative_change = total_change / np.mean(np.linalg.norm(original_landmarks, axis=1))
+
+        if relative_change > threshold:
+            print(f"Warning: Landmarks changed by {relative_change:.3f}, exceeds threshold {threshold}")
+            return False
+
+        return True
+
+    except Exception as e:
+        print(f"Error in landmark validation: {e}")
+        return False

-def
-    """
+def calculate_ctr_robust(landmarks, corrected_landmarks=None):
+    """Calculate CTR with multiple validation steps"""
     try:
-
+        original_landmarks = landmarks.copy()
+
+        if corrected_landmarks is not None:
+            RL, LL, H, tilt_angle = corrected_landmarks
+
+            # Validate correction
+            corrected_all = np.vstack([RL, LL, H])
+            if validate_landmarks_consistency(corrected_all, original_landmarks):
+                landmarks_to_use = corrected_all
+                correction_applied = True
+            else:
+                # Use original landmarks if validation fails
+                H = landmarks[94:]
+                RL = landmarks[0:44]
+                LL = landmarks[44:94]
+                landmarks_to_use = landmarks
+                correction_applied = False
+                tilt_angle = 0
+        else:
+            H = landmarks[94:]
+            RL = landmarks[0:44]
+            LL = landmarks[44:94]
+            landmarks_to_use = landmarks
+            tilt_angle = 0
+            correction_applied = False
+
+        # Method 1: Traditional width measurement
+        cardiac_width_1 = np.max(H[:, 0]) - np.min(H[:, 0])
+        thoracic_width_1 = max(np.max(RL[:, 0]), np.max(LL[:, 0])) - min(np.min(RL[:, 0]), np.min(LL[:, 0]))
+
+        # Method 2: Centroid-based measurement (more robust to outliers)
+        h_centroid = np.mean(H, axis=0)
+        rl_centroid = np.mean(RL, axis=0)
+        ll_centroid = np.mean(LL, axis=0)
+
+        # Find widest points from centroids
+        h_distances = np.linalg.norm(H - h_centroid, axis=1)
+        cardiac_width_2 = 2 * np.max(h_distances)
+
+        thoracic_width_2 = max(np.max(RL[:, 0]), np.max(LL[:, 0])) - min(np.min(RL[:, 0]), np.min(LL[:, 0]))
+
+        # Method 3: Percentile-based measurement (removes extreme outliers)
+        cardiac_x_coords = H[:, 0]
+        cardiac_width_3 = np.percentile(cardiac_x_coords, 95) - np.percentile(cardiac_x_coords, 5)
+
+        lung_x_coords = np.concatenate([RL[:, 0], LL[:, 0]])
+        thoracic_width_3 = np.percentile(lung_x_coords, 95) - np.percentile(lung_x_coords, 5)
+
+        # Calculate CTR for each method
+        ctr_1 = cardiac_width_1 / thoracic_width_1 if thoracic_width_1 > 0 else 0
+        ctr_2 = cardiac_width_2 / thoracic_width_2 if thoracic_width_2 > 0 else 0
+        ctr_3 = cardiac_width_3 / thoracic_width_3 if thoracic_width_3 > 0 else 0
+
+        # Validate consistency between methods
+        ctr_values = [ctr_1, ctr_2, ctr_3]
+        ctr_std = np.std(ctr_values)
+
+        if ctr_std > 0.05:  # High variance between methods
+            print(f"Warning: CTR calculation methods show high variance (std: {ctr_std:.3f})")
+            confidence = "Low"
+        elif ctr_std > 0.02:
+            confidence = "Medium"
+        else:
+            confidence = "High"
+
+        # Use median of methods for final result
+        final_ctr = np.median(ctr_values)
+
+        return {
+            'ctr': round(final_ctr, 3),
+            'tilt_angle': abs(tilt_angle),
+            'correction_applied': correction_applied,
+            'confidence': confidence,
+            'method_variance': round(ctr_std, 4),
+            'individual_results': {
+                'traditional': round(ctr_1, 3),
+                'centroid': round(ctr_2, 3),
+                'percentile': round(ctr_3, 3)
+            }
+        }
+
+    except Exception as e:
+        print(f"Error in robust CTR calculation: {e}")
+        return {
+            'ctr': 0,
+            'tilt_angle': 0,
+            'correction_applied': False,
+            'confidence': 'Error',
+            'method_variance': 0,
+            'individual_results': {}
+        }
+
+
+def detect_image_rotation_advanced(img):
+    """Enhanced rotation detection using multiple methods"""
+    try:
+        angles = []
+
+        # Method 1: Edge-based detection with focus on spine/mediastinum
         edges = cv2.Canny((img * 255).astype(np.uint8), 50, 150)
+        h, w = img.shape

-        #
-
+        # Focus on central region where spine should be
+        spine_region = edges[h//4:3*h//4, w//3:2*w//3]

-
-
-
+        # Find strong vertical lines (spine alignment)
+        lines = cv2.HoughLines(spine_region, 1, np.pi/180, threshold=50)
+        if lines is not None:
+            for line in lines[:5]:  # Top 5 lines
                 rho, theta = line[0]
-                angle = np.degrees(theta) - 90
-
-                if abs(angle) < 30 or abs(angle) > 60:
+                angle = np.degrees(theta) - 90
+                if abs(angle) < 30:  # Near vertical lines
                     angles.append(angle)
+
+        # Method 2: Chest boundary detection
+        # Find chest outline using contours
+        contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        if contours:
+            # Get largest contour (chest boundary)
+            largest_contour = max(contours, key=cv2.contourArea)

-
-
-
-
-
+            # Fit ellipse to chest boundary
+            if len(largest_contour) >= 5:
+                ellipse = cv2.fitEllipse(largest_contour)
+                chest_angle = ellipse[2] - 90  # Convert to rotation angle
+                if abs(chest_angle) < 45:
+                    angles.append(chest_angle)
+
+        # Method 3: Template-based symmetry detection
+        # Check left-right symmetry
+        left_half = img[:, :w//2]
+        right_half = np.fliplr(img[:, w//2:])
+
+        # Try different rotation angles to find best symmetry
+        best_angle = 0
+        best_correlation = 0
+
+        for test_angle in range(-15, 16, 2):
+            if test_angle == 0:
+                test_left = left_half
+            else:
+                center = (left_half.shape[1]//2, left_half.shape[0]//2)
+                rotation_matrix = cv2.getRotationMatrix2D(center, test_angle, 1.0)
+                test_left = cv2.warpAffine(left_half, rotation_matrix,
+                                           (left_half.shape[1], left_half.shape[0]))
+
+            # Calculate correlation
+            correlation = cv2.matchTemplate(test_left, right_half, cv2.TM_CCOEFF_NORMED).max()
+            if correlation > best_correlation:
+                best_correlation = correlation
+                best_angle = test_angle
+
+        if best_correlation > 0.3:  # Good symmetry found
+            angles.append(best_angle)
+
+        # Combine all methods
+        if angles:
+            # Remove outliers using IQR
+            angles = np.array(angles)
+            Q1, Q3 = np.percentile(angles, [25, 75])
+            IQR = Q3 - Q1
+            filtered_angles = angles[(angles >= Q1 - 1.5*IQR) & (angles <= Q3 + 1.5*IQR)]
+
+            if len(filtered_angles) > 0:
+                final_angle = np.median(filtered_angles)
+                return final_angle if abs(final_angle) > 1 else 0

         return 0
+
     except Exception as e:
-        print(f"Error in rotation detection: {e}")
+        print(f"Error in advanced rotation detection: {e}")
         return 0

 def rotate_image(img, angle):
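calculate_ctr_robust above derives the cardiothoracic ratio (CTR) as cardiac width divided by thoracic width, using three width estimates and reporting their median. A minimal sketch of the traditional estimate on synthetic landmarks (the point counts and coordinate ranges below are invented for illustration; in app.py the arrays come from the model output, with points 0-43 as right lung, 44-93 as left lung, and the remainder as heart):

import numpy as np

# Synthetic stand-ins shaped like the slices used above; values are made up.
rng = np.random.default_rng(0)
RL = np.column_stack([rng.uniform(100, 260, 44), rng.uniform(150, 700, 44)])  # right lung (x, y)
LL = np.column_stack([rng.uniform(540, 700, 50), rng.uniform(150, 700, 50)])  # left lung (x, y)
H = np.column_stack([rng.uniform(310, 520, 26), rng.uniform(380, 650, 26)])   # heart (x, y)

# Method 1 from the diff: widest horizontal cardiac extent over widest thoracic extent
cardiac_width = np.max(H[:, 0]) - np.min(H[:, 0])
thoracic_width = max(np.max(RL[:, 0]), np.max(LL[:, 0])) - min(np.min(RL[:, 0]), np.min(LL[:, 0]))
print(f"CTR = {cardiac_width:.0f} / {thoracic_width:.0f} = {cardiac_width / thoracic_width:.3f}")

The centroid-based and percentile-based variants in the hunk are alternative width estimates of the same two structures; the function takes the median of the three and flags low confidence when their standard deviation exceeds 0.05.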
@@ -378,28 +541,40 @@ def segment(input_img):
         original_img = cv2.imread(input_img, 0) / 255.0
         original_shape = original_img.shape[:2]

-        # Step 1:
-
-        detected_rotation = 0 # Temporarily disabled
+        # Step 1: Enhanced rotation detection (re-enabled)
+        detected_rotation = detect_image_rotation_advanced(original_img)
         was_rotated = False
         processing_img = original_img
-
-        # Step 2:
+
+        # Step 2: Rotate image if significant rotation detected
+        if abs(detected_rotation) > 3:
+            processing_img, actual_rotation = rotate_image(original_img, -detected_rotation)
+            was_rotated = True
+            print(f"Applied rotation correction: {detected_rotation:.1f}°")
+        else:
+            actual_rotation = 0
+
+        # Step 3: Preprocess the image
         img, (h, w, padding) = preprocess(processing_img)

-        # Step
+        # Step 4: AI segmentation
         data = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).to(device).float()

         with torch.no_grad():
             output = hybrid(data)[0].cpu().numpy().reshape(-1, 2)

-        # Step
+        # Step 5: Remove preprocessing
         output = removePreprocess(output, (h, w, padding))

-        # Step
+        # Step 6: Rotate landmarks back if image was rotated
+        if was_rotated:
+            center = np.array([original_shape[1]/2, original_shape[0]/2])
+            output = rotate_points(output, actual_rotation, center)
+
+        # Step 7: Convert output to int
         output = output.astype('int')

-        # Step
+        # Step 8: Draw results on original image
         outseg, corrected_data = drawOnTop(original_img, output, original_shape)

     except Exception as e:
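The corrected landmarks are rotated back with rotate_points, which is called here but defined elsewhere in app.py and not shown in this diff. As a rough sketch of what a helper with that call signature would do (an assumption based on the call site, not the actual implementation), it would rotate the Nx2 pixel coordinates by the applied angle in degrees about the image centre:

import numpy as np

def rotate_points(points, angle, center):
    # Hypothetical helper: rotate Nx2 (x, y) points by `angle` degrees around `center`.
    theta = np.radians(angle)
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    return (points - center) @ rot.T + center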
@@ -410,32 +585,47 @@ def segment(input_img):
     seg_to_save = (outseg.copy() * 255).astype('uint8')
     cv2.imwrite("tmp/overlap_segmentation.png", cv2.cvtColor(seg_to_save, cv2.COLOR_RGB2BGR))

-
+    # Step 9: Robust CTR calculation
+    ctr_result = calculate_ctr_robust(output, corrected_data)
+    ctr_value = ctr_result['ctr']
+    tilt_angle = ctr_result['tilt_angle']

-    #
-
-    if was_rotated:
-        rotation_warning = f" (🔄 Image was rotated {detected_rotation:.1f}° for AI processing)"
-
-    # Add remaining tilt warning (after AI processing correction)
-    tilt_warning = ""
-    if tilt_angle > 5:
-        tilt_warning = f" (⚠️ Remaining tilt: {tilt_angle:.1f}°)"
-    elif tilt_angle > 2:
-        tilt_warning = f" (Minor tilt: {tilt_angle:.1f}°)"
+    # Enhanced interpretation with quality indicators
+    interpretation_parts = []

+    # CTR interpretation
     if ctr_value < 0.5:
-
-    elif 0.
-
+        base_interpretation = "Normal"
+    elif 0.50 <= ctr_value <= 0.55:
+        base_interpretation = "Mild Cardiomegaly (CTR 50-55%)"
     elif 0.56 <= ctr_value <= 0.60:
-
+        base_interpretation = "Moderate Cardiomegaly (CTR 56-60%)"
     elif ctr_value > 0.60:
-
+        base_interpretation = "Severe Cardiomegaly (CTR > 60%)"
     else:
-
+        base_interpretation = "Cardiomegaly"
+
+    interpretation_parts.append(base_interpretation)
+
+    # Add quality indicators
+    if was_rotated:
+        interpretation_parts.append(f"🔄 Image rotation corrected ({detected_rotation:.1f}°)")
+
+    if ctr_result['correction_applied']:
+        interpretation_parts.append(f"📐 Anatomical tilt corrected ({tilt_angle:.1f}°)")
+    elif tilt_angle > 3:
+        interpretation_parts.append(f"⚠️ Residual tilt detected ({tilt_angle:.1f}°)")
+
+    # Add confidence indicator
+    confidence_icons = {'High': '✅', 'Medium': '⚡', 'Low': '⚠️', 'Error': '❌'}
+    interpretation_parts.append(f"{confidence_icons[ctr_result['confidence']]} Confidence: {ctr_result['confidence']}")
+
+    if ctr_result['method_variance'] > 0.02:
+        interpretation_parts.append(f"📊 Method variance: {ctr_result['method_variance']:.3f}")
+
+    final_interpretation = " | ".join(interpretation_parts)

-    return outseg, "tmp/overlap_segmentation.png", ctr_value,
+    return outseg, "tmp/overlap_segmentation.png", ctr_value, final_interpretation


 if __name__ == "__main__":
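For reference, the interpretation bands applied above can be read in isolation as a small helper (illustration only, restating the thresholds from the hunk; note that, as written, a CTR strictly between 0.55 and 0.56 falls through to the generic "Cardiomegaly" branch):

def interpret_ctr(ctr_value):
    # Same thresholds as the diff above; for illustration only.
    if ctr_value < 0.5:
        return "Normal"
    elif 0.50 <= ctr_value <= 0.55:
        return "Mild Cardiomegaly (CTR 50-55%)"
    elif 0.56 <= ctr_value <= 0.60:
        return "Moderate Cardiomegaly (CTR 56-60%)"
    elif ctr_value > 0.60:
        return "Severe Cardiomegaly (CTR > 60%)"
    else:
        return "Cardiomegaly"

print(interpret_ctr(0.47))  # Normal
print(interpret_ctr(0.58))  # Moderate Cardiomegaly (CTR 56-60%)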