✅ [Update] type hint and mAP calculation func
tests/conftest.py CHANGED

```diff
@@ -4,15 +4,16 @@ from pathlib import Path
 import pytest
 import torch
 from hydra import compose, initialize
+from lightning import Trainer

 project_root = Path(__file__).resolve().parent.parent
 sys.path.append(str(project_root))

 from yolo import Anc2Box, Config, Vec2Box, create_converter, create_model
 from yolo.model.yolo import YOLO
-from yolo.tools.data_loader import StreamDataLoader,
+from yolo.tools.data_loader import StreamDataLoader, create_dataloader
 from yolo.tools.dataset_preparation import prepare_dataset
-from yolo.utils.logging_utils import
+from yolo.utils.logging_utils import set_seed, setup


 def pytest_configure(config):
@@ -52,18 +53,6 @@ def device():
     return torch.device("cuda" if torch.cuda.is_available() else "cpu")


-@pytest.fixture(scope="session")
-def train_progress_logger(train_cfg: Config):
-    progress_logger = ProgressLogger(train_cfg, exp_name=train_cfg.name)
-    return progress_logger
-
-
-@pytest.fixture(scope="session")
-def validation_progress_logger(validation_cfg: Config):
-    progress_logger = ProgressLogger(validation_cfg, exp_name=validation_cfg.name)
-    return progress_logger
-
-
 @pytest.fixture(scope="session")
 def model(train_cfg: Config, device) -> YOLO:
     model = create_model(train_cfg.model)
@@ -76,6 +65,22 @@ def model_v7(inference_v7_cfg: Config, device) -> YOLO:
     return model.to(device)


+@pytest.fixture(scope="session")
+def solver(train_cfg: Config) -> Trainer:
+    callbacks, loggers = setup(train_cfg)
+    trainer = Trainer(
+        accelerator="cuda",
+        max_epochs=getattr(train_cfg.task, "epoch", None),
+        precision="16-mixed",
+        callbacks=callbacks,
+        logger=loggers,
+        log_every_n_steps=1,
+        gradient_clip_val=10,
+        deterministic=True,
+    )
+    return trainer
+
+
 @pytest.fixture(scope="session")
 def vec2box(train_cfg: Config, model: YOLO, device) -> Vec2Box:
     vec2box = create_converter(train_cfg.model.name, model, train_cfg.model.anchor, train_cfg.image_size, device)
@@ -93,13 +98,13 @@ def anc2box(inference_v7_cfg: Config, model: YOLO, device) -> Anc2Box:
 @pytest.fixture(scope="session")
 def train_dataloader(train_cfg: Config):
     prepare_dataset(train_cfg.dataset, task="train")
-    return
+    return create_dataloader(train_cfg.task.data, train_cfg.dataset, train_cfg.task.task)


 @pytest.fixture(scope="session")
 def validation_dataloader(validation_cfg: Config):
     prepare_dataset(validation_cfg.dataset, task="val")
-    return
+    return create_dataloader(validation_cfg.task.data, validation_cfg.dataset, validation_cfg.task.task)


 @pytest.fixture(scope="session")
```
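The new `solver` fixture replaces the removed progress-logger fixtures and builds a Lightning `Trainer` straight from the training config. As a quick illustration of how it could be consumed, here is a hypothetical sanity test (not part of this commit); it only touches public `lightning.Trainer` attributes, and `solver` / `train_cfg` are the session-scoped fixtures above.

```python
from lightning import Trainer


def test_solver_fixture(solver, train_cfg):
    # The fixture should hand back a fully constructed Trainer.
    assert isinstance(solver, Trainer)
    # It forwards the configured epoch count and gradient clipping value.
    assert solver.max_epochs == getattr(train_cfg.task, "epoch", None)
    assert solver.gradient_clip_val == 10
```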
tests/test_tools/test_loss_functions.py CHANGED

```diff
@@ -1,4 +1,5 @@
 import sys
+from math import isinf, isnan
 from pathlib import Path

 import pytest
@@ -51,6 +52,6 @@ def test_yolo_loss(loss_function, data):
     predicts, targets = data
     loss, loss_dict = loss_function(predicts, predicts, targets)
     assert torch.isnan(loss)
-    assert
-    assert
-    assert
+    assert isnan(loss_dict["Loss/BoxLoss"])
+    assert isnan(loss_dict["Loss/DFLLoss"])
+    assert isinf(loss_dict["Loss/BCELoss"])
```
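The new assertions use `math.isnan` / `math.isinf` rather than their torch counterparts. That works because both accept anything convertible to `float`, including zero-dimensional tensors. A minimal self-contained check, assuming the `loss_dict` values are Python floats or scalar tensors:

```python
from math import isinf, isnan

import torch

# math.isnan / math.isinf coerce their argument via float(), so plain floats
# and 0-dim torch tensors are both accepted.
assert isnan(float("nan"))
assert isnan(torch.tensor(float("nan")))
assert isinf(torch.tensor(float("inf")))
```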
tests/test_utils/test_bounding_box_utils.py CHANGED

```diff
@@ -216,9 +216,8 @@ def test_calculate_map():
     ground_truths = tensor([[0, 50, 50, 150, 150], [0, 30, 30, 100, 100]])  # [class, x1, y1, x2, y2]

     mAP = calculate_map(predictions, ground_truths)
-
-    expected_ap50 =
-    expected_ap50_95 =
-
-    assert isclose(mAP["mAP.5"], expected_ap50, atol=1e-5), f"AP50 mismatch"
-    assert isclose(mAP["mAP.5:.95"], expected_ap50_95, atol=1e-5), f"Mean AP mismatch"
+    expected_ap50 = tensor(0.5050)
+    expected_ap50_95 = tensor(0.2020)
+
+    assert isclose(mAP["map_50"], expected_ap50, atol=1e-4), f"AP50 mismatch"
+    assert isclose(mAP["map"], expected_ap50_95, atol=1e-4), f"Mean AP mismatch"
```
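The expected metric keys change from the custom `"mAP.5"` / `"mAP.5:.95"` to torchmetrics' `"map_50"` / `"map"`. A small self-contained sketch (assuming a recent torchmetrics is installed; boxes and scores here are made up for illustration) showing the dictionary that `MeanAveragePrecision` produces and where those keys come from:

```python
import torch
from torchmetrics.detection import MeanAveragePrecision

metric = MeanAveragePrecision(iou_type="bbox", box_format="xyxy")

# Predictions need boxes, scores, and integer labels; targets need boxes and labels.
preds = [{
    "boxes": torch.tensor([[60.0, 60.0, 160.0, 160.0]]),
    "scores": torch.tensor([0.9]),
    "labels": torch.tensor([0]),
}]
target = [{
    "boxes": torch.tensor([[50.0, 50.0, 150.0, 150.0]]),
    "labels": torch.tensor([0]),
}]

metric.update(preds, target)
result = metric.compute()
print(result["map_50"], result["map"])  # 0-dim tensors, directly comparable with torch.isclose
```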
yolo/tools/loss_functions.py CHANGED

```diff
@@ -119,7 +119,7 @@ class DualLoss:

     def __call__(
         self, aux_predicts: List[Tensor], main_predicts: List[Tensor], targets: Tensor
-    ) -> Tuple[Tensor, Dict[str,
+    ) -> Tuple[Tensor, Dict[str, float]]:
         # TODO: Need Refactor this region, make it flexible!
         aux_iou, aux_dfl, aux_cls = self.loss(aux_predicts, targets)
         main_iou, main_dfl, main_cls = self.loss(main_predicts, targets)
```
yolo/utils/bounding_box_utils.py CHANGED

```diff
@@ -4,10 +4,11 @@ from typing import Dict, List, Optional, Tuple, Union
 import torch
 import torch.nn.functional as F
 from einops import rearrange
-from torch import Tensor,
+from torch import Tensor, tensor
+from torchmetrics.detection import MeanAveragePrecision
 from torchvision.ops import batched_nms

-from yolo.config.config import AnchorConfig, MatcherConfig,
+from yolo.config.config import AnchorConfig, MatcherConfig, NMSConfig
 from yolo.model.yolo import YOLO
 from yolo.utils.logger import logger

@@ -406,50 +407,9 @@ def bbox_nms(cls_dist: Tensor, bbox: Tensor, nms_cfg: NMSConfig, confidence: Opt
     return predicts_nms


-def calculate_map(predictions, ground_truths
-
-
-    n_preds = predictions.size(0)
-    n_gts = (ground_truths[:, 0] != -1).sum()
-    ground_truths = ground_truths[:n_gts]
-    aps = []
-
-    ious = calculate_iou(predictions[:, 1:-1], ground_truths[:, 1:])  # [n_preds, n_gts]
-
-    for threshold in iou_thresholds:
-        tp = torch.zeros(n_preds, device=device, dtype=bool)
-
-        max_iou, max_indices = ious.max(dim=1)
-        above_threshold = max_iou >= threshold
-        matched_classes = predictions[:, 0] == ground_truths[max_indices, 0]
-        max_match = torch.zeros_like(ious)
-        max_match[arange(n_preds), max_indices] = max_iou
-        if max_match.size(0):
-            tp[max_match.argmax(dim=0)] = True
-        tp[~above_threshold | ~matched_classes] = False
-
-        _, indices = torch.sort(predictions[:, 1], descending=True)
-        tp = tp[indices]
-
-        tp_cumsum = torch.cumsum(tp, dim=0)
-        fp_cumsum = torch.cumsum(~tp, dim=0)
-
-        precision = tp_cumsum / (tp_cumsum + fp_cumsum + 1e-6)
-        recall = tp_cumsum / (n_gts + 1e-6)
-
-        precision = torch.cat([torch.ones(1, device=device), precision, torch.zeros(1, device=device)])
-        recall = torch.cat([torch.zeros(1, device=device), recall, torch.ones(1, device=device)])
-
-        precision, _ = torch.cummax(precision.flip(0), dim=0)
-        precision = precision.flip(0)
-
-        ap = torch.trapezoid(precision, recall)
-        aps.append(ap)
-
-    mAP = {
-        "mAP.5": aps[0],
-        "mAP.5:.95": torch.mean(torch.stack(aps)),
-    }
+def calculate_map(predictions, ground_truths) -> Dict[str, Tensor]:
+    metric = MeanAveragePrecision(iou_type="bbox", box_format="xyxy")
+    mAP = metric([to_metrics_format(predictions)], [to_metrics_format(ground_truths)])
     return mAP

```
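The rewritten `calculate_map` hands the heavy lifting to `torchmetrics.detection.MeanAveragePrecision` and converts its inputs through a `to_metrics_format` helper that is not shown in this diff. A hypothetical sketch of such a converter, assuming rows are laid out as `[class, x1, y1, x2, y2]` for ground truths and carry a trailing confidence column for predictions; the project's actual helper may differ:

```python
from typing import Dict

import torch
from torch import Tensor


def to_metrics_format_sketch(boxes: Tensor) -> Dict[str, Tensor]:
    # MeanAveragePrecision expects per-image dicts with "boxes" (N, 4 in xyxy),
    # integer "labels" (N,), and, for predictions only, "scores" (N,).
    out = {"boxes": boxes[:, 1:5], "labels": boxes[:, 0].to(torch.int32)}
    if boxes.shape[1] > 5:  # assumed trailing confidence column on prediction rows
        out["scores"] = boxes[:, 5]
    return out
```

With that shape contract in place, calling the metric object directly, as the new `calculate_map` does, both updates its state and returns the metric dictionary computed on that single batch, which is what the updated test reads back via the `"map_50"` and `"map"` keys.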