Commit: 🎨 [Update] progress bar, update epoch each batch

File changed: yolo/utils/logging_utils.py
@@ -22,6 +22,7 @@ from rich.console import Console  (old side)
 from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn
 from rich.table import Table
 from torch import Tensor

 from yolo.config.config import Config, GeneralConfig, YOLOLayer

@@ -40,7 +41,7 @@ class ProgressTracker:  (old side)
         self.progress = Progress(
             TextColumn("[progress.description]{task.description}"),
             BarColumn(bar_width=None),
-            TextColumn("{task.completed}/{task.total}"),
             TimeRemainingColumn(),
         )
         self.use_wandb = use_wandb
@@ -53,7 +54,8 @@ class ProgressTracker:  (old side)
     def start_train(self, num_epochs: int):
         self.task_epoch = self.progress.add_task("[cyan]Epochs [white]| Loss | Box | DFL | BCE |", total=num_epochs)

-    def start_one_epoch(self, num_batches, optimizer, epoch_idx):
         if self.use_wandb:
             lr_values = [params["lr"] for params in optimizer.param_groups]
             lr_names = ["bias", "norm", "conv"]
@@ -71,10 +73,10 @@ class ProgressTracker:  (old side)
             loss_str += f" {loss_val:2.2f} |"

         self.progress.update(self.batch_task, advance=1, description=f"[green]Batches [white]{loss_str}")

     def finish_one_epoch(self):
         self.progress.remove_task(self.batch_task)
-        self.progress.update(self.task_epoch, advance=1)

     def finish_train(self):
         self.wandb.finish()
(new side)
@@ -22,6 +22,7 @@ from rich.console import Console
 from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn
 from rich.table import Table
 from torch import Tensor
+from torch.optim import Optimizer

 from yolo.config.config import Config, GeneralConfig, YOLOLayer

@@ -40,7 +41,7 @@ class ProgressTracker:
         self.progress = Progress(
             TextColumn("[progress.description]{task.description}"),
             BarColumn(bar_width=None),
+            TextColumn("{task.completed:.0f}/{task.total:.0f}"),
             TimeRemainingColumn(),
         )
         self.use_wandb = use_wandb

@@ -53,7 +54,8 @@ class ProgressTracker:
     def start_train(self, num_epochs: int):
         self.task_epoch = self.progress.add_task("[cyan]Epochs [white]| Loss | Box | DFL | BCE |", total=num_epochs)

+    def start_one_epoch(self, num_batches: int, optimizer: Optimizer, epoch_idx: int):
+        self.num_batches = num_batches
         if self.use_wandb:
             lr_values = [params["lr"] for params in optimizer.param_groups]
             lr_names = ["bias", "norm", "conv"]

@@ -71,10 +73,10 @@ class ProgressTracker:
             loss_str += f" {loss_val:2.2f} |"

         self.progress.update(self.batch_task, advance=1, description=f"[green]Batches [white]{loss_str}")
+        self.progress.update(self.task_epoch, advance=1 / self.num_batches)

     def finish_one_epoch(self):
         self.progress.remove_task(self.batch_task)

     def finish_train(self):
         self.wandb.finish()