Update app.py
app.py (changed)

--- a/app.py
+++ b/app.py
@@ -661,7 +661,7 @@ def _maybe_set_model_field(cfg: dict, key: str, value):
         return
     cfg[key] = value
 
-# ---
+# --- UPDATED: dataset override (+ keep includes) + sync_bn off ----------------
 def patch_base_config(base_cfg_path, merged_dir, class_count, run_name,
                       epochs, batch, imgsz, lr, optimizer, pretrained_path: str | None):
     if not base_cfg_path or not os.path.exists(base_cfg_path):
@@ -669,7 +669,7 @@ def patch_base_config(base_cfg_path, merged_dir, class_count, run_name,
 
     template_dir = os.path.dirname(base_cfg_path)
 
-    # Load YAML then absolutize include-like paths
+    # Load YAML then absolutize include-like paths (KEEP includes; do not prune)
     with open(base_cfg_path, "r", encoding="utf-8") as f:
         cfg = yaml.safe_load(f)
     _absify_any_paths_deep(cfg, template_dir)
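
_absify_any_paths_deep is referenced here but defined elsewhere in app.py, outside this diff. As a rough, hypothetical sketch of the assumed behaviour (recursively resolving relative path-like strings against the template directory so the generated config still loads from another working directory), it might look like the following; the real helper may differ in which values it treats as paths:

import os

def _absify_any_paths_deep(node, base_dir):
    # Hypothetical sketch only, not the code from app.py.
    if isinstance(node, dict):
        for k, v in list(node.items()):
            node[k] = _absify_any_paths_deep(v, base_dir)
        return node
    if isinstance(node, list):
        return [_absify_any_paths_deep(v, base_dir) for v in node]
    if (isinstance(node, str) and not os.path.isabs(node)
            and node.endswith((".yml", ".yaml", ".json", ".pth"))):
        return os.path.abspath(os.path.join(base_dir, node))
    return node
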
@@ -683,17 +683,6 @@ def patch_base_config(base_cfg_path, merged_dir, class_count, run_name,
     cfg.setdefault("device", "")
     cfg["find_unused_parameters"] = False
 
-    # STRONG include pruning: keep ONLY runtime.yml to avoid overrides
-    if "__include__" in cfg and isinstance(cfg["__include__"], list):
-        kept = []
-        for p in cfg["__include__"]:
-            if not isinstance(p, str):
-                continue
-            pp = p.replace("\\", "/")
-            if pp.endswith("/configs/runtime.yml"):
-                kept.append(p)
-        cfg["__include__"] = kept
-
     ann_dir = os.path.join(merged_dir, "annotations")
     paths = {
         "train_json": os.path.abspath(os.path.join(ann_dir, "instances_train.json")),
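
The hunk above removes the aggressive __include__ pruning. Pulled out as a standalone function (with made-up example paths, not values from this repo), the deleted logic behaved roughly as follows; because it kept only runtime.yml, it also dropped the include that defines the model and backbone, which is why it was removed and why a defensive model stub is added further down:

def prune_includes_to_runtime_only(cfg: dict) -> dict:
    # Mirrors the block deleted above, reproduced only for illustration.
    if "__include__" in cfg and isinstance(cfg["__include__"], list):
        kept = []
        for p in cfg["__include__"]:
            if not isinstance(p, str):
                continue
            if p.replace("\\", "/").endswith("/configs/runtime.yml"):
                kept.append(p)
        cfg["__include__"] = kept
    return cfg

# Made-up include list: everything except runtime.yml is discarded,
# including the file that actually defines the model.
cfg = {"__include__": ["../configs/runtime.yml", "../configs/rtdetrv2_r50vd_base.yml"]}
print(prune_includes_to_runtime_only(cfg)["__include__"])  # ['../configs/runtime.yml']
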
@@ -800,6 +789,10 @@ def patch_base_config(base_cfg_path, merged_dir, class_count, run_name,
         _maybe_set_model_field(cfg, "pretrain", p)
         _maybe_set_model_field(cfg, "pretrained", p)
 
+    # Defensive: if after keeping includes we still don't have a model block, add a stub
+    if not cfg.get("model"):
+        cfg["model"] = {"type": "RTDETR", "num_classes": int(class_count)}
+
     cfg_out_dir = os.path.join(template_dir, "generated")
     os.makedirs(cfg_out_dir, exist_ok=True)
     out_path = os.path.join(cfg_out_dir, f"{run_name}.yaml")
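
_maybe_set_model_field itself is not part of this diff; only its signature (in the first hunk header) and its last two lines (return, then cfg[key] = value) are visible. A minimal sketch consistent with that shape, assuming the early return simply guards against empty values, could be:

def _maybe_set_model_field(cfg: dict, key: str, value):
    # Sketch only; the real guard condition in app.py is not shown in this diff.
    if value is None:
        return
    cfg[key] = value

Setting both "pretrain" and "pretrained" presumably covers templates that name the checkpoint field differently; the stub added in the hunk above only kicks in if the merged includes still produce no model block at all.
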
@@ -981,6 +974,9 @@ def training_handler(dataset_path, model_key, run_name, epochs, batch, imgsz, lr
     env.setdefault("WANDB_DISABLED", "true")
     env.setdefault("RTDETR_PYMODULE", "rtdetrv2_pytorch.src")
     env.setdefault("PYTHONUNBUFFERED", "1")  # nicer real-time logs
+    # Optional tiny guard: pick a single visible GPU if available
+    if torch.cuda.is_available():
+        env.setdefault("CUDA_VISIBLE_DEVICES", "0")
 
     proc = subprocess.Popen(cmd, cwd=train_cwd,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
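
The new guard assumes torch is already imported at module level in app.py. Combined with PYTHONUNBUFFERED=1 and stderr folded into stdout, the training subprocess can be tailed line by line. A self-contained sketch of that launch-and-tail pattern is below; the command is a placeholder, not the cmd, cwd, or env that training_handler actually builds:

import subprocess

# Placeholder command; training_handler launches the real RT-DETR training script.
proc = subprocess.Popen(
    ["python", "-u", "-c", "print('hello from the child process')"],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
for raw in iter(proc.stdout.readline, b""):
    print(raw.decode("utf-8", errors="replace").rstrip())  # stream each log line as it arrives
print("exit code:", proc.wait())
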