Commit 5826e7f · Change default tab
Parent(s): 2e9f7e2

app.py CHANGED
@@ -34,7 +34,7 @@ from monai.transforms import Compose, ScaleIntensityd, SpatialPadd
 from tqdm import tqdm
 
 # cache directories
-cache_dir = Path(
+cache_dir = Path(__file__).parent
 cache_dir.mkdir(parents=True, exist_ok=True)
 
 
@@ -282,7 +282,7 @@ def segmentation_sax_inference(
     view: str,
     transform: Compose,
     model: ConvUNetR,
-    progress
+    progress: gr.Progress,
 ) -> np.ndarray:
     model.to(device)
     n_slices, n_frames = images.shape[-2:]
@@ -457,7 +457,7 @@ def segmentation_lax_inference(
     view: str,
     transform: Compose,
     model: ConvUNetR,
-    progress
+    progress: gr.Progress,
 ) -> np.ndarray:
     model.to(device)
     n_frames = images.shape[-1]
@@ -604,7 +604,7 @@ def landmark_heatmap_inference(
     view: str,
     transform: Compose,
     model: ConvUNetR,
-    progress
+    progress: gr.Progress,
 ) -> tuple[np.ndarray, np.ndarray]:
     model.to(device)
 
@@ -638,7 +638,7 @@ def landmark_coordinate_inference(
     view: str,
     transform: Compose,
     model: ConvViT,
-    progress
+    progress: gr.Progress,
 ) -> np.ndarray:
     model.to(device)
 
@@ -822,7 +822,7 @@ with gr.Blocks(
         """
     )
 
-    with gr.Tabs(selected="
+    with gr.Tabs(selected="sax_seg") as tabs:
        with gr.TabItem("🖼️ Cine CMR Views", id="cmr"):
            cmr_tab()
        with gr.TabItem("🧩 Masked Autoencoder", id="mae"):
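The signature hunks above thread a gr.Progress tracker from the Gradio event handlers into the inference helpers. A minimal sketch of the usual pattern, assuming the tracker is injected through a default gr.Progress() argument and forwarded down; the handler and helper names here are illustrative, not the app's actual code:

import gradio as gr
import numpy as np

def run_inference(frames: np.ndarray, progress: gr.Progress) -> np.ndarray:
    # Wrap the frame loop so Gradio can render a per-frame progress bar.
    n_frames = frames.shape[-1]
    outputs = []
    for t in progress.tqdm(range(n_frames), desc="Running inference"):
        outputs.append(frames[..., t])  # stand-in for the per-frame model call
    return np.stack(outputs, axis=-1)

def on_segment_click(frames, progress=gr.Progress()):
    # Gradio fills in `progress` via the default value when this function is
    # wired to an event; it is then passed straight to the helper, matching
    # the new `progress: gr.Progress` parameters in the diff.
    return run_inference(frames, progress)

Either progress.tqdm(...) or direct calls such as progress(0.4, desc="...") update the same bar, so the annotated parameter works with both styles inside the helpers.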
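The last hunk changes which tab is open on page load: gr.Tabs(selected=...) takes the id= of one of its gr.TabItem children. A minimal sketch under assumed labels (the "cmr" and "sax_seg" ids come from the diff; the second tab's label is hypothetical):

import gradio as gr

with gr.Blocks() as demo:
    with gr.Tabs(selected="sax_seg") as tabs:
        with gr.TabItem("🖼️ Cine CMR Views", id="cmr"):
            gr.Markdown("Shown when the 'cmr' tab is clicked.")
        with gr.TabItem("SAX segmentation", id="sax_seg"):  # hypothetical label
            gr.Markdown("Open by default because this id matches selected=.")

# demo.launch()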