Add files for gradio space
- README.md +3 -3
- app.py +50 -0
- examples/spleen_46.nii.gz +3 -0
- requirements.txt +2 -0
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 title: Spleen Segmentation
-emoji:
-colorFrom:
-colorTo:
+emoji: 👀
+colorFrom: indigo
+colorTo: red
 sdk: gradio
 sdk_version: 3.1.1
 app_file: app.py
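The YAML front matter above is the Space card configuration that Hugging Face reads to pick the SDK, SDK version, and entry-point file. A quick local check that it parses as intended is sketched below; PyYAML is assumed purely for illustration and is not part of this commit's requirements.txt:

import yaml

# Read the YAML front matter between the leading '---' markers of README.md.
with open('README.md') as f:
    front_matter = yaml.safe_load(f.read().split('---')[1])

print(front_matter['title'])                              # Spleen Segmentation
print(front_matter['sdk'], front_matter['sdk_version'])   # gradio 3.1.1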
app.py ADDED
@@ -0,0 +1,50 @@
+import os
+import gradio as gr
+import torch
+from monai import bundle
+
+BUNDLE_NAME = 'spleen_ct_segmentation_v0.1.0'
+BUNDLE_PATH = os.path.join(torch.hub.get_dir(), 'bundle', BUNDLE_NAME)
+
+examples = ['examples/spleen_46.nii.gz']
+
+model, _, _ = bundle.load(
+    name = BUNDLE_NAME,
+    source = 'hf_hub',
+    repo = 'katielink/spleen_ct_segmentation_v0.1.0',
+    load_ts_module=True,
+)
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+parser = bundle.load_bundle_config(BUNDLE_PATH, 'inference.json')
+preproc_transforms = parser.get_parsed_content('preprocessing', lazy=True, eval_expr=True, instantiate=True)
+inferer = parser.get_parsed_content('inferer', lazy=True, eval_expr=True, instantiate=True)
+
+def predict(input_file, z_axis, model=model, device=device):
+    data = {'image': [input_file.name]}
+    data = preproc_transforms(data)
+
+    model.to(device)
+    model.eval()
+    with torch.no_grad():
+        inputs = data['image'].to(device)[None,...]
+        data['pred'] = inferer(inputs=inputs, network=model)
+
+    input_image = data['image'].numpy()
+    pred_image = torch.argmax(data['pred'], dim=1).cpu().detach().numpy()
+
+    return input_image[0, :, :, z_axis], pred_image[0, :, :, z_axis]*255
+
+iface = gr.Interface(
+    fn=predict,
+    inputs=[
+        gr.File(label='Nifti file'),
+        gr.Slider(0, 200, label='z-axis', value=50)
+    ],
+    outputs=['image', 'image'],
+    title='Segment the Spleen from a CT Scan using MONAI',
+    examples=examples,
+)
+
+iface.launch()
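The predict function above can also be exercised outside the Gradio UI, which is handy as a quick smoke test. The sketch below is an assumption-laden example, not part of the committed Space: it presumes the bundle has already been downloaded by bundle.load, that the LFS example volume is present locally, and it uses types.SimpleNamespace to mimic the tempfile-like object (with a .name attribute) that gr.File passes to the callback:

from types import SimpleNamespace

# Hypothetical stand-in for the upload object Gradio hands to predict();
# only the .name attribute is read inside the function.
upload = SimpleNamespace(name='examples/spleen_46.nii.gz')

# Pick a mid-volume slice; the function returns two 2D arrays
# (the CT slice and the scaled segmentation mask) for display.
ct_slice, mask_slice = predict(upload, z_axis=50)
print(ct_slice.shape, mask_slice.shape)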
examples/spleen_46.nii.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1dc87026596e782c25de81ab171cd2b456a8e2c76e304949e9318451406c8edf
+size 28610147
requirements.txt ADDED
@@ -0,0 +1,2 @@
+git+https://github.com/katielink/MONAI.git@4042-download-hf-hub-bundle
+huggingface_hub
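The requirements pin a fork branch of MONAI (4042-download-hf-hub-bundle), whose name suggests that loading bundles directly from the Hugging Face Hub (the source='hf_hub' call in app.py) was still under development when this Space was built. A minimal, hedged sanity check after installing requirements.txt is to confirm the installed bundle.load actually accepts that parameter:

import inspect
from monai import bundle

# If this assertion fails, a stock MONAI release was likely installed instead
# of the fork branch pinned in requirements.txt.
assert 'source' in inspect.signature(bundle.load).parameters, \
    'installed MONAI does not support loading bundles from the Hugging Face Hub'
print(list(inspect.signature(bundle.load).parameters))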