fix: gradio fix for space to work. (#2, opened by onuralpszr)

app.py CHANGED
@@ -17,11 +17,9 @@ ImageType = TypeVar("ImageType", Image.Image, np.ndarray)
 
 MARKDOWN = """
 # RF-DETR 🔥
-
 [`[code]`](https://github.com/roboflow/rf-detr)
 [`[blog]`](https://blog.roboflow.com/rf-detr)
 [`[notebook]`](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/how-to-finetune-rf-detr-on-detection-dataset.ipynb)
-
 RF-DETR is a real-time, transformer-based object detection model architecture developed
 by [Roboflow](https://roboflow.com/) and released under the Apache 2.0 license.
 """
@@ -51,7 +49,7 @@ create_directory(directory_path=VIDEO_TARGET_DIRECTORY)
 def detect_and_annotate(
     model: RFDETR,
     image: ImageType,
-    confidence: float
+    confidence: float,
 ) -> ImageType:
     detections = model.predict(image, threshold=confidence)
 
@@ -101,7 +99,6 @@ def video_processing_inference(
     confidence: float,
     resolution: int,
     checkpoint: str,
-    progress=gr.Progress(track_tqdm=True)
 ):
     model = load_model(resolution=resolution, checkpoint=checkpoint)
 
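The deleted `progress=gr.Progress(track_tqdm=True)` argument is Gradio's dependency-injected progress tracker: when a function declares a parameter whose default is a `gr.Progress` instance, Gradio supplies a live tracker at call time, and `track_tqdm=True` additionally mirrors any tqdm bars the function creates. Removing it gives up UI progress reporting, presumably to sidestep whatever was breaking the Space. A minimal sketch of the hook in isolation (a standalone demo, not this app's code):

```python
import time

import gradio as gr


def count_steps(n: float, progress=gr.Progress()):
    # Gradio detects the gr.Progress default and injects a live tracker;
    # wrapping an iterable in progress.tqdm() drives the UI progress bar.
    for _ in progress.tqdm(range(int(n)), desc="Working"):
        time.sleep(0.05)
    return f"finished {int(n)} steps"


demo = gr.Interface(fn=count_steps, inputs=gr.Number(value=20), outputs="text")

if __name__ == "__main__":
    demo.launch()
```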
@@ -120,7 +117,7 @@ def video_processing_inference(
         annotated_frame = detect_and_annotate(
             model=model,
             image=frame,
-            confidence=confidence
+            confidence=confidence,
         )
         annotated_frame = sv.scale_image(annotated_frame, VIDEO_SCALE_FACTOR)
         sink.write_frame(annotated_frame)
@@ -177,8 +174,6 @@ with gr.Blocks() as demo:
             image_processing_checkpoint_dropdown
         ],
         outputs=image_processing_output_image,
-        cache_examples=True,
-        run_on_click=True
     )
 
     image_processing_submit_button.click(
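The deleted `cache_examples=True` / `run_on_click=True` pair belongs to a `gr.Examples` block (both are `gr.Examples`-specific keywords). This is plausibly part of why the Space would not start: caching examples makes Gradio execute the inference function for every example at startup or build time, which is fragile on a GPU Space, and `run_on_click` is only meaningful when examples are *not* cached, so Gradio releases that validate the pair reject the combination. A sketch of the surviving pattern, with hypothetical component and handler names:

```python
# Hypothetical names for illustration; the real app wires its own components.
gr.Examples(
    fn=image_processing_inference,        # assumed handler
    examples=IMAGE_EXAMPLES,              # assumed example list
    inputs=[input_image, confidence_slider, checkpoint_dropdown],
    outputs=output_image,
    # With cache_examples/run_on_click omitted, clicking an example now only
    # populates the inputs; the user presses Submit to run inference.
)
```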
@@ -234,8 +229,7 @@ with gr.Blocks() as demo:
             video_processing_resolution_slider,
             video_processing_checkpoint_dropdown
         ],
-        outputs=video_processing_output_video
-        run_on_click=True
+        outputs=video_processing_output_video
     )
 
     video_processing_submit_button.click(
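The video-side hunk repairs an outright syntax error as well: the old code had `outputs=video_processing_output_video` immediately followed by `run_on_click=True` with no comma between the two keyword arguments, so `app.py` could not even be parsed and the whole Space died on startup, which matches the PR title. A minimal reproduction of the failure mode:

```python
# Two keyword arguments with no comma between them cannot be parsed;
# compile() raises before any Gradio code gets a chance to run.
bad_call = """
demo(
    outputs=video_processing_output_video
    run_on_click=True
)
"""
try:
    compile(bad_call, "<app.py excerpt>", "exec")
except SyntaxError as exc:
    # On Python 3.10+ the message even hints: "Perhaps you forgot a comma?"
    print(f"SyntaxError: {exc.msg}")
```

With that line dropped and `outputs=...` restored on its own, the file parses again; the remaining changes (trailing commas in the `detect_and_annotate` signature and call, the removed progress hook, the tightened MARKDOWN block) read as cleanup around the same fix.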