Use supervision library to visualize predictions (#6)
Commit 6510f0dd0fbd1f6131438cf8470fffab4f3c61bc
Co-authored-by: Piotr Skalski <SkalskiP@users.noreply.huggingface.co>
- app.py +22 -3
- requirements.txt +2 -1
app.py
CHANGED

@@ -1,6 +1,6 @@
-import torch
 import numpy as np
 import gradio as gr
+import supervision as sv
 from scipy.ndimage import binary_fill_holes
 from ultralytics import YOLOE
 from ultralytics.utils.torch_utils import smart_inference_mode

@@ -46,8 +46,27 @@ def yoloe_inference(image, prompts, target_image, model_id, image_size, conf_thr
     model.model.model[-1].max_det = 1000

     results = model.predict(source=image, imgsz=image_size, conf=conf_thresh, iou=iou_thresh, **kwargs)
-    …
-    …
+    detections = sv.Detections.from_ultralytics(results[0])
+
+    resolution_wh = image.size
+    thickness = sv.calculate_optimal_line_thickness(resolution_wh=resolution_wh)
+    text_scale = sv.calculate_optimal_text_scale(resolution_wh=resolution_wh)
+
+    labels = [
+        f"{class_name} {confidence:.2f}"
+        for class_name, confidence
+        in zip(detections['class_name'], detections.confidence)
+    ]
+
+    annotated_image = image.copy()
+    annotated_image = sv.MaskAnnotator(color_lookup=sv.ColorLookup.INDEX, opacity=0.4).annotate(
+        scene=annotated_image, detections=detections)
+    annotated_image = sv.BoxAnnotator(color_lookup=sv.ColorLookup.INDEX, thickness=thickness).annotate(
+        scene=annotated_image, detections=detections)
+    annotated_image = sv.LabelAnnotator(color_lookup=sv.ColorLookup.INDEX, text_scale=text_scale, smart_position=True).annotate(
+        scene=annotated_image, detections=detections, labels=labels)
+
+    return annotated_image


 def app():
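For reference, the annotation pattern introduced above can also be run outside the Space. The sketch below is illustrative only and is not part of this commit: it assumes a local input.jpg and the generic yolov8n-seg.pt Ultralytics checkpoint in place of the Space's YOLOE weights and prompt handling.

# Standalone sketch (not part of this commit): the supervision annotation
# pipeline from the diff above, applied to a generic Ultralytics model.
# "input.jpg" and "yolov8n-seg.pt" are placeholder assumptions.
from PIL import Image
import supervision as sv
from ultralytics import YOLO

image = Image.open("input.jpg").convert("RGB")
results = YOLO("yolov8n-seg.pt").predict(source=image, conf=0.25)
detections = sv.Detections.from_ultralytics(results[0])

# Scale line thickness and label size to the image resolution.
resolution_wh = image.size
thickness = sv.calculate_optimal_line_thickness(resolution_wh=resolution_wh)
text_scale = sv.calculate_optimal_text_scale(resolution_wh=resolution_wh)

# "class_name" is populated by Detections.from_ultralytics().
labels = [
    f"{class_name} {confidence:.2f}"
    for class_name, confidence
    in zip(detections["class_name"], detections.confidence)
]

# Layer masks, boxes, and labels onto a copy of the input image.
annotated_image = image.copy()
annotated_image = sv.MaskAnnotator(color_lookup=sv.ColorLookup.INDEX, opacity=0.4).annotate(
    scene=annotated_image, detections=detections)
annotated_image = sv.BoxAnnotator(color_lookup=sv.ColorLookup.INDEX, thickness=thickness).annotate(
    scene=annotated_image, detections=detections)
annotated_image = sv.LabelAnnotator(
    color_lookup=sv.ColorLookup.INDEX, text_scale=text_scale, smart_position=True
).annotate(scene=annotated_image, detections=detections, labels=labels)
annotated_image.save("annotated.jpg")

The annotators are applied in mask, box, label order so labels end up on top, and smart_position=True lets supervision move labels that would otherwise overlap.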
requirements.txt
CHANGED

@@ -6,4 +6,5 @@ gradio==4.42.0
 gradio_client==1.3.0
 gradio_image_prompter==0.1.0
 huggingface-hub==0.26.3
-fastapi==0.112.2
+fastapi==0.112.2
+supervision==0.25.1
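To try the sketch above locally, the pinned dependencies can be installed with pip install -r requirements.txt; supervision==0.25.1 provides the annotators and the smart_position label placement used in app.py.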