Update app.py
app.py CHANGED
@@ -17,7 +17,7 @@ CLIP_MODEL_NAME = "openai/clip-vit-base-patch32"
 
 # FastSAM
 # *Corrected* HuggingFace link for the weights
-FASTSAM_WEIGHTS_URL = "https://
+FASTSAM_WEIGHTS_URL = "https://huggingface.co/spaces/An-619/FastSAM/resolve/6f76f474c656d2cb29599f49c296a8784b02d04b/weights/FastSAM-s.pt"
 FASTSAM_WEIGHTS_NAME = "FastSAM-s.pt"
 
 # Default FastSAM parameters
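The corrected URL points at a raw `FastSAM-s.pt` checkpoint, so the app can download it once and load it locally. Below is a minimal sketch of that download-and-load step, assuming the `ultralytics` FastSAM wrapper and a plain `urllib` download; the Space's actual download helper is outside this hunk.

```python
import os
import urllib.request

from ultralytics import FastSAM  # assumed wrapper; app.py's import is not shown in this hunk

FASTSAM_WEIGHTS_URL = "https://huggingface.co/spaces/An-619/FastSAM/resolve/6f76f474c656d2cb29599f49c296a8784b02d04b/weights/FastSAM-s.pt"
FASTSAM_WEIGHTS_NAME = "FastSAM-s.pt"

def load_fastsam_model() -> FastSAM:
    # Download the checkpoint once; later runs reuse the cached local file.
    if not os.path.exists(FASTSAM_WEIGHTS_NAME):
        urllib.request.urlretrieve(FASTSAM_WEIGHTS_URL, FASTSAM_WEIGHTS_NAME)
    return FastSAM(FASTSAM_WEIGHTS_NAME)
```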
@@ -120,7 +120,7 @@ def process_image_fastsam(image, imgsz, conf, iou, retina_masks):
     # Check if results are valid
     if results is None or len(results) == 0 or results[0] is None:
         return None, "FastSAM did not return valid results. Try adjusting parameters or using a different image."
-
+
     # Get detections
     detections = sv.Detections.from_ultralytics(results[0])
     # Check if detections are valid
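For context on the `sv.Detections.from_ultralytics(results[0])` line kept above: the FastSAM result is converted into a `supervision` Detections object and then drawn onto the image. A hedged sketch of that conversion and overlay follows; the annotator choices and the helper name are assumptions, only the conversion line appears in the hunk.

```python
import numpy as np
import supervision as sv

def annotate_fastsam_result(image: np.ndarray, results) -> np.ndarray:
    # Convert the first ultralytics result into a supervision Detections object.
    detections = sv.Detections.from_ultralytics(results[0])

    # Overlay segmentation masks and bounding boxes on a copy of the input image.
    annotated = image.copy()
    annotated = sv.MaskAnnotator().annotate(scene=annotated, detections=detections)
    annotated = sv.BoxAnnotator().annotate(scene=annotated, detections=detections)
    return annotated
```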
@@ -155,7 +155,6 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
 This demo combines two powerful AI models:
 - **CLIP**: For zero-shot image classification
 - **FastSAM**: For automatic image segmentation
-
 Try uploading an image and use either of the tabs below!
 """)
 
@@ -187,7 +186,7 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
         conf_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=DEFAULT_CONFIDENCE, label="Confidence Threshold")
         iou_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=DEFAULT_IOU, label="IoU Threshold")
         retina_checkbox = gr.Checkbox(label="Retina Masks", value=DEFAULT_RETINA_MASKS)
-
+
         with gr.Row():
             image_output = gr.Image(label="Segmentation Result")
             error_output = gr.Textbox(label="Error Message", type="text")  # Added for displaying errors
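The sliders and checkbox above only take effect once they are wired to `process_image_fastsam`. Here is a minimal, self-contained sketch of that wiring, assuming hypothetical component names (`image_input`, `imgsz_slider`, `segment_button`) and placeholder defaults for values not shown in this diff.

```python
import gradio as gr

# Sketch only: component names and default values outside this hunk are assumptions;
# process_image_fastsam is the app's existing function (image, imgsz, conf, iou, retina_masks).
with gr.Blocks() as sketch:
    image_input = gr.Image(label="Input Image")
    imgsz_slider = gr.Slider(minimum=320, maximum=1024, step=32, value=640, label="Image Size")
    conf_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.4, label="Confidence Threshold")
    iou_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.9, label="IoU Threshold")
    retina_checkbox = gr.Checkbox(label="Retina Masks", value=True)
    segment_button = gr.Button("Segment")
    with gr.Row():
        image_output = gr.Image(label="Segmentation Result")
        error_output = gr.Textbox(label="Error Message", type="text")

    # The function returns (annotated_image, error_message), matching the two outputs.
    segment_button.click(
        fn=process_image_fastsam,
        inputs=[image_input, imgsz_slider, conf_slider, iou_slider, retina_checkbox],
        outputs=[image_output, error_output],
    )
```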
@@ -212,7 +211,6 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
 ### How to use:
 1. **CLIP Classification**: Upload an image and enter text to check if that concept exists in the image
 2. **FastSAM Segmentation**: Upload an image to get automatic segmentation with bounding boxes and masks
-
 ### Note:
 - The models run on CPU by default, so processing might take a few seconds. If you have a GPU, it will be used automatically.
 - For best results, use clear images with good lighting.
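The CLIP tab described in this help text checks whether a free-text concept is present in the image, and the note mentions automatic GPU use. A hedged sketch of that zero-shot check with the `transformers` CLIP classes follows; only the model name `openai/clip-vit-base-patch32` comes from the diff, while the helper name, prompt wording, and negative prompt are assumptions.

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

CLIP_MODEL_NAME = "openai/clip-vit-base-patch32"

# Use the GPU automatically when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = CLIPModel.from_pretrained(CLIP_MODEL_NAME).to(device)
processor = CLIPProcessor.from_pretrained(CLIP_MODEL_NAME)

def clip_concept_score(image: Image.Image, concept: str) -> float:
    # Compare the user's concept against a generic negative prompt and return the
    # probability that the concept is present (a common zero-shot setup).
    prompts = [f"a photo of {concept}", "a photo of something else"]
    inputs = processor(text=prompts, images=image, return_tensors="pt", padding=True).to(device)
    with torch.no_grad():
        logits = model(**inputs).logits_per_image  # shape: (1, 2)
    return logits.softmax(dim=-1)[0, 0].item()
```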