Update app.py
app.py CHANGED
@@ -47,10 +47,10 @@ def segment_to_bbox(segment_indexs):
     return [np.min(x_points), np.min(y_points), np.max(x_points), np.max(y_points)]
 
 def clipseg_prediction(image):
-    inputs =
+    inputs = clip_processor(text=prompts, images=[image] * len(prompts), padding="max_length", return_tensors="pt")
     # predict
     with torch.no_grad():
-        outputs =
+        outputs = clip_model(**inputs)
     preds = outputs.logits.unsqueeze(1)
     # Setting threshold and classify the image contains vehicle or not
     flat_preds = torch.sigmoid(preds.squeeze()).reshape((preds.shape[0], -1))
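The new lines rely on `clip_processor`, `clip_model`, and `prompts` being defined elsewhere in app.py. A minimal sketch of that setup with the `transformers` CLIPSeg classes, assuming the CIDAS/clipseg-rd64-refined checkpoint and an illustrative prompt list (neither is confirmed by this diff):

```python
import torch
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

# Assumed setup for the names the hunk uses; the app may load a
# different checkpoint or prompt list.
clip_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
clip_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
prompts = ["car", "damage"]  # illustrative text prompts

def clipseg_logits(image):
    # One copy of the image per prompt: CLIPSeg scores each (image, text) pair.
    inputs = clip_processor(text=prompts, images=[image] * len(prompts),
                            padding="max_length", return_tensors="pt")
    with torch.no_grad():
        outputs = clip_model(**inputs)
    # logits has shape (num_prompts, 352, 352): one low-resolution map per prompt,
    # which the app then sigmoids and thresholds.
    return outputs.logits.unsqueeze(1)
```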
@@ -71,9 +71,9 @@ def clipseg_prediction(image):
 
     # Vehicle checking
     if bbox_area(vehicle_bbox) > bbox_area(damage_bbox):
-        return True, bbox_normalization(damage_bbox)
+        return True, [bbox_normalization(damage_bbox)]
     else:
-        return False, []
+        return False, [[]]
 
 
 @torch.no_grad()
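Wrapping the returned box in a list (and the empty case in `[[]]`) suggests the caller now expects a list of boxes rather than a single box. `bbox_area` and `bbox_normalization` are not shown in this diff; a plausible reconstruction, assuming the `[x_min, y_min, x_max, y_max]` format that `segment_to_bbox` returns and the 352x352 CLIPSeg mask resolution (the normalization target is a guess):

```python
def bbox_area(bbox):
    # Area of an [x_min, y_min, x_max, y_max] box; an empty box counts as zero.
    if len(bbox) < 4:
        return 0
    return max(0, bbox[2] - bbox[0]) * max(0, bbox[3] - bbox[1])

def bbox_normalization(bbox, width=352, height=352):
    # Hypothetical helper: map pixel coordinates from the CLIPSeg mask
    # resolution into [0, 1]; the real app.py may rescale differently.
    return [bbox[0] / width, bbox[1] / height,
            bbox[2] / width, bbox[3] / height]
```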
@@ -89,7 +89,7 @@ def foward_pass(image_input: np.ndarray, points: List[List[int]]) -> np.ndarray:
 
     outputs = model.forward(image_embeddings=cache_data[1], **inputs)
     masks = processor.image_processor.post_process_masks(
-        outputs.pred_masks.cpu(), inputs["original_sizes"].
+        outputs.pred_masks.cpu(), inputs["original_sizes"].to(device), inputs["reshaped_input_sizes"].to(device)
     )
     masks = masks[0].squeeze(0).numpy().transpose(1, 2, 0)
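The fix supplies `reshaped_input_sizes`, which `post_process_masks` requires alongside `original_sizes` to undo SAM's internal resizing. A sketch of the surrounding SAM call pattern with `transformers`, assuming a facebook/sam-vit-base checkpoint and the per-image embedding cache implied by `cache_data[1]` (the model name and caching details are assumptions):

```python
import numpy as np
import torch
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = SamModel.from_pretrained("facebook/sam-vit-base").to(device)

@torch.no_grad()
def sam_masks(image: np.ndarray, points):
    # points is a list of [x, y] prompts; the processor expects one extra
    # nesting level per image in the batch.
    inputs = processor(image, input_points=[points], return_tensors="pt").to(device)
    # The app caches this per image (cache_data[1]) so repeated point prompts
    # reuse one expensive vision-encoder pass.
    embeddings = model.get_image_embeddings(inputs.pop("pixel_values"))
    outputs = model(image_embeddings=embeddings, **inputs)
    masks = processor.image_processor.post_process_masks(
        outputs.pred_masks.cpu(),
        inputs["original_sizes"].cpu(),
        inputs["reshaped_input_sizes"].cpu(),
    )
    # (H, W, num_masks) boolean array at the original image resolution
    return masks[0].squeeze(0).numpy().transpose(1, 2, 0)
```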