Update app.py
app.py
CHANGED

@@ -55,27 +55,28 @@ for model in models:
     model["cfg"].MODEL.DEVICE = "cpu"


-def inference(
-
-
-
-
-    im =
-
-
+def inference(image_url, image, min_score, model_name):
+    if image_url:
+        r = requests.get(image_url)
+        if r:
+            im = np.frombuffer(r.content, dtype="uint8")
+            im = cv2.imdecode(im, cv2.IMREAD_COLOR_BGR2RGB)
+    else:
+        # Model expect BGR!
+        im = image[:,:,::-1]

-
+    model_id = model_name_to_id[model_name]

-
-
+    models[model_id]["cfg"].MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
+    predictor = DefaultPredictor(models[model_id]["cfg"])

-
+    outputs = predictor(im)

-
-
+    v = Visualizer(im, models[model_id]["metadata"], scale=1.2)
+    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

-    results.append(out.get_image())
-    return results
+
+    return out.get_image()

 title = "# DBMDZ Detectron2 Model Demo"
 description = """
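Two details of the new URL branch are worth flagging. requests.Response is truthy for status codes below 400, so `if r:` works as a success check, and the app's own comment notes that the model expects BGR input. However, `cv2.IMREAD_COLOR_BGR2RGB` does not appear to be a standard OpenCV constant; the conventional way to decode downloaded bytes into the BGR array a Detectron2 DefaultPredictor expects is `cv2.imdecode(..., cv2.IMREAD_COLOR)`. A minimal sketch of that pattern, using a hypothetical helper name that is not part of app.py:

import cv2
import numpy as np
import requests

def load_image_from_url(image_url):
    # Hypothetical helper, not part of app.py: fetch and decode an image URL.
    r = requests.get(image_url, timeout=10)
    r.raise_for_status()
    # Decode the encoded bytes (JPEG/PNG/...) into an H x W x 3 BGR array.
    buf = np.frombuffer(r.content, dtype="uint8")
    im = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    if im is None:
        raise ValueError("URL did not return a decodable image")
    return im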
@@ -90,18 +91,21 @@ with gr.Blocks() as demo:
     gr.Markdown(title)
     gr.Markdown(description)

+    with gr.Tab("From URL"):
+        url_input = gr.Textbox(label="Image URL", placeholder="https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg")
+
     with gr.Tab("From Image"):
-        image_input = gr.
+        image_input = gr.Gallery(type="numpy", label="Input Image")

     min_score = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score")

     model_name = gr.Radio(choices=[model["name"] for model in models], value=models[0]["name"], label="Select Detectron2 model")

-
+    output_image = gr.Gallery(type="pil", label="Output")

     inference_button = gr.Button("Submit")

-    inference_button.click(fn=inference, inputs=[image_input, min_score, model_name], outputs=
+    inference_button.click(fn=inference, inputs=[url_input, image_input, min_score, model_name], outputs=output_image)

     gr.Markdown(footer)

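On the Gradio side, Button.click passes the current values of the components listed in `inputs`, in order, as positional arguments to `fn`, and sends the return value to `outputs`; that is why the new `inference` signature `(image_url, image, min_score, model_name)` mirrors `inputs=[url_input, image_input, min_score, model_name]`. A minimal, self-contained sketch of that wiring pattern, using generic component names rather than the ones in app.py:

import gradio as gr

def describe(url, name):
    # Arguments arrive in the same order as the `inputs` list below.
    return f"{name}: {url or 'no URL given'}"

with gr.Blocks() as demo:
    url_box = gr.Textbox(label="URL")
    name_box = gr.Textbox(label="Name")
    result_box = gr.Textbox(label="Result")
    submit = gr.Button("Submit")
    submit.click(fn=describe, inputs=[url_box, name_box], outputs=result_box)

demo.launch()

Components from both tabs can be listed together in `inputs`; whichever one the user filled in is what the handler receives, which is how the `if image_url:` branch in inference decides between the URL and the uploaded image.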
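One name the new inference body relies on, model_name_to_id, is defined outside the changed region and is not shown in this diff. A plausible construction, offered purely as an assumption about the unchanged part of app.py:

# Hypothetical sketch, not taken from app.py: index models by their display name.
model_name_to_id = {model["name"]: model_id for model_id, model in enumerate(models)}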