Commit 184fd640
Parent(s):
Duplicate from kellyxiaowei/OWL-ViT
Co-authored-by: xiaowei <kellyxiaowei@users.noreply.huggingface.co>
- .gitattributes +33 -0
- README.md +14 -0
- app.py +79 -0
- assets/.DS_Store +0 -0
- assets/Helvetica.ttf +0 -0
- assets/astronaut.png +0 -0
- assets/butterflies.jpeg +0 -0
- assets/coffee.png +0 -0
- requirements.txt +7 -0
.gitattributes
ADDED
@@ -0,0 +1,33 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/Helvatica.ttc filter=lfs diff=lfs merge=lfs -text
+assets/Helvatica.ttf filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: OWL-ViT Demo
+emoji: 🔥
+colorFrom: yellow
+colorTo: yellow
+sdk: gradio
+sdk_version: 3.1.3
+app_file: app.py
+pinned: false
+license: apache-2.0
+duplicated_from: kellyxiaowei/OWL-ViT
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,79 @@
+import torch
+import cv2
+import gradio as gr
+import numpy as np
+import requests
+from PIL import Image
+from io import BytesIO
+from transformers import OwlViTProcessor, OwlViTForObjectDetection
+
+
+# Use GPU if available
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+else:
+    device = torch.device("cpu")
+
+model = OwlViTForObjectDetection.from_pretrained("google/owlvit-large-patch14").to(device)
+model.eval()
+processor = OwlViTProcessor.from_pretrained("google/owlvit-large-patch14")
+
+
+def query_image(img_url, text_queries, score_threshold):
+    text_queries = text_queries.split(",")
+
+    response = requests.get(img_url)
+    img = Image.open(BytesIO(response.content))
+    img = np.array(img)
+
+    target_sizes = torch.Tensor([img.shape[:2]])
+    inputs = processor(text=text_queries, images=img, return_tensors="pt").to(device)
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    outputs.logits = outputs.logits.cpu()
+    outputs.pred_boxes = outputs.pred_boxes.cpu()
+    results = processor.post_process(outputs=outputs, target_sizes=target_sizes)
+    boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]
+
+    font = cv2.FONT_HERSHEY_SIMPLEX
+
+    for box, score, label in zip(boxes, scores, labels):
+        box = [int(i) for i in box.tolist()]
+
+        if score >= score_threshold:
+            img = cv2.rectangle(img, box[:2], box[2:], (255, 0, 0), 5)
+            if box[3] + 25 > 768:
+                y = box[3] - 10
+            else:
+                y = box[3] + 25
+
+            img = cv2.putText(
+                img, text_queries[label], (box[0], y), font, 1, (255, 0, 0), 2, cv2.LINE_AA
+            )
+    return img
+
+
+description = """
+Gradio demo for <a href="https://huggingface.co/docs/transformers/main/en/model_doc/owlvit">OWL-ViT</a>,
+introduced in <a href="https://arxiv.org/abs/2205.06230">Simple Open-Vocabulary Object Detection
+with Vision Transformers</a>.
+\n\nYou can use OWL-ViT to query images with text descriptions of any object.
+To use it, simply input the URL of an image and enter comma-separated text descriptions of objects you want to query the image for. You
+can also use the score threshold slider to set a threshold to filter out low-probability predictions.
+
+\n\nOWL-ViT is trained on text templates,
+hence you can get better predictions by querying the image with text templates used in training the original model: *"photo of a star-spangled banner"*,
+*"image of a shoe"*. Refer to the <a href="https://arxiv.org/abs/2103.00020">CLIP</a> paper to see the full list of text templates used to augment the training data.
+\n\n<a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb">Colab demo</a>
+"""
+demo = gr.Interface(
+    query_image,
+    inputs=["text", "text", gr.Slider(0, 1, value=0.1)],
+    outputs="image",
+    title="Zero-Shot Object Detection with OWL-ViT",
+    description=description,
+    examples=[],
+)
+demo.launch()
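For a quick sanity check outside the Gradio UI, query_image can be called directly once the script above has run (so model, processor, and query_image are in scope). A minimal sketch; the image URL is illustrative rather than taken from the repo, and the queries follow the "photo of a ..." template style recommended in the description string:

# Smoke test for query_image (hypothetical public image URL).
annotated = query_image(
    "https://example.com/astronaut.png",         # illustrative URL, not part of this commit
    "photo of an astronaut, photo of a rocket",  # comma-separated text queries
    0.1,                                         # same default as the score threshold slider
)

# query_image returns a NumPy array with boxes and labels drawn in red.
from PIL import Image
Image.fromarray(annotated).save("annotated.png")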
assets/.DS_Store
ADDED
Binary file (6.15 kB)
assets/Helvetica.ttf
ADDED
Binary file (318 kB)
assets/astronaut.png
ADDED
assets/butterflies.jpeg
ADDED
assets/coffee.png
ADDED
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+# pip install -r requirements.txt
+
+numpy>=1.18.5
+torch>=1.7.0
+torchvision>=0.8.1
+git+https://github.com/huggingface/transformers.git
+opencv-python
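To confirm the pinned stack resolves before launching the Space, a short import check can help; a sketch, not part of the commit:

# Environment smoke test: verifies the requirements.txt stack imports cleanly.
import cv2
import numpy
import torch
import torchvision
import transformers

print("torch", torch.__version__)
print("torchvision", torchvision.__version__)
print("numpy", numpy.__version__)
print("opencv-python", cv2.__version__)
print("transformers", transformers.__version__)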