Commit bf22e13
1 Parent(s): 405c26e

changed model

Files changed:
- app.py (+12, -13)
- weights/yolov8n-face.onnx (+3, -0)
- yolov8.py (+136, -0)
app.py CHANGED

@@ -2,28 +2,27 @@ import gradio as gr
 import cv2
 import numpy as np
 from PIL import Image
-from
+from yolov8 import YOLOv8Face

+model = YOLOv8Face('weights/yolov8n-face.onnx')

-def detect_and_blur_faces(image):
+def detect_and_blur_faces(model, image):
+    boxes, scores, classids, landmarks = model.detect(image)

-    if 'face_1' in resp:
+    output_image = image.copy()

+    for i, box in enumerate(boxes):
+        x1, y1, w, h = [int(val) for val in box]
+        x2, y2 = x1 + w, y1 + h

+        face = output_image[y1:y2, x1:x2]
+        blurred_face = cv2.GaussianBlur(face, (99, 99), 30)
-        output_image[y:h, x:w] = blurred_face
+        output_image[y1:y2, x1:x2] = blurred_face

     return output_image
+

 # Set up the Gradio interface.
 image_input = gr.inputs.Image(shape=(None, None))
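The rewritten detect_and_blur_faces now takes the model as its first argument, while the Gradio wiring below the hunk still expects a one-argument function. One way to reconcile the two, sketched here against the legacy gr.Interface API already used by image_input; the partial() binding and launch() call are assumptions, not part of the commit:

    import functools

    # Bind the module-level model so Gradio can keep calling a one-argument function.
    blur_fn = functools.partial(detect_and_blur_faces, model)

    iface = gr.Interface(fn=blur_fn, inputs=image_input, outputs="image")
    iface.launch()

Two details worth noting: cv2.GaussianBlur(face, (99, 99), 30) uses a 99x99 kernel with sigma 30, large enough to make faces unrecognizable at typical input resolutions, and YOLOv8Face.detect converts its input with cv2.COLOR_BGR2RGB, i.e. it expects BGR frames as produced by cv2.imread, whereas Gradio hands the function RGB arrays, so the channel order may need swapping before detection.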
weights/yolov8n-face.onnx ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d22820ff253677420e56562f8351955f741bfed7ab4960cf74aed543f52afeb5
+size 12345102
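The entry above is a Git LFS pointer, not the network itself: the repository records only the SHA-256 oid and the byte size (about 12 MB), and git-lfs swaps in the real weights file at checkout. A small verification sketch (not part of the commit) that checks whether the file on disk matches the pointer, useful when a clone without git-lfs leaves only the three-line pointer behind:

    import hashlib
    from pathlib import Path

    # Values copied from the LFS pointer above.
    EXPECTED_SHA256 = "d22820ff253677420e56562f8351955f741bfed7ab4960cf74aed543f52afeb5"
    EXPECTED_SIZE = 12345102

    path = Path("weights/yolov8n-face.onnx")
    data = path.read_bytes()
    assert len(data) == EXPECTED_SIZE, "size mismatch: git-lfs may not have fetched the real file"
    assert hashlib.sha256(data).hexdigest() == EXPECTED_SHA256, "checksum mismatch"
    print("weights verified")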
yolov8.py ADDED

@@ -0,0 +1,136 @@
+import cv2
+import numpy as np
+import math
+
+class YOLOv8Face:
+    def __init__(self, path, conf_thres=0.2, iou_thres=0.5):
+        self.conf_threshold = conf_thres
+        self.iou_threshold = iou_thres
+        self.class_names = ['face']
+        self.num_classes = len(self.class_names)
+        # Initialize model
+        self.net = cv2.dnn.readNet(path)
+        self.input_height = 640
+        self.input_width = 640
+        self.reg_max = 16
+
+        self.project = np.arange(self.reg_max)
+        self.strides = (8, 16, 32)
+        self.feats_hw = [(math.ceil(self.input_height / self.strides[i]), math.ceil(self.input_width / self.strides[i])) for i in range(len(self.strides))]
+        self.anchors = self.make_anchors(self.feats_hw)
+
+    def make_anchors(self, feats_hw, grid_cell_offset=0.5):
+        """Generate anchors from features."""
+        anchor_points = {}
+        for i, stride in enumerate(self.strides):
+            h, w = feats_hw[i]
+            x = np.arange(0, w) + grid_cell_offset  # shift x
+            y = np.arange(0, h) + grid_cell_offset  # shift y
+            sx, sy = np.meshgrid(x, y)
+            # sy, sx = np.meshgrid(y, x)
+            anchor_points[stride] = np.stack((sx, sy), axis=-1).reshape(-1, 2)
+        return anchor_points
+
+    def softmax(self, x, axis=1):
+        x_exp = np.exp(x)
+        x_sum = np.sum(x_exp, axis=axis, keepdims=True)
+        s = x_exp / x_sum
+        return s
+
+    def resize_image(self, srcimg, keep_ratio=True):
+        # Letterbox resize to the 640x640 network input, padding with black borders.
+        top, left, newh, neww = 0, 0, self.input_width, self.input_height
+        if keep_ratio and srcimg.shape[0] != srcimg.shape[1]:
+            hw_scale = srcimg.shape[0] / srcimg.shape[1]
+            if hw_scale > 1:
+                newh, neww = self.input_height, int(self.input_width / hw_scale)
+                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
+                left = int((self.input_width - neww) * 0.5)
+                img = cv2.copyMakeBorder(img, 0, 0, left, self.input_width - neww - left, cv2.BORDER_CONSTANT,
+                                         value=(0, 0, 0))  # add border
+            else:
+                newh, neww = int(self.input_height * hw_scale), self.input_width
+                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
+                top = int((self.input_height - newh) * 0.5)
+                img = cv2.copyMakeBorder(img, top, self.input_height - newh - top, 0, 0, cv2.BORDER_CONSTANT,
+                                         value=(0, 0, 0))
+        else:
+            img = cv2.resize(srcimg, (self.input_width, self.input_height), interpolation=cv2.INTER_AREA)
+        return img, newh, neww, top, left
+
+    def detect(self, srcimg):
+        input_img, newh, neww, padh, padw = self.resize_image(cv2.cvtColor(srcimg, cv2.COLOR_BGR2RGB))
+        scale_h, scale_w = srcimg.shape[0] / newh, srcimg.shape[1] / neww
+        input_img = input_img.astype(np.float32) / 255.0
+
+        blob = cv2.dnn.blobFromImage(input_img)
+        self.net.setInput(blob)
+        outputs = self.net.forward(self.net.getUnconnectedOutLayersNames())
+        det_bboxes, det_conf, det_classid, landmarks = self.post_process(outputs, scale_h, scale_w, padh, padw)
+        return det_bboxes, det_conf, det_classid, landmarks
+
+    def post_process(self, preds, scale_h, scale_w, padh, padw):
+        bboxes, scores, landmarks = [], [], []
+        for i, pred in enumerate(preds):
+            stride = int(self.input_height / pred.shape[2])  # infer stride from feature-map size
+            pred = pred.transpose((0, 2, 3, 1))
+
+            # Channel layout: reg_max*4 box logits, then class logits, then 15 keypoint values.
+            box = pred[..., :self.reg_max * 4]
+            cls = 1 / (1 + np.exp(-pred[..., self.reg_max * 4:-15])).reshape((-1, 1))  # sigmoid
+            kpts = pred[..., -15:].reshape((-1, 15))  # x1,y1,score1, ..., x5,y5,score5
+
+            # DFL decoding: softmax over reg_max bins, expectation gives the distance per box side.
+            tmp = box.reshape(-1, 4, self.reg_max)
+            bbox_pred = self.softmax(tmp, axis=-1)
+            bbox_pred = np.dot(bbox_pred, self.project).reshape((-1, 4))
+
+            bbox = self.distance2bbox(self.anchors[stride], bbox_pred, max_shape=(self.input_height, self.input_width)) * stride
+            kpts[:, 0::3] = (kpts[:, 0::3] * 2.0 + (self.anchors[stride][:, 0].reshape((-1, 1)) - 0.5)) * stride
+            kpts[:, 1::3] = (kpts[:, 1::3] * 2.0 + (self.anchors[stride][:, 1].reshape((-1, 1)) - 0.5)) * stride
+            kpts[:, 2::3] = 1 / (1 + np.exp(-kpts[:, 2::3]))
+
+            # Undo letterbox padding, then rescale to the original image size.
+            bbox -= np.array([[padw, padh, padw, padh]])
+            bbox *= np.array([[scale_w, scale_h, scale_w, scale_h]])
+            kpts -= np.tile(np.array([padw, padh, 0]), 5).reshape((1, 15))
+            kpts *= np.tile(np.array([scale_w, scale_h, 1]), 5).reshape((1, 15))
+
+            bboxes.append(bbox)
+            scores.append(cls)
+            landmarks.append(kpts)
+
+        bboxes = np.concatenate(bboxes, axis=0)
+        scores = np.concatenate(scores, axis=0)
+        landmarks = np.concatenate(landmarks, axis=0)
+
+        bboxes_wh = bboxes.copy()
+        bboxes_wh[:, 2:4] = bboxes[:, 2:4] - bboxes[:, 0:2]  # x, y, w, h
+        classIds = np.argmax(scores, axis=1)
+        confidences = np.max(scores, axis=1)  # max_class_confidence
+
+        mask = confidences > self.conf_threshold
+        bboxes_wh = bboxes_wh[mask]
+        confidences = confidences[mask]
+        classIds = classIds[mask]
+        landmarks = landmarks[mask]
+
+        indices = cv2.dnn.NMSBoxes(bboxes_wh.tolist(), confidences.tolist(), self.conf_threshold,
+                                   self.iou_threshold).flatten()
+        if len(indices) > 0:
+            mlvl_bboxes = bboxes_wh[indices]
+            confidences = confidences[indices]
+            classIds = classIds[indices]
+            landmarks = landmarks[indices]
+            return mlvl_bboxes, confidences, classIds, landmarks
+        else:
+            print('nothing detected')
+            return np.array([]), np.array([]), np.array([]), np.array([])
+
+    def distance2bbox(self, points, distance, max_shape=None):
+        # Convert (left, top, right, bottom) distances from anchor points to corner boxes.
+        x1 = points[:, 0] - distance[:, 0]
+        y1 = points[:, 1] - distance[:, 1]
+        x2 = points[:, 0] + distance[:, 2]
+        y2 = points[:, 1] + distance[:, 3]
+        if max_shape is not None:
+            x1 = np.clip(x1, 0, max_shape[1])
+            y1 = np.clip(y1, 0, max_shape[0])
+            x2 = np.clip(x2, 0, max_shape[1])
+            y2 = np.clip(y2, 0, max_shape[0])
+        return np.stack([x1, y1, x2, y2], axis=-1)
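End to end, the class letterboxes the frame to 640x640, runs one forward pass through cv2.dnn, decodes the three stride levels, and filters detections with a confidence threshold plus NMS. A minimal standalone usage sketch (the image path is a placeholder):

    import cv2
    from yolov8 import YOLOv8Face

    detector = YOLOv8Face('weights/yolov8n-face.onnx', conf_thres=0.2, iou_thres=0.5)
    img = cv2.imread('test.jpg')  # BGR, as detect() expects
    boxes, scores, classids, landmarks = detector.detect(img)
    for (x, y, w, h), score in zip(boxes, scores):
        print(f'face at x={x:.0f}, y={y:.0f}, w={w:.0f}, h={h:.0f} (conf {score:.2f})')

The least obvious step in post_process is the box decoding: for each anchor, every box side is predicted as a softmax distribution over reg_max = 16 bins, and the decoded distance is that distribution's expectation, i.e. its dot product with project = np.arange(16), measured in stride units. A toy illustration of just that step:

    import numpy as np

    reg_max = 16
    project = np.arange(reg_max)

    logits = np.random.randn(1, 4, reg_max)                        # one anchor, 4 box sides
    prob = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)  # softmax per side
    dist = prob @ project                                          # expected bin = side distance, in stride units
    print(dist)                                                    # shape (1, 4): left, top, right, bottom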