import glob
import os
import random
import time

import cv2
import numpy as np
import torch
import torchvision
from PIL import Image

torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads


def init_seeds(seed=0):
    """Seed the Python, NumPy and PyTorch random number generators for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)


def get_latest_run(search_dir='.'):
    """Return the most recent 'last*.pt' checkpoint found under search_dir, or '' if none exists."""
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write image to file.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Image file path.
        params (None or list): Same as opencv's :func:`imwrite` interface.
        auto_mkdir (bool): If the parent folder of `file_path` does not exist,
            whether to create it automatically.

    Returns:
        bool: Successful or not.
    """
    if auto_mkdir:
        dir_name = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(dir_name, exist_ok=True)
    return cv2.imwrite(file_path, img, params)


def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Numpy array to tensor.

    Args:
        imgs (list[ndarray] | ndarray): Input images.
        bgr2rgb (bool): Whether to change bgr to rgb.
        float32 (bool): Whether to change to float32.

    Returns:
        list[tensor] | tensor: Tensor images. If the input is a single image,
            a single tensor is returned.
    """

    def _totensor(img, bgr2rgb, float32):
        if img.shape[2] == 3 and bgr2rgb:
            if img.dtype == 'float64':
                img = img.astype('float32')
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = torch.from_numpy(img.transpose(2, 0, 1))
        if float32:
            img = img.float()
        return img

    if isinstance(imgs, list):
        return [_totensor(img, bgr2rgb, float32) for img in imgs]
    else:
        return _totensor(imgs, bgr2rgb, float32)
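

# A minimal usage sketch for img2tensor (hypothetical file name, for illustration only):
#   img = cv2.imread('example.png').astype(np.float32) / 255.0  # HWC, BGR
#   t = img2tensor(img)                                         # CHW, RGB torch.Tensor
#   batch = torch.stack(img2tensor([img, img]))                 # list in, NCHW batch out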


def is_gray(img, threshold=10):
    """Heuristically decide whether an image is actually grayscale.

    Args:
        img (ndarray): Input image.
        threshold (int): If the mean variance of the pairwise channel
            differences is below this value, the image is considered gray.

    Returns:
        bool: True if the image is (close to) grayscale.
    """
    img = Image.fromarray(img)
    if len(img.getbands()) == 1:
        return True
    img1 = np.asarray(img.getchannel(channel=0), dtype=np.int16)
    img2 = np.asarray(img.getchannel(channel=1), dtype=np.int16)
    img3 = np.asarray(img.getchannel(channel=2), dtype=np.int16)
    diff1 = (img1 - img2).var()
    diff2 = (img2 - img3).var()
    diff3 = (img3 - img1).var()
    diff_sum = (diff1 + diff2 + diff3) / 3.0
    return diff_sum <= threshold
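

# A quick check for is_gray (synthetic image, for illustration only): a uniform
# gray image has identical channels, so the channel-difference variance is 0:
#   is_gray(np.full((8, 8, 3), 128, dtype=np.uint8))  # -> True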


def rgb2gray(img, out_channel=3):
    """Convert an RGB image to grayscale using the ITU-R BT.601 luma weights."""
    r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    if out_channel == 3:
        gray = gray[:, :, np.newaxis].repeat(3, axis=2)
    return gray


def bgr2gray(img, out_channel=3):
    """Convert a BGR image (OpenCV channel order) to grayscale using the ITU-R BT.601 luma weights."""
    b, g, r = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    if out_channel == 3:
        gray = gray[:, :, np.newaxis].repeat(3, axis=2)
    return gray


def calc_mean_std(feat, eps=1e-5):
    """Compute the per-channel mean and standard deviation of a feature map.

    Args:
        feat (ndarray): 3D array of shape (h, w, c).
        eps (float): Small value added to the variance to avoid division by zero.

    Returns:
        tuple[ndarray, ndarray]: Per-channel mean and std, each of shape (1, 1, c).
    """
    size = feat.shape
    assert len(size) == 3, 'The input feature should be a 3D array.'
    c = size[2]
    feat_var = feat.reshape(-1, c).var(axis=0) + eps
    feat_std = np.sqrt(feat_var).reshape(1, 1, c)
    feat_mean = feat.reshape(-1, c).mean(axis=0).reshape(1, 1, c)
    return feat_mean, feat_std


def adain_npy(content_feat, style_feat):
    """Adaptive instance normalization (AdaIN) for numpy arrays.

    Normalizes `content_feat` to zero mean and unit variance per channel, then
    rescales and shifts it with the per-channel statistics of `style_feat`.

    Args:
        content_feat (ndarray): The input feature, shape (h, w, c).
        style_feat (ndarray): The reference feature, shape (h, w, c).
    """
    size = content_feat.shape
    style_mean, style_std = calc_mean_std(style_feat)
    content_mean, content_std = calc_mean_std(content_feat)
    normalized_feat = (content_feat - np.broadcast_to(content_mean, size)) / np.broadcast_to(content_std, size)
    return normalized_feat * np.broadcast_to(style_std, size) + np.broadcast_to(style_mean, size)
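

# A minimal sketch of adain_npy (random features, for illustration only):
#   content = np.random.rand(32, 32, 3).astype(np.float32)
#   style = np.random.rand(32, 32, 3).astype(np.float32)
#   out = adain_npy(content, style)  # `out` now carries the per-channel mean/std of `style`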


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h], where x1y1 is top-left and x2y2 is bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2], where x1y1 is top-left and x2y2 is bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top-left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top-left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom-right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom-right y
    return y
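

# A worked example for xywh2xyxy (hypothetical values): a box centered at (50, 40)
# with width 20 and height 10 maps to corners (40, 35) and (60, 45):
#   xywh2xyxy(np.array([[50., 40., 20., 10.]]))  # -> [[40., 35., 60., 45.]]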


def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32):
    # Convert nx4 boxes from normalized [x, y, w, h] to pixel [x1, y1, x2, y2], shifted by padding (padw, padh)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top-left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top-left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom-right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom-right y
    return y


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape back to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
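

# A round-trip sketch for scale_coords (hypothetical shapes, for illustration only):
# boxes detected on a 640x640 letterboxed input map back to the original 480x640 frame:
#   boxes = torch.tensor([[100., 180., 200., 280.]])  # xyxy on the 640x640 input
#   scale_coords((640, 640), boxes, (480, 640))       # -> [[100., 100., 200., 200.]]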


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width), in place
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def box_iou(box1, box2):
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N, M) = (rb(N, M, 2) - lt(N, M, 2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) -
             torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)

    # iou = inter / (area1 + area2 - inter)
    return inter / (area1[:, None] + area2 - inter)
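

# A quick sanity check for box_iou (hypothetical boxes): identical boxes give
# IoU 1, disjoint boxes give IoU 0:
#   a = torch.tensor([[0., 0., 10., 10.]])
#   b = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
#   box_iou(a, b)  # -> tensor([[1., 0.]])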


def wh_iou(wh1, wh2):
    # Return the nxm IoU matrix of width-height pairs. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N, 1, 2]
    wh2 = wh2[None]  # [1, M, 2]
    inter = torch.min(wh1, wh2).prod(2)  # [N, M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results of a face model with 5-point landmarks.

    Returns:
        list of detections, one (n, 16) tensor per image: (x1, y1, x2, y2, conf, 10 landmark coords, cls)
    """
    nc = prediction.shape[2] - 15  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference

        # Apply confidence constraint
        x = x[xc[xi]]

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 15), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 15] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain, process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 15:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx16 (xyxy, conf, landmarks, cls)
        if multi_label:
            i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 15, None], x[i, 5:15], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 15:].max(1, keepdim=True)
            x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]
        # Filter by class (the class index sits in the last column, 15)
        if classes is not None:
            x = x[(x[:, 15:16] == torch.tensor(classes, device=x.device)).any(1)]
        # If none remain, process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Batched NMS
        c = x[:, 15:16] * (0 if agnostic else max_wh)  # class offset
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)

        # Merge NMS (boxes merged using weighted mean)
        if merge and (1 < n < 3E3):
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
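

# A minimal usage sketch (hypothetical `model` and shapes, for illustration only):
#   pred = model(img_batch)[0]  # e.g. (batch, num_anchors, 15 + nc)
#   dets = non_max_suppression_face(pred, conf_thres=0.5, iou_thres=0.5)
#   for det in dets:  # one (n, 16) tensor per image
#       boxes, confs, landmarks = det[:, :4], det[:, 4], det[:, 5:15]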


def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale 5-point landmark coords (x0, y0, ..., x4, y4) from img1_shape back to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding
    coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding
    coords[:, :10] /= gain
    coords[:, 0:10:2].clamp_(0, img0_shape[1])  # x coordinates
    coords[:, 1:10:2].clamp_(0, img0_shape[0])  # y coordinates
    return coords


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scaleFill=False, scaleup=True):
    # Resize and pad image to new_shape, keeping the aspect ratio; returns the image, scale ratio and (dw, dh) padding
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # pad only up to a multiple of 64
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
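

# A minimal usage sketch for letterbox (synthetic input, for illustration only):
#   img = np.zeros((480, 640, 3), dtype=np.uint8)
#   padded, ratio, (dw, dh) = letterbox(img, new_shape=640)
#   padded.shape  # (640, 640, 3): the 480x640 frame gets 80 px of padding top and bottom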