File size: 4,120 Bytes
b21d91d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import os
import time
import cv2
import numpy as np
import torch
from ultralytics import YOLO
from PIL import Image as PILImage
from keras_facenet import FaceNet
from transformers import pipeline
import gradio as gr
from datetime import datetime, timedelta
import gc

# -----------------------------
# Device Setup
# -----------------------------
# Prefer CUDA when available; used only for the YOLO detector below.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {DEVICE}")

# -----------------------------
# Load YOLOv8 Face Model
# -----------------------------
MODEL_PATH = "yolov8n-face.pt"  # put this file in your Space repository
face_model = YOLO(MODEL_PATH).to(DEVICE)

# -----------------------------
# Load FaceNet Embedder
# -----------------------------
# keras-facenet runs on CPU here; produces 512-d embeddings per face crop.
embedder = FaceNet()  # CPU mode

# -----------------------------
# Load HuggingFace Age & Gender Models
# -----------------------------
# device=-1 pins both transformers pipelines to CPU.
age_model = pipeline(
    "image-classification",
    model="prithivMLmods/Age-Classification-SigLIP2",
    device=-1
)
gender_model = pipeline(
    "image-classification",
    model="dima806/fairface_gender_image_detection",
    device=-1
)

# -----------------------------
# Face DB
# -----------------------------
# In-memory identity store. Each entry is a dict with keys:
# id, embedding, time (last seen), seen_count, age, gender.
# NOTE(review): module-level mutable state — not safe under concurrent
# Gradio requests; confirm the app runs single-worker.
FACE_DB = []
NEXT_ID = 1

def clean_gpu():
    """Release cached CUDA memory (when a GPU is present), then run Python GC."""
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        torch.cuda.empty_cache()
    gc.collect()

def cosine_similarity(a, b):
    """Return the cosine similarity between two 1-D vectors.

    Args:
        a, b: array-likes of equal length (e.g. FaceNet embeddings).

    Returns:
        float in [-1.0, 1.0]; 0.0 when either vector has zero norm
        (the previous implementation divided by zero and yielded nan,
        which poisoned the argmax comparison in the caller).
    """
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0.0:
        return 0.0
    return float(np.dot(a, b) / denom)

# -----------------------------
# Main Inference Function
# -----------------------------
def process_image(image):
    """Detect faces in an image, assign persistent IDs, and predict age/gender.

    Known faces (cosine similarity > 0.6 against FACE_DB embeddings) reuse
    their cached ID/age/gender; new faces are classified and stored. DB
    entries unseen for over an hour are evicted on every call.

    Args:
        image: HxWx3 uint8 numpy array from the Gradio Image component.

    Returns:
        dict with "faces" (list of {id, age, gender, box}) and
        "processing_time_sec" (float, rounded to 3 decimals).

    Side effects: mutates module globals FACE_DB and NEXT_ID.
    """
    global NEXT_ID, FACE_DB
    start_time = time.time()

    # NOTE(review): gr.Image(type="numpy") normally delivers RGB already, so
    # this swap may invert channels — confirm against the actual input source.
    rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Detect faces; boxes come back as float xyxy, cast to int pixel coords.
    results = face_model(rgb_img, verbose=False)
    boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)

    now = datetime.now()
    # Evict identities not seen within the last hour.
    FACE_DB = [f for f in FACE_DB if now - f["time"] <= timedelta(hours=1)]

    img_h, img_w = rgb_img.shape[:2]
    faces = []
    for (x1, y1, x2, y2) in boxes:
        # Clamp to image bounds: a negative coordinate from the detector
        # would wrap via Python negative indexing and crop the wrong region.
        x1, x2 = max(0, x1), min(img_w, x2)
        y1, y2 = max(0, y1), min(img_h, y2)
        face_crop = rgb_img[y1:y2, x1:x2]
        if face_crop.size == 0:
            continue

        face_embedding = embedder.embeddings([face_crop])[0]

        assigned_id = None
        age_pred, gender_pred = "unknown", "unknown"

        # Try to match against known identities.
        if FACE_DB:
            similarities = [cosine_similarity(face_embedding, entry["embedding"]) for entry in FACE_DB]
            best_match_index = int(np.argmax(similarities))
            best_score = similarities[best_match_index]

            if best_score > 0.6:
                # Known face: refresh last-seen time and reuse cached labels
                # so the classifiers only run once per identity.
                assigned_id = FACE_DB[best_match_index]["id"]
                FACE_DB[best_match_index]["time"] = now
                FACE_DB[best_match_index]["seen_count"] += 1
                age_pred = FACE_DB[best_match_index]["age"]
                gender_pred = FACE_DB[best_match_index]["gender"]

        if assigned_id is None:
            # New face: classify once, then cache in the DB.
            assigned_id = NEXT_ID
            face_pil = PILImage.fromarray(face_crop)
            try:
                age_pred = age_model(face_pil)[0]["label"]
                gender_pred = gender_model(face_pil)[0]["label"]
            except Exception:
                # Best-effort: classification failure must not kill detection.
                age_pred, gender_pred = "unknown", "unknown"

            FACE_DB.append({
                "id": assigned_id,
                "embedding": face_embedding,
                "time": now,
                "seen_count": 1,
                "age": age_pred,
                "gender": gender_pred
            })
            NEXT_ID += 1

        faces.append({
            "id": assigned_id,
            "age": age_pred,
            "gender": gender_pred,
            "box": [int(x1), int(y1), int(x2), int(y2)]
        })

    total_time = round(time.time() - start_time, 3)
    clean_gpu()

    return {"faces": faces, "processing_time_sec": total_time}

# -----------------------------
# Gradio UI
# -----------------------------
# type="numpy" hands process_image an HxWx3 uint8 array; the dict it
# returns is rendered by the "json" output component.
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy"),
    outputs="json",
    title="Face Detection + Age/Gender"
)

# Launch the web app only when run as a script (not on import).
if __name__ == "__main__":
    iface.launch()