SynthAIzer committed
Commit d9dd411 · verified · 1 parent: 8bb2cee

Update app.py

Files changed (1): app.py (+97 −119)
app.py CHANGED
@@ -1,35 +1,28 @@
 import os
-# Disable GPU for TensorFlow / Keras BEFORE importing anything
-os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-
 import time
 import cv2
 import numpy as np
 import torch
-from flask import Flask, request, jsonify
-from ultralytics import YOLO
 from PIL import Image as PILImage
-from datetime import datetime, timedelta
 import gc
+from datetime import datetime, timedelta

+from ultralytics import YOLO
 from keras_facenet import FaceNet
 from transformers import pipeline
-
-# -----------------------------
-# Flask Setup
-# -----------------------------
-app = Flask(__name__)
+import gradio as gr

 # -----------------------------
 # Device Setup
 # -----------------------------
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # Disable GPU
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Using device: {DEVICE}")

 # -----------------------------
 # Load YOLOv8 Face Model
 # -----------------------------
-MODEL_PATH = "yolov8n-face.pt" # put this file in your repo root
+MODEL_PATH = "yolov8n-face.pt" # make sure this is in your repo
 if not os.path.exists(MODEL_PATH):
     raise FileNotFoundError(f"Model file not found at {MODEL_PATH}")

@@ -40,18 +33,18 @@ print("YOLOv8 loaded")
 # -----------------------------
 # Load FaceNet Embedder
 # -----------------------------
-print("Loading FaceNet (Google) model...")
-embedder = FaceNet() # CPU mode
+print("Loading FaceNet model...")
+embedder = FaceNet()
 print("FaceNet loaded")

 # -----------------------------
 # Load HuggingFace Age & Gender Models
 # -----------------------------
-print("Loading HuggingFace models...")
+print("Loading HuggingFace Age & Gender models...")
 age_model = pipeline(
     "image-classification",
     model="prithivMLmods/Age-Classification-SigLIP2",
-    device=-1 # CPU mode
+    device=-1
 )
 gender_model = pipeline(
     "image-classification",
@@ -67,127 +60,112 @@ FACE_DB = []
 NEXT_ID = 1

 # -----------------------------
-# GPU Cleaner
+# Utilities
 # -----------------------------
 def clean_gpu():
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
     gc.collect()

-# -----------------------------
-# Cosine Similarity
-# -----------------------------
 def cosine_similarity(a, b):
     return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

 # -----------------------------
-# Upload API Endpoint
+# Core Function
 # -----------------------------
-@app.route("/upload", methods=["POST"])
-def upload():
+def process_image(image: PILImage):
     global NEXT_ID, FACE_DB
     start_time = time.time()
-
-    try:
-        # Decode image
-        jpg_bytes = request.data
-        np_arr = np.frombuffer(jpg_bytes, np.uint8)
-        img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
-        if img is None:
-            return jsonify({"status": "error", "message": "Invalid image"}), 400
-
-        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-        # Detect faces using YOLOv8
-        results = face_model(rgb_img, verbose=False)
-        boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
-
-        now = datetime.now()
-        # Remove old entries (> 1 hour)
-        FACE_DB = [f for f in FACE_DB if now - f["time"] <= timedelta(hours=1)]
-
-        faces = []
-        for (x1, y1, x2, y2) in boxes:
-            face_crop = rgb_img[y1:y2, x1:x2]
-            if face_crop.size == 0:
-                continue
-
-            # Get embedding from FaceNet
-            face_embedding = embedder.embeddings([face_crop])[0]
-
-            assigned_id = None
-            age_pred, gender_pred = "unknown", "unknown"
-
-            # Compare with known embeddings
-            if FACE_DB:
-                similarities = [cosine_similarity(face_embedding, entry["embedding"]) for entry in FACE_DB]
-                best_match_index = int(np.argmax(similarities))
-                best_score = similarities[best_match_index]
-
-                if best_score > 0.6: # same person threshold
-                    assigned_id = FACE_DB[best_match_index]["id"]
-                    FACE_DB[best_match_index]["time"] = now
-                    FACE_DB[best_match_index]["seen_count"] += 1
-                    age_pred = FACE_DB[best_match_index]["age"]
-                    gender_pred = FACE_DB[best_match_index]["gender"]
-
-            # If new person
-            if assigned_id is None:
-                assigned_id = NEXT_ID
-                face_pil = PILImage.fromarray(face_crop)
-
-                try:
-                    age_pred = age_model(face_pil)[0]["label"]
-                    gender_pred = gender_model(face_pil)[0]["label"]
-                except Exception:
-                    age_pred, gender_pred = "unknown", "unknown"
-
-                FACE_DB.append({
-                    "id": assigned_id,
-                    "embedding": face_embedding,
-                    "time": now,
-                    "seen_count": 1,
-                    "age": age_pred,
-                    "gender": gender_pred
-                })
-                NEXT_ID += 1
-
-            faces.append({
+    rgb_img = np.array(image)
+
+    # Detect faces
+    results = face_model(rgb_img, verbose=False)
+    boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
+
+    now = datetime.now()
+    # Remove old entries (>1 hour)
+    FACE_DB = [f for f in FACE_DB if now - f["time"] <= timedelta(hours=1)]
+
+    faces = []
+    for (x1, y1, x2, y2) in boxes:
+        face_crop = rgb_img[y1:y2, x1:x2]
+        if face_crop.size == 0:
+            continue
+
+        face_embedding = embedder.embeddings([face_crop])[0]
+        assigned_id = None
+        age_pred, gender_pred = "unknown", "unknown"
+
+        # Compare with DB
+        if FACE_DB:
+            similarities = [cosine_similarity(face_embedding, entry["embedding"]) for entry in FACE_DB]
+            best_idx = int(np.argmax(similarities))
+            best_score = similarities[best_idx]
+            if best_score > 0.6:
+                assigned_id = FACE_DB[best_idx]["id"]
+                FACE_DB[best_idx]["time"] = now
+                FACE_DB[best_idx]["seen_count"] += 1
+                age_pred = FACE_DB[best_idx]["age"]
+                gender_pred = FACE_DB[best_idx]["gender"]
+
+        # New face
+        if assigned_id is None:
+            assigned_id = NEXT_ID
+            face_pil = PILImage.fromarray(face_crop)
+            try:
+                age_pred = age_model(face_pil)[0]["label"]
+                gender_pred = gender_model(face_pil)[0]["label"]
+            except Exception:
+                age_pred, gender_pred = "unknown", "unknown"
+
+            FACE_DB.append({
                 "id": assigned_id,
+                "embedding": face_embedding,
+                "time": now,
+                "seen_count": 1,
                 "age": age_pred,
-                "gender": gender_pred,
-                "box": [int(x1), int(y1), int(x2), int(y2)]
+                "gender": gender_pred
             })
+            NEXT_ID += 1

-        total_time = round(time.time() - start_time, 3)
-        clean_gpu()
-
-        summary = [
-            {
-                "id": entry["id"],
-                "seen_count": entry["seen_count"],
-                "age": entry["age"],
-                "gender": entry["gender"]
-            }
-            for entry in FACE_DB
-        ]
-
-        return jsonify({
-            "status": "ok",
-            "faces": faces,
-            "face_count": len(faces),
-            "processing_time_sec": total_time,
-            "active_faces_last_hour": len(FACE_DB),
-            "seen_summary_last_hour": summary
+        faces.append({
+            "id": assigned_id,
+            "age": age_pred,
+            "gender": gender_pred,
+            "box": [int(x1), int(y1), int(x2), int(y2)]
         })

-    except Exception as e:
-        return jsonify({"status": "error", "message": str(e)}), 500
-
+    total_time = round(time.time() - start_time, 3)
+    clean_gpu()
+
+    summary = [
+        {
+            "id": entry["id"],
+            "seen_count": entry["seen_count"],
+            "age": entry["age"],
+            "gender": entry["gender"]
+        } for entry in FACE_DB
+    ]
+
+    return {
+        "status": "ok",
+        "faces": faces,
+        "face_count": len(faces),
+        "processing_time_sec": total_time,
+        "active_faces_last_hour": len(FACE_DB),
+        "seen_summary_last_hour": summary
+    }
+
+# -----------------------------
+# Gradio Interface
+# -----------------------------
+demo = gr.Interface(
+    fn=process_image,
+    inputs=gr.Image(type="pil"),
+    outputs="json",
+    title="Face Recognition + Age & Gender",
+    description="YOLOv8 + FaceNet + HuggingFace Age/Gender"
+)

-# -----------------------------
-# Run Server
-# -----------------------------
 if __name__ == "__main__":
-    # Hugging Face Spaces expects host 0.0.0.0 and port 7860
-    app.run(host="0.0.0.0", port=7860, debug=False, use_reloader=False)
+    demo.launch(server_name="0.0.0.0", server_port=7860)
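Note on calling the updated Space: the removed Flask route accepted raw JPEG bytes POSTed to /upload, while the new version exposes process_image() through Gradio's client API. A minimal client sketch, assuming a recent gradio_client release and the Interface's default /predict endpoint; the URL and image path below are placeholders, not part of this commit:

from gradio_client import Client, handle_file

# Point the client at the running Space (placeholder URL).
client = Client("http://localhost:7860/")

# Upload a local image; the app responds with the JSON dict built in process_image().
result = client.predict(handle_file("person.jpg"), api_name="/predict")
print(result)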