Spaces:
Update app.py
app.py CHANGED

@@ -27,8 +27,8 @@ SAVE_DIR = Path("/tmp/emotion_results")
 SAVE_DIR.mkdir(exist_ok=True)
 
 # Create directories
-(SAVE_DIR / "faces").mkdir(exist_ok=True)
-(SAVE_DIR / "annotated").mkdir(exist_ok=True)
+(SAVE_DIR / "faces").mkdir(exist_ok=True)
+(SAVE_DIR / "annotated").mkdir(exist_ok=True)
 for emotion in EMOTION_MAP.keys():
     (SAVE_DIR / "faces" / emotion).mkdir(exist_ok=True, parents=True)
     (SAVE_DIR / "annotated" / emotion).mkdir(exist_ok=True, parents=True)
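
For orientation, these mkdir calls build a two-level tree under /tmp/emotion_results; the per-emotion calls pass parents=True, so they would create the faces/ and annotated/ parents even on their own. A sketch with stand-in emotion names, since EMOTION_MAP is defined elsewhere in app.py:

# Sketch of the resulting layout; "happy" and "sad" are hypothetical keys
# standing in for whatever EMOTION_MAP actually contains.
from pathlib import Path

EMOTION_MAP = {"happy": "Happy", "sad": "Sad"}  # hypothetical stand-in
SAVE_DIR = Path("/tmp/emotion_results")

SAVE_DIR.mkdir(exist_ok=True)
for kind in ("faces", "annotated"):
    for emotion in EMOTION_MAP:
        # e.g. /tmp/emotion_results/faces/happy, /tmp/emotion_results/annotated/sad
        (SAVE_DIR / kind / emotion).mkdir(exist_ok=True, parents=True)
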
@@ -56,7 +56,7 @@ def predict_emotion(batch_no: str, image):
     try:
         # Convert PIL Image to OpenCV format
         frame = np.array(image)
-        if frame.ndim == 3:
+        if frame.ndim == 3:
             frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
 
         # Try different backends for face detection
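
The guarded conversion exists because np.array on a PIL image yields RGB channel order (or a 2-D array for grayscale input), while OpenCV's drawing and imwrite routines expect BGR. A small standalone illustration:

# Sketch: PIL -> NumPy -> OpenCV colour order, guarding grayscale input.
import numpy as np
import cv2
from PIL import Image

image = Image.new("RGB", (64, 64), (255, 0, 0))    # pure red in RGB
frame = np.array(image)                            # shape (64, 64, 3), RGB order
if frame.ndim == 3:                                # grayscale arrays are (H, W) and need no swap
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) # red is now (0, 0, 255) in BGR
print(frame[0, 0])                                 # [  0   0 255]
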
@@ -86,14 +86,14 @@ def predict_emotion(batch_no: str, image):
         # Extract face coordinates
         x, y, w, h = region['x'], region['y'], region['w'], region['h']
 
-        # 1. Save raw face crop
+        # 1. Save raw face crop
         face_crop = frame[y:y+h, x:x+w]
         timestamp = int(time.time())
         face_dir = SAVE_DIR / "faces" / emotion
         face_path = face_dir / f"{batch_no}_{timestamp}.jpg"
         cv2.imwrite(str(face_path), face_crop)
 
-        # 2. Create and save annotated image
+        # 2. Create and save annotated image
         annotated_frame = frame.copy()
         cv2.rectangle(annotated_frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
         cv2.putText(annotated_frame, f"{emotion} {EMOTION_MAP[emotion]} {confidence:.1f}%",
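
Worth noting when reading the crop and drawing lines: NumPy slices rows first, so the crop is frame[y:y+h, x:x+w], while the OpenCV drawing calls take (x, y) points. A self-contained sketch with made-up coordinates:

# Sketch: cropping a detected region and drawing an annotated copy;
# the coordinates are hypothetical stand-ins for the detector output.
import numpy as np
import cv2

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy BGR frame
x, y, w, h = 100, 50, 120, 160                    # hypothetical face box

face_crop = frame[y:y+h, x:x+w]                   # rows = y axis, columns = x axis
annotated = frame.copy()
cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(annotated, "happy 85.0%", (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

print(face_crop.shape)                            # (160, 120, 3)
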
@@ -173,11 +173,7 @@ def update_gallery(emotion, image_type):
     for emotion, images in image_dict.items():
         for img_path in images:
             gallery.append((img_path, f"{emotion}: {Path(img_path).name}"))
-    return
-        choices=[img[0] for img in gallery],
-        label="Selected Images",
-        value=[]
-    )
+    return gallery, [img[0] for img in gallery]
 
 def get_logs():
     if LOG_FILE.exists():
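
The removed return statement was an orphaned fragment of what looks like a CheckboxGroup(...) call (choices=, label=, value=) and would not even parse; the replacement simply returns two values, matching the outputs=[gallery, selected_images] wiring seen in the final hunk. A sketch of that pairing with stand-in data:

# Sketch: a function returning (gallery_items, paths) and a button that
# pushes both into a Gallery and a CheckboxGroup. Names and data here are
# illustrative, not the app's exact layout.
import gradio as gr

def update_gallery(emotion, image_type):
    # Stand-in body: the real function walks SAVE_DIR and builds (path, caption) pairs.
    gallery = [("face1.jpg", "happy: face1.jpg"), ("face2.jpg", "sad: face2.jpg")]
    return gallery, [img[0] for img in gallery]

with gr.Blocks() as demo:
    gallery = gr.Gallery(label="Image Gallery")
    selected_images = gr.CheckboxGroup(label="Selected Images")
    refresh_btn = gr.Button("Refresh Gallery")
    refresh_btn.click(
        lambda: update_gallery("All Emotions", "faces"),
        outputs=[gallery, selected_images],
    )
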
@@ -264,12 +260,10 @@ with gr.Blocks(title="Emotion Capture", css="""
     )
 
 # Data Management Interface
-with gr.Blocks(title="Data Management"
-
-
-
-    .action-buttons { margin-top: 20px; }
-""") as data_interface:
+with gr.Blocks(title="Data Management") as data_interface:
+
+    # Initialize gallery data
+    initial_gallery, initial_selection = update_gallery("All Emotions", "faces")
 
     gr.Markdown("# Data Management Interface")
 
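
The removed lines passed an inline css string to gr.Blocks (the .action-buttons rule is the part shown); the replacement drops it and instead precomputes the initial gallery state. For reference, a minimal sketch of passing title and css together, using just that surviving rule, should the styling ever be wanted back:

# Sketch: title and css as separate keyword arguments to gr.Blocks.
import gradio as gr

with gr.Blocks(
    title="Data Management",
    css="""
    .action-buttons { margin-top: 20px; }
    """,
) as data_interface:
    gr.Markdown("# Data Management Interface")
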
@@ -290,12 +284,13 @@ with gr.Blocks(title="Data Management", css="""
     refresh_btn = gr.Button("Refresh Gallery")
 
     gallery = gr.Gallery(
+        value=initial_gallery,
         label="Image Gallery",
-        elem_classes="gallery",
         columns=4
     )
     selected_images = gr.CheckboxGroup(
         label="Selected Images",
+        choices=initial_selection,
         interactive=True
     )
 
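
Seeding gr.Gallery with value= and gr.CheckboxGroup with choices= at construction time is what lets the final hunk delete the data_interface.load / demo.load calls: the components start out populated instead of being filled by a load event. A self-contained sketch of the pattern, with placeholder images standing in for the saved crops:

# Sketch: seeding components with precomputed data instead of a .load() event.
import numpy as np
from PIL import Image
import gradio as gr

# Create two placeholder images so the sketch is runnable on its own.
for name in ("face1.jpg", "face2.jpg"):
    Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8)).save(name)

def get_initial_data():
    # Stand-in for update_gallery("All Emotions", "faces")
    items = [("face1.jpg", "happy: face1.jpg"), ("face2.jpg", "sad: face2.jpg")]
    return items, [path for path, _ in items]

with gr.Blocks(title="Data Management") as data_interface:
    # Seed the components at build time instead of via a load event
    initial_gallery, initial_selection = get_initial_data()
    gallery = gr.Gallery(value=initial_gallery, label="Image Gallery", columns=4)
    selected_images = gr.CheckboxGroup(
        label="Selected Images",
        choices=initial_selection,
        interactive=True,
    )
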
@@ -339,7 +334,7 @@ with gr.Blocks(title="Data Management", css="""
     )
 
     download_all_btn.click(
-        lambda e, t: create_custom_zip([img
+        lambda e, t: create_custom_zip([img for img_list in get_image_gallery(e, t).values() for img in img_list]),
         inputs=[emotion_selector, image_type_selector],
         outputs=download_output
     )
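
The rewritten lambda assumes get_image_gallery(e, t) returns a dict of image lists keyed by emotion, and flattens it with a nested comprehension (outer loop first, then inner). The same shape in isolation:

# Sketch: flattening a {category: [paths]} dict with a nested comprehension.
image_dict = {
    "happy": ["happy/a.jpg", "happy/b.jpg"],
    "sad": ["sad/c.jpg"],
}
flat = [img for img_list in image_dict.values() for img in img_list]
print(flat)  # ['happy/a.jpg', 'happy/b.jpg', 'sad/c.jpg']
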
@@ -392,26 +387,11 @@ with gr.Blocks(title="Data Management", css="""
         lambda: update_gallery("All Emotions", "faces"),
         outputs=[gallery, selected_images]
     )
-
-    # Initial load of logs
-    data_interface.load(
-        view_logs,
-        outputs=logs_display
-    )
 
 # Combine interfaces
 demo = gr.TabbedInterface(
     [capture_interface, data_interface],
-    ["Emotion Capture", "Data Management"]
-    css="""
-    .tab { padding: 20px; }
-    """
-)
-
-# Initialize gallery and logs
-demo.load(
-    lambda: update_gallery("All Emotions", "faces"),
-    outputs=[gallery, selected_images]
+    ["Emotion Capture", "Data Management"]
 )
 
 if __name__ == "__main__":
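
With the demo.load block and the trailing css argument gone (as written, the old call was also missing a comma before css and would not have parsed), gr.TabbedInterface is left with just the list of Blocks and the matching tab names. A minimal end-to-end sketch of combining two Blocks this way; the interface bodies here are placeholders, since both are defined earlier in app.py:

# Sketch: combining two Blocks apps into tabs and launching the result.
import gradio as gr

with gr.Blocks(title="Emotion Capture") as capture_interface:
    gr.Markdown("# Emotion Capture")

with gr.Blocks(title="Data Management") as data_interface:
    gr.Markdown("# Data Management Interface")

demo = gr.TabbedInterface(
    [capture_interface, data_interface],
    ["Emotion Capture", "Data Management"],
)

if __name__ == "__main__":
    demo.launch()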