Update app.py
app.py (CHANGED)
@@ -46,12 +46,78 @@ def log_emotion(batch_no, emotion, confidence, face_path, annotated_path):
         writer = csv.writer(f)
         writer.writerow([timestamp, batch_no, emotion, confidence, str(face_path), str(annotated_path)])

+def validate_batch_number(batch_no):
+    """Validate that batch number contains only digits"""
+    if not batch_no.strip():
+        return False, "Batch number cannot be empty"
+    if not batch_no.isdigit():
+        return False, "Batch number must contain only numbers"
+    return True, ""
+
+def toggle_webcam_flow(batch_no):
+    """Control visibility of components based on flow"""
+    if batch_no.strip():
+        # Validate batch number
+        is_valid, validation_msg = validate_batch_number(batch_no)
+        if not is_valid:
+            return {
+                batch_no_input: gr.Textbox(visible=True, value=""),
+                webcam: gr.Image(visible=False),
+                message: gr.Textbox(value=validation_msg, visible=True),
+                batch_no_state: ""
+            }
+
+        # Valid batch number - hide input, show webcam
+        return {
+            batch_no_input: gr.Textbox(visible=False),
+            webcam: gr.Image(visible=True),
+            message: gr.Textbox(visible=False),
+            batch_no_state: batch_no
+        }
+    else:
+        # No batch number - show input, hide webcam
+        return {
+            batch_no_input: gr.Textbox(visible=True),
+            webcam: gr.Image(visible=False),
+            message: gr.Textbox(visible=False),
+            batch_no_state: ""
+        }
+
 def predict_emotion(batch_no: str, image):
     if not batch_no.strip():
-        return
+        return {
+            result_img: None,
+            result_text: None,
+            message: gr.Textbox(value="Please enter a batch number first", visible=True),
+            result_img_comp: gr.Image(visible=False),
+            result_text_comp: gr.Textbox(visible=False),
+            batch_no_input: gr.Textbox(visible=True),
+            webcam: gr.Image(visible=False)
+        }
+
+    # Validate batch number format
+    is_valid, validation_msg = validate_batch_number(batch_no)
+    if not is_valid:
+        return {
+            result_img: None,
+            result_text: None,
+            message: gr.Textbox(value=validation_msg, visible=True),
+            result_img_comp: gr.Image(visible=False),
+            result_text_comp: gr.Textbox(visible=False),
+            batch_no_input: gr.Textbox(visible=True),
+            webcam: gr.Image(visible=False)
+        }

     if image is None:
-        return
+        return {
+            result_img: None,
+            result_text: None,
+            message: gr.Textbox(value="Please capture your face first", visible=True),
+            result_img_comp: gr.Image(visible=False),
+            result_text_comp: gr.Textbox(visible=False),
+            batch_no_input: gr.Textbox(visible=False),
+            webcam: gr.Image(visible=True)
+        }

     try:
         # Convert PIL Image to OpenCV format
@@ -75,7 +141,15 @@ def predict_emotion(batch_no: str, image):
                 continue

         if not results:
-            return
+            return {
+                result_img: None,
+                result_text: None,
+                message: gr.Textbox(value="No face detected. Please try again.", visible=True),
+                result_img_comp: gr.Image(visible=False),
+                result_text_comp: gr.Textbox(visible=False),
+                batch_no_input: gr.Textbox(visible=True),
+                webcam: gr.Image(visible=False)
+            }

         # Process the first face found
         result = results[0] if isinstance(results, list) else results
@@ -108,15 +182,27 @@ def predict_emotion(batch_no: str, image):

         # Convert back to PIL format for display
         output_img = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
-
+
+        return {
+            result_img: output_img,
+            result_text: f"Batch {batch_no}: {emotion.title()} ({confidence:.1f}%)",
+            message: gr.Textbox(visible=False),
+            result_img_comp: gr.Image(visible=True),
+            result_text_comp: gr.Textbox(visible=True),
+            batch_no_input: gr.Textbox(visible=True, value=""),
+            webcam: gr.Image(visible=False)
+        }

     except Exception as e:
-        return
-
-
-
-
-
+        return {
+            result_img: None,
+            result_text: None,
+            message: gr.Textbox(value=f"Error processing image: {str(e)}", visible=True),
+            result_img_comp: gr.Image(visible=False),
+            result_text_comp: gr.Textbox(visible=False),
+            batch_no_input: gr.Textbox(visible=True, value=""),
+            webcam: gr.Image(visible=False)
+        }

 def get_image_gallery(emotion, image_type):
     """Get image gallery for selected emotion and type"""
@@ -362,42 +448,63 @@ with gr.Blocks(title="Emotion Capture", css="""

     gr.Markdown("""
     # Emotion Capture Interface
-    1. Enter your batch number
-    2. Capture your face (will appear
-    3. View your emotion analysis results
+    1. Enter your batch number (numbers only)
+    2. Capture your face (webcam will appear)
+    3. View your emotion analysis results
+    4. Interface will reset automatically for next user
     """)

-
-
-    message = gr.Textbox(label="", visible=False, interactive=False, elem_classes="message")
+    # Add a state to store batch number between callbacks
+    batch_no_state = gr.State("")

-    # Components that will be shown/hidden
     with gr.Row():
-
-
-
-
-        interactive=True,
-        mirror_webcam=True,
-        visible=False
+        batch_no_input = gr.Textbox(
+            label="Batch Number",
+            placeholder="Enter numbers only",
+            visible=True
         )
+    message = gr.Textbox(label="", visible=False, interactive=False, elem_classes="message")
+
+    # Webcam component starts hidden
+    webcam = gr.Image(
+        sources=["webcam"],
+        type="pil",
+        label="Face Capture",
+        interactive=True,
+        mirror_webcam=True,
+        visible=False
+    )
+
+    # Results components
     with gr.Row():
         result_img = gr.Image(label="Analysis Result", interactive=False, visible=False)
     with gr.Row():
         result_text = gr.Textbox(label="Emotion Result", interactive=False, visible=False)

-    #
-
-
-
-
+    # Track visibility separately for easier management
+    result_img_comp = gr.Image(visible=False)
+    result_text_comp = gr.Textbox(visible=False)
+
+    # Show webcam when valid batch number is entered
+    batch_no_input.change(
+        toggle_webcam_flow,
+        inputs=batch_no_input,
+        outputs=[batch_no_input, webcam, message, batch_no_state]
     )

     # Process when webcam captures an image
     webcam.change(
         predict_emotion,
-        inputs=[
-        outputs=[
+        inputs=[batch_no_state, webcam],
+        outputs=[
+            result_img,
+            result_text,
+            message,
+            result_img_comp,
+            result_text_comp,
+            batch_no_input,
+            webcam
+        ]
     )

     # Data Management Interface
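
Note on the update pattern this change relies on: each callback returns a dict keyed by component objects, so one handler can flip visibility, set values, and write to state in a single pass, with gr.State carrying the batch number from the textbox callback to the webcam callback. Below is a minimal, self-contained sketch of that wiring, assuming Gradio 4.x; the names reveal_webcam, batch_box, cam, note, and batch_state are illustrative and are not taken from app.py.

import gradio as gr

def reveal_webcam(batch_no: str):
    # Returning a dict keyed by component objects updates several components
    # at once; every key must also be listed in the event's outputs=.
    if batch_no.strip().isdigit():
        return {
            batch_box: gr.Textbox(visible=False),
            cam: gr.Image(visible=True),
            note: gr.Textbox(visible=False),
            batch_state: batch_no,          # raw value goes into the State
        }
    return {
        batch_box: gr.Textbox(visible=True),
        cam: gr.Image(visible=False),
        note: gr.Textbox(value="Batch number must contain only numbers", visible=True),
        batch_state: "",
    }

with gr.Blocks() as demo:
    batch_state = gr.State("")                                # survives between callbacks
    batch_box = gr.Textbox(label="Batch Number")
    note = gr.Textbox(visible=False, interactive=False)
    cam = gr.Image(sources=["webcam"], type="pil", visible=False)

    # Typing a valid batch number hides the textbox and reveals the webcam.
    batch_box.change(
        reveal_webcam,
        inputs=batch_box,
        outputs=[batch_box, cam, note, batch_state],
    )

demo.launch()

As in the diff, any component that can appear as a key in the returned dict should also appear in outputs=; Gradio rejects dict keys that were not declared as outputs for that event.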