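"""Gradio demo: detect 2D facial landmarks with the face-alignment library and draw them on the uploaded image."""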
import gradio as gr
import face_alignment
import numpy as np
from PIL import Image
import cv2
import torch
import spaces

# Initialize the FaceAlignment model for 2D landmark detection
# (fall back to CPU if CUDA is unavailable, e.g. when running locally)
fa = face_alignment.FaceAlignment(
    face_alignment.LandmarksType.TWO_D,
    device='cuda' if torch.cuda.is_available() else 'cpu',
)

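# Run inference on a GPU allocated by Hugging Face Spaces (ZeroGPU) for the duration of each call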
@spaces.GPU
def detect_landmarks(image):
    # Convert the PIL image to an RGB numpy array so OpenCV can draw on it
    img_array = np.array(image.convert("RGB"))
    
    # Detect landmarks: a list of (68, 2) arrays, one per detected face, or None if no face is found
    preds = fa.get_landmarks(img_array)
    
    if preds is None:
        return image, "No face detected in the image."
    
    # Draw landmarks on the image
    for facial_landmarks in preds:
        for (x, y) in facial_landmarks:
            cv2.circle(img_array, (int(x), int(y)), 2, (0, 255, 0), -1)
    
    return Image.fromarray(img_array), f"Detected {len(preds)} face(s) in the image."

# Create Gradio Blocks
with gr.Blocks() as demo:
    gr.Markdown("# Face Alignment Demo")
    gr.Markdown("Upload an image to detect facial landmarks.")
    
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
            submit_btn = gr.Button("Detect Landmarks")
        
        with gr.Column():
            output_image = gr.Image(type="pil", label="Output Image")
            output_text = gr.Textbox(label="Detection Result")
    
    submit_btn.click(
        fn=detect_landmarks,
        inputs=input_image,
        outputs=[output_image, output_text]
    )

# Launch the interface
demo.launch()