Duplicate from akhaliq/FaceMesh
Browse files

Co-authored-by: Ahsen Khaliq <akhaliq@users.noreply.huggingface.co>
- .gitattributes +16 -0
- README.md +34 -0
- app.py +51 -0
- packages.txt +1 -0
- requirements.txt +10 -0
.gitattributes
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.bin.* filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.tar.gz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: FaceMesh
|
| 3 |
+
emoji: 📊
|
| 4 |
+
colorFrom: yellow
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: gradio
|
| 7 |
+
app_file: app.py
|
| 8 |
+
pinned: false
|
| 9 |
+
duplicated_from: akhaliq/FaceMesh
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# Configuration
|
| 13 |
+
|
| 14 |
+
`title`: _string_
|
| 15 |
+
Display title for the Space
|
| 16 |
+
|
| 17 |
+
`emoji`: _string_
|
| 18 |
+
Space emoji (emoji-only character allowed)
|
| 19 |
+
|
| 20 |
+
`colorFrom`: _string_
|
| 21 |
+
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
| 22 |
+
|
| 23 |
+
`colorTo`: _string_
|
| 24 |
+
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
| 25 |
+
|
| 26 |
+
`sdk`: _string_
|
| 27 |
+
Can be either `gradio` or `streamlit`
|
| 28 |
+
|
| 29 |
+
`app_file`: _string_
|
| 30 |
+
Path to your main application file (which contains either `gradio` or `streamlit` Python code).
|
| 31 |
+
Path is relative to the root of the repository.
|
| 32 |
+
|
| 33 |
+
`pinned`: _boolean_
|
| 34 |
+
Whether the Space stays on top of your list.
|
app.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import mediapipe as mp
|
| 2 |
+
import gradio as gr
|
| 3 |
+
import cv2
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Example images: fetch two sample portraits at startup so the Gradio demo
# has clickable examples available on disk.
for url, dest in (
    ('https://artbreeder.b-cdn.net/imgs/c789e54661bfb432c5522a36553f.jpeg', 'face1.jpg'),
    ('https://artbreeder.b-cdn.net/imgs/c86622e8cb58d490e35b01cb9996.jpeg', 'face2.jpg'),
):
    torch.hub.download_url_to_file(url, dest)

# Handle to the MediaPipe Face Mesh solution (used by inference below).
mp_face_mesh = mp.solutions.face_mesh

# Drawing utilities and a shared spec (1 px lines, 1 px landmark dots)
# for rendering the mesh onto images.
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
| 16 |
+
|
| 17 |
+
# Run MediaPipe Face Mesh.
|
| 18 |
+
|
| 19 |
+
def inference(image):
    """Detect up to two faces in *image* and draw the MediaPipe face mesh.

    Args:
        image: BGR image as a numpy array (the format Gradio hands over and
            that OpenCV expects here).

    Returns:
        A copy of the input image with the face-mesh tessellation drawn on
        every detected face. If no face is detected, an unannotated copy of
        the input is returned.
    """
    with mp_face_mesh.FaceMesh(
            static_image_mode=True,
            max_num_faces=2,
            min_detection_confidence=0.5) as face_mesh:
        # MediaPipe expects RGB; the input arrives as BGR.
        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        annotated_image = image.copy()
        # Bug fix: multi_face_landmarks is None (not an empty list) when no
        # face is found; iterating it directly raised TypeError and crashed
        # the demo on faceless inputs.
        if not results.multi_face_landmarks:
            return annotated_image
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=drawing_spec,
                connection_drawing_spec=drawing_spec)
        return annotated_image
|
| 36 |
+
|
| 37 |
+
# Page copy shown on the Space.
title = "Face Mesh"
description = "Gradio demo for Face Mesh. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1907.06724'>Real-time Facial Surface Geometry from Monocular Video on Mobile GPUs</a> | <a href='https://github.com/google/mediapipe'>Github Repo</a></p>"

# Wire the inference function into a simple image-in / image-out interface
# and start the app.
demo = gr.Interface(
    fn=inference,
    inputs=[gr.inputs.Image(label="Input")],
    outputs=gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[["face1.jpg"], ["face2.jpg"]],
)
demo.launch()
|
packages.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
libgl1
|
requirements.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
absl-py
|
| 2 |
+
attrs>=19.1.0
|
| 3 |
+
numpy
|
| 4 |
+
opencv-python-headless
|
| 5 |
+
protobuf>=3.11.4
|
| 6 |
+
six
|
| 7 |
+
wheel
|
| 8 |
+
gradio
|
| 9 |
+
mediapipe
|
| 10 |
+
torch
|