Update app.py
app.py
CHANGED
@@ -18,6 +18,7 @@ zip_ref.close()
 # Load face detector
 mtcnn = MTCNN(margin=14, keep_all=True, factor=0.7, device='cpu')

+#Facial Detection function, Reference: (Timesler, 2020); Source link: https://www.kaggle.com/timesler/facial-recognition-model-in-pytorch
 class DetectionPipeline:
     """Pipeline class for detecting faces in the frames of a video file."""

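For readers unfamiliar with the detector referenced in that new comment, the sketch below shows the frame-sampling-plus-MTCNN pattern the linked Timesler notebook describes. It is illustrative only, not code from this Space: the function name sample_faces, the n_frames parameter, and the OpenCV-based frame reading are all assumptions.

# Illustrative sketch (not part of the diff): sample frames from a video and
# collect MTCNN face crops, in the spirit of the referenced DetectionPipeline.
import cv2
from PIL import Image
from facenet_pytorch import MTCNN

mtcnn = MTCNN(margin=14, keep_all=True, factor=0.7, device='cpu')

def sample_faces(video_path, n_frames=16):
    """Run MTCNN on evenly spaced frames and return the detected face crops."""
    cap = cv2.VideoCapture(video_path)
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    step = max(total // n_frames, 1)
    faces = []
    for idx in range(0, total, step):
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ok, frame = cap.read()
        if not ok:
            break
        # OpenCV decodes frames as BGR; MTCNN expects RGB input.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        detected = mtcnn(Image.fromarray(rgb))  # face tensors, or None if no face found
        if detected is not None:
            faces.extend(detected)
    cap.release()
    return faces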
@@ -138,24 +139,22 @@ def deepfakespredict(input_video):
     else:
         text = "The video is REAL."

-    face_frames = []
-
-    for face in faces:
-        face_frame = Image.fromarray(face.astype('uint8'), 'RGB')
-        face_frames.append(face_frame)
+    face_frames = faces.astype(np.uint8)

     face_frames[0].save('results.gif', save_all=True, append_images=face_frames[1:], duration = 250, loop = 100 )
     clip = mp.VideoFileClip("results.gif")
-    clip.write_videofile("
+    clip.write_videofile("video.mp4")

-    return text, text2, "
+    return text, text2, "video.mp4"


 title="EfficientNetV2 Deepfakes Video Detector"
-description="This is a demo implementation of Deepfakes
-
-
-
+description="This is a demo implementation of EfficientNetV2 Deepfakes Image Detector by using frame-by-frame detection. \
+To use it, simply upload your video, or click one of the examples to load them.\
+This demo and model represent the work of \"Achieving Face Swapped Deepfakes Detection Using EfficientNetV2\" by Lee Sheng Yeh. \
+The samples were extracted from Celeb-DF(V2)(Li et al, 2020) and FaceForensics++(Rossler et al., 2019). Full reference details is available in \"references.txt.\" \
+"
+examples = [
 ['Video1-fake-1-ff.mp4'],
 ['Video6-real-1-ff.mp4'],
 ['Video3-fake-3-ff.mp4'],
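As a reference for the export step in the hunk above (face crops to results.gif, then an MP4 for the Gradio video output), here is a minimal, self-contained sketch. The function export_face_sequence and its arguments are illustrative names, and it assumes faces is an iterable of HxWx3 face crops; since Image.save is a PIL method, each crop is wrapped with Image.fromarray before the GIF is written.

# Illustrative sketch (not part of the diff): write the face sequence as a GIF,
# then re-encode it with moviepy as an MP4 the Gradio video output can play.
import numpy as np
from PIL import Image
import moviepy.editor as mp  # moviepy 1.x import style, matching the app's "mp" alias

def export_face_sequence(faces, gif_path='results.gif', mp4_path='video.mp4'):
    # Assumes each element of `faces` is an HxWx3 array castable to uint8.
    frames = [Image.fromarray(np.asarray(f, dtype=np.uint8), 'RGB') for f in faces]
    frames[0].save(gif_path, save_all=True, append_images=frames[1:],
                   duration=250, loop=100)
    # moviepy infers the MP4 codec from the output file extension.
    mp.VideoFileClip(gif_path).write_videofile(mp4_path)
    return mp4_path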
@@ -164,11 +163,10 @@ examples = [
 ['fake-1.mp4'],
 ]

-demo = gr.Interface(deepfakespredict,
+gr.Interface(deepfakespredict,
 inputs = ["video"],
 outputs=["text","text", gr.outputs.Video(label="Detected face sequence")],
 title=title,
 description=description,
 examples=examples
-)
-demo.launch()
+).launch()
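The final hunk replaces the named demo object with a chained gr.Interface(...).launch() call. The sketch below shows that pattern in isolation with a stub in place of deepfakespredict; the stub, its return values, and the title value are placeholders taken from this diff, and gr.outputs.Video is the older Gradio output API that the diff itself uses.

# Illustrative sketch (not part of the diff): the chained Interface(...).launch()
# wiring, using a placeholder predictor instead of the real deepfakespredict.
import gradio as gr

def predict_stub(video):
    # Placeholder: two text outputs plus a path for the face-sequence video.
    return "The video is REAL.", "stub prediction", video

gr.Interface(predict_stub,
             inputs=["video"],
             outputs=["text", "text", gr.outputs.Video(label="Detected face sequence")],
             title="EfficientNetV2 Deepfakes Video Detector",
             ).launch()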