Update app.py
app.py
CHANGED
@@ -273,7 +273,7 @@ def main(args):
     else:
         return predicted_video_256_path, predicted_video_256_path
 
-@spaces.GPU
+@spaces.GPU(duration=300)
 def generate_video(uploaded_img, uploaded_audio, infer_type,
                    pose_yaw, pose_pitch, pose_roll, face_location, face_scale, step_T, face_sr, seed):
     if uploaded_img is None or uploaded_audio is None:
@@ -346,8 +346,10 @@ with gr.Blocks() as demo:
     output_video_512 = gr.Video(label="Generated Video (512)")
     output_message = gr.Markdown()
 
-
-
+    #
+    gr.Markdown("credits: [X-LANCE](https://github.com/X-LANCE/AniTalker) (creators of the github repository), [Yuhan Xu](https://github.com/yuhanxu01)(webui), Delik")
+    gr.Markdown("AniTalker: Animate Vivid and Diverse Talking Faces through Identity-Decoupled Facial Motion Encoding. [[arXiv]](https://arxiv.org/abs/2405.03121) [[project]](https://x-lance.github.io/AniTalker/)")
+
     generate_button = gr.Button("Generate Video")
 
     with gr.Accordion("Configuration", open=True):
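In plain terms, the commit does two things: the ZeroGPU decorator on `generate_video` now requests a longer allocation via `duration=300` (presumably 300 seconds of GPU time per call, so long video generations are less likely to be cut off), and two credit/paper lines are added to the Gradio layout just above the Generate button. Below is a minimal sketch of how those pieces sit in `app.py`; only the decorator call, the function signature, and the `gr.*` lines come from the diff above, while the imports, the elided function body, and the surrounding layout are assumptions for illustration.

```python
import gradio as gr
import spaces  # Hugging Face Spaces SDK; the GPU decorator is effectively a pass-through outside Spaces

# ZeroGPU decorator: duration=300 asks for a longer per-call GPU allocation
# than the default for this long-running generation step.
@spaces.GPU(duration=300)
def generate_video(uploaded_img, uploaded_audio, infer_type,
                   pose_yaw, pose_pitch, pose_roll, face_location,
                   face_scale, step_T, face_sr, seed):
    """Signature taken from the diff; the inference body is elided here."""
    ...

with gr.Blocks() as demo:
    output_video_512 = gr.Video(label="Generated Video (512)")
    output_message = gr.Markdown()

    # Credit lines added by this commit, rendered above the Generate button.
    gr.Markdown("credits: [X-LANCE](https://github.com/X-LANCE/AniTalker) (creators of the github repository), [Yuhan Xu](https://github.com/yuhanxu01)(webui), Delik")
    gr.Markdown("AniTalker: Animate Vivid and Diverse Talking Faces through Identity-Decoupled Facial Motion Encoding. [[arXiv]](https://arxiv.org/abs/2405.03121) [[project]](https://x-lance.github.io/AniTalker/)")

    generate_button = gr.Button("Generate Video")
    # The click handler wiring the upload/pose/config inputs and the video
    # outputs to generate_video is omitted from this sketch.

demo.launch()
```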