Update app.py

app.py CHANGED
@@ -38,10 +38,18 @@ def trim_video(video_path, output_dir="trimmed_videos", max_duration=2):
     # If the video is within the duration, return the original path
     return video_path

-def extract_frames_with_labels(video_path, base_output_dir="frames"):
+def load_driving_video(video_path):
     if is_shared_ui :
         video_path = trim_video(video_path)
         print("Path to the (trimmed) driving video:", video_path)
+        frames_data = extract_frames_with_labels(video_path)
+        return video_path, frames_data, gr.update(open="True")
+    else:
+        frames_data = extract_frames_with_labels(video_path)
+        return video_path, frames_data, gr.update(open="True")
+
+def extract_frames_with_labels(video_path, base_output_dir="frames"):
+
     # Generate a timestamped folder name
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     output_dir = os.path.join(base_output_dir, f"frames_{timestamp}")
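The new `load_driving_video` helper trims the upload when the Space runs on the shared UI, extracts labelled frames, and returns a third value, `gr.update(open="True")`, which (given the event wiring at the end of the diff) is routed to the accordion that wraps the frames gallery; both branches run the same extraction, so only the trim is conditional. Below is a minimal, self-contained sketch of that accordion-opening mechanism, using a hypothetical button as the trigger. Note that the boolean `open=True` is the conventional argument; the string `"True"` in the commit simply happens to be truthy.

```python
import gradio as gr

def reveal_panel():
    # Returning gr.update(open=True) from a handler expands whichever
    # gr.Accordion is listed in that handler's outputs.
    return gr.update(open=True)

with gr.Blocks() as demo:
    show_btn = gr.Button("Show frames")  # hypothetical trigger
    with gr.Accordion("Driving frames", open=False) as frames_panel:
        gr.Markdown("The frames gallery would live here.")
    show_btn.click(fn=reveal_panel, inputs=[], outputs=[frames_panel])

if __name__ == "__main__":
    demo.launch()
```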
@@ -79,7 +87,7 @@ def extract_frames_with_labels(video_path, base_output_dir="frames"):
     # Release the video capture object
     video_capture.release()

-    return
+    return frame_data

 # Define a function to run your script with selected inputs
 def run_xportrait(source_image, driving_video, seed, uc_scale, best_frame, out_frames, num_mix, ddim_steps, progress=gr.Progress(track_tqdm=True)):
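The other half of the refactor makes `extract_frames_with_labels` hand its `frame_data` back to the caller instead of returning nothing, which is what lets `load_driving_video` forward it to the gallery. The function body is not shown in this diff; the following is only a rough sketch of what such an OpenCV-based extractor might look like. The sampling rate (`every_n`), the file naming, and the `(path, label)` tuple format are assumptions; only the timestamped folder, the `video_capture.release()` call, and the new `return frame_data` line are visible in the commit.

```python
import os
import cv2
from datetime import datetime

def extract_frames_with_labels(video_path, base_output_dir="frames", every_n=10):
    # Timestamped output folder, as in the committed code
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_dir = os.path.join(base_output_dir, f"frames_{timestamp}")
    os.makedirs(output_dir, exist_ok=True)

    video_capture = cv2.VideoCapture(video_path)
    frame_data = []
    index = 0
    while True:
        success, frame = video_capture.read()
        if not success:
            break
        if index % every_n == 0:
            frame_path = os.path.join(output_dir, f"frame_{index:04d}.png")
            cv2.imwrite(frame_path, frame)
            # (image, caption) pairs are a valid value for a gr.Gallery
            frame_data.append((frame_path, f"Frame {index}"))
        index += 1

    # Release the video capture object and return the data to the caller
    video_capture.release()
    return frame_data
```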
@@ -130,6 +138,7 @@ div#frames-gallery{
    overflow: scroll!important;
}
"""
+example_frame_data = extract_frames_with_labels("./assets/driving_video.mp4")
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown("# X-Portrait: Expressive Portrait Animation with Hierarchical Motion Attention")
@@ -164,9 +173,9 @@ with gr.Blocks(css=css) as demo:
                 submit_btn = gr.Button("Submit")
                 gr.Examples(
                     examples=[
-                        ["./assets/source_image.png", "./assets/driving_video.mp4"]
+                        ["./assets/source_image.png", "./assets/driving_video.mp4", example_frame_data]
                     ],
-                    inputs=[source_image, driving_video]
+                    inputs=[source_image, driving_video, driving_frames]
                 )
             with gr.Column():
                 video_output = gr.Video(label="Output Video")
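`gr.Examples` only prefills components listed in `inputs`, so the commit precomputes `example_frame_data` once at import time and appends it, alongside `driving_frames`, as a third example field. A compressed sketch of that arrangement follows: the component constructors are stand-ins (the real ones are defined elsewhere in app.py) and the gallery value is hypothetical, while the example paths and the three-element `examples`/`inputs` lists mirror the commit. Whether a `gr.Gallery` accepts example values this way depends on the installed Gradio version.

```python
import gradio as gr

# Precomputed once at import time, mirroring
# example_frame_data = extract_frames_with_labels("./assets/driving_video.mp4")
example_frame_data = [("./assets/frame_0000.png", "Frame 0")]  # hypothetical value

with gr.Blocks() as demo:
    source_image = gr.Image(type="filepath", label="Source Image")  # stand-in
    driving_video = gr.Video(label="Driving Video")                 # stand-in
    driving_frames = gr.Gallery(label="Driving Frames")             # stand-in
    gr.Examples(
        examples=[
            ["./assets/source_image.png", "./assets/driving_video.mp4", example_frame_data]
        ],
        inputs=[source_image, driving_video, driving_frames],
    )
```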
@@ -184,10 +193,10 @@ with gr.Blocks(css=css) as demo:
         """)


-    driving_video.
-    fn =
+    driving_video.upload(
+        fn = load_driving_video,
         inputs = [driving_video],
-        outputs = [driving_frames, frames_gallery_panel],
+        outputs = [driving_video, driving_frames, frames_gallery_panel],
         queue = False
     )

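Finally, the handler is rebound: uploading to `driving_video` now calls `load_driving_video` and writes back to the video component itself, the frames gallery, and the accordion panel, with `queue = False` so the update bypasses the queue. A condensed, runnable sketch of that wiring, with a stubbed handler and hypothetical component definitions standing in for the real ones:

```python
import gradio as gr

def load_driving_video(video_path):
    # Stub of the committed handler: the real one trims on the shared UI,
    # extracts labelled frames, and opens the accordion; the frame list is
    # left empty here.
    return video_path, [], gr.update(open=True)

with gr.Blocks() as demo:
    driving_video = gr.Video(label="Driving Video")           # stand-in definition
    with gr.Accordion("Driving frames", open=False) as frames_gallery_panel:
        driving_frames = gr.Gallery(label="Driving Frames")   # stand-in definition

    driving_video.upload(
        fn=load_driving_video,
        inputs=[driving_video],
        outputs=[driving_video, driving_frames, frames_gallery_panel],
        queue=False,
    )

if __name__ == "__main__":
    demo.launch()
```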