app.py CHANGED
@@ -131,6 +131,7 @@ image_examples = [
         "object",
         11318446767408804497,
         "",
+        "turtle"
     ],

     ["__asset__/images/object/rose-1.jpg",
@@ -138,6 +139,7 @@ image_examples = [
         "object",
         6854275249656120509,
         "",
+        "rose",
     ],

     ["__asset__/images/object/jellyfish-1.jpg",
@@ -145,6 +147,7 @@ image_examples = [
         "object",
         17966188172968903484,
         "HelloObject",
+        "jellyfish"
     ],


@@ -153,6 +156,7 @@ image_examples = [
         "camera",
         7970487946960948963,
         "HelloObject",
+        "lush",
     ],

     ["__asset__/images/camera/tusun-1.jpg",
@@ -160,6 +164,7 @@ image_examples = [
         "camera",
         996953226890228361,
         "TUSUN",
+        "tusun",
     ],

     ["__asset__/images/camera/painting-1.jpg",
@@ -167,6 +172,7 @@ image_examples = [
         "camera",
         16867854766769816385,
         "",
+        "painting"
     ],
 ]

@@ -175,7 +181,7 @@ POINTS = {
     'turtle': "__asset__/trajs/object/turtle-1.json",
     'rose': "__asset__/trajs/object/rose-1.json",
     'jellyfish': "__asset__/trajs/object/jellyfish-1.json",
-    '
+    'lush': "__asset__/trajs/camera/lush-1.json",
     'tusun': "__asset__/trajs/camera/tusun-1.json",
     'painting': "__asset__/trajs/camera/painting-1.json",
 }
@@ -184,7 +190,7 @@ IMAGE_PATH = {
     'turtle': "__asset__/images/object/turtle-1.jpg",
     'rose': "__asset__/images/object/rose-1.jpg",
     'jellyfish': "__asset__/images/object/jellyfish-1.jpg",
-    '
+    'lush': "__asset__/images/camera/lush-1.jpg",
     'tusun': "__asset__/images/camera/tusun-1.jpg",
     'painting': "__asset__/images/camera/painting-1.jpg",
 }
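Each example row now ends with a key ("turtle", "rose", ..., "painting") that must resolve in both POINTS and IMAGE_PATH. A quick consistency check along these lines would catch a missing entry before it surfaces as a KeyError at generation time; this is a hedged sketch that assumes app.py's POINTS and IMAGE_PATH are in scope, not part of the diff:

    # Hypothetical sanity check: every example key must exist in both tables.
    for key in ["turtle", "rose", "jellyfish", "lush", "tusun", "painting"]:
        assert key in POINTS and key in IMAGE_PATH, f"missing assets for {key}"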
@@ -277,9 +283,14 @@ class ImageConductor:

     @spaces.GPU(enable_queue=True, duration=200)
     @torch.no_grad()
-    def run(self, first_frame_path, tracking_points, prompt, drag_mode, negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized):
-
+    def run(self, first_frame_path, tracking_points, prompt, drag_mode, negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type):
+        if examples_type != "":
+            ### for adapting high version gradio
+            first_frame_path = IMAGE_PATH[examples_type]
+            tracking_points = json.load(open(POINTS[examples_type]))
+            print("example first_frame_path", first_frame_path)
+            print("example tracking_points", tracking_points)

         original_width, original_height=384, 256
         if isinstance(tracking_points, list):
             input_all_points = tracking_points
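The "### for adapting high version gradio" comment points at the motivation: on recent Gradio versions the drawn tracking points do not reliably survive an example click, so run() re-derives both inputs from the hidden key instead. A minimal standalone sketch of that branch (resolve_example is a hypothetical name; the dict entries are stand-ins for the POINTS and IMAGE_PATH tables above):

    import json

    # Stand-ins for the lookup tables defined earlier in app.py.
    IMAGE_PATH = {"lush": "__asset__/images/camera/lush-1.jpg"}
    POINTS = {"lush": "__asset__/trajs/camera/lush-1.json"}

    def resolve_example(examples_type, first_frame_path, tracking_points):
        # A non-empty key means the call came from an example click, so the
        # first frame and trajectory are reloaded from the bundled assets
        # rather than trusted from the (possibly reset) component state.
        if examples_type != "":
            first_frame_path = IMAGE_PATH[examples_type]
            with open(POINTS[examples_type]) as f:
                tracking_points = json.load(f)
        return first_frame_path, tracking_points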
@@ -581,6 +592,7 @@ with block as demo:

                 with gr.Group():
                     personalized = gr.Dropdown(label="Personalized", choices=['HelloObject', 'TUSUN', ""], value="")
+                    examples_type = gr.Textbox(label="Examples Type (Ignore) ", value="", visible=False)

             with gr.Column(scale=7):
                 output_video = gr.Video(value=None,
@@ -590,15 +602,15 @@

         with gr.Row():
-            def process_example(input_image, prompt, drag_mode, seed, personalized, ):
+            def process_example(input_image, prompt, drag_mode, seed, personalized, examples_type):

-                return input_image, prompt, drag_mode, seed, personalized
+                return input_image, prompt, drag_mode, seed, personalized, examples_type

             example = gr.Examples(
                 label="Input Example",
                 examples=image_examples,
-                inputs=[input_image, prompt, drag_mode, seed, personalized],
-                outputs=[input_image, prompt, drag_mode, seed, personalized],
+                inputs=[input_image, prompt, drag_mode, seed, personalized, examples_type],
+                outputs=[input_image, prompt, drag_mode, seed, personalized, examples_type],
                 fn=process_example,
                 run_on_click=True,
                 examples_per_page=10,
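The change above is the standard hidden-field passthrough for gr.Examples: the invisible Textbox rides along in inputs and outputs so a plain string key survives the example click, while the heavyweight state (the trajectory JSON) is reloaded inside run() from that key. A self-contained sketch of the pattern, assuming a current Gradio release (the example row and file name are hypothetical):

    import gradio as gr

    with gr.Blocks() as demo:
        img = gr.Image(label="Input")
        key = gr.Textbox(value="", visible=False)  # carries the example key

        def passthrough(img, key):
            # Echo the example values back into the components unchanged.
            return img, key

        gr.Examples(
            examples=[["cat.jpg", "cat"]],  # hypothetical example row
            inputs=[img, key],
            outputs=[img, key],
            fn=passthrough,
            run_on_click=True,
        )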
@@ -623,7 +635,7 @@
     input_image.select(add_tracking_points, [tracking_points, first_frame_path, drag_mode], [tracking_points, input_image])

     run_button.click(ImageConductor_net.run, [first_frame_path, tracking_points, prompt, drag_mode,
-                                             negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized],
+                                             negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type],
                                             [output_image, output_video])

     # demo.launch(server_name="0.0.0.0", debug=True, server_port=12345)