Spaces:
Runtime error
Runtime error
envs
Browse files
- app.py +9 -7
- pipelines/pipeline_imagecoductor.py +0 -1
app.py
CHANGED
|
@@ -288,7 +288,7 @@ class ImageConductor:
|
|
| 288 |
|
| 289 |
self.blur_kernel = blur_kernel
|
| 290 |
|
| 291 |
-
@spaces.GPU(enable_queue=True, duration=
|
| 292 |
@torch.no_grad()
|
| 293 |
def run(self, first_frame_path, tracking_points, prompt, drag_mode, negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type):
|
| 294 |
if examples_type != "":
|
|
@@ -439,10 +439,12 @@ def add_tracking_points(tracking_points, first_frame_path, drag_mode, evt: gr.Se
|
|
| 439 |
|
| 440 |
|
| 441 |
def add_drag(tracking_points):
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
|
|
|
|
|
|
|
| 446 |
return {tracking_points_var: tracking_points}
|
| 447 |
|
| 448 |
|
|
@@ -509,7 +511,7 @@ block = gr.Blocks(
|
|
| 509 |
radius_size=gr.themes.sizes.radius_none,
|
| 510 |
text_size=gr.themes.sizes.text_md
|
| 511 |
)
|
| 512 |
-
)
|
| 513 |
with block:
|
| 514 |
with gr.Row():
|
| 515 |
with gr.Column():
|
|
@@ -643,4 +645,4 @@ with block:
|
|
| 643 |
negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type],
|
| 644 |
[output_image, output_video])
|
| 645 |
|
| 646 |
-
block.
|
|
|
|
| 288 |
|
| 289 |
self.blur_kernel = blur_kernel
|
| 290 |
|
| 291 |
+
@spaces.GPU(enable_queue=True, duration=100)
|
| 292 |
@torch.no_grad()
|
| 293 |
def run(self, first_frame_path, tracking_points, prompt, drag_mode, negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type):
|
| 294 |
if examples_type != "":
|
|
|
|
| 439 |
|
| 440 |
|
| 441 |
def add_drag(tracking_points):
|
| 442 |
+
if not isinstance(tracking_points ,list):
|
| 443 |
+
print("before", tracking_points.value)
|
| 444 |
+
tracking_points.value.append([])
|
| 445 |
+
print(tracking_points.value)
|
| 446 |
+
else:
|
| 447 |
+
tracking_points.append([])
|
| 448 |
return {tracking_points_var: tracking_points}
|
| 449 |
|
| 450 |
|
|
|
|
| 511 |
radius_size=gr.themes.sizes.radius_none,
|
| 512 |
text_size=gr.themes.sizes.text_md
|
| 513 |
)
|
| 514 |
+
).queue()
|
| 515 |
with block:
|
| 516 |
with gr.Row():
|
| 517 |
with gr.Column():
|
|
|
|
| 645 |
negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type],
|
| 646 |
[output_image, output_video])
|
| 647 |
|
| 648 |
+
block.launch()
|
pipelines/pipeline_imagecoductor.py
CHANGED
|
@@ -318,7 +318,6 @@ class ImageConductorPipeline(DiffusionPipeline):
|
|
| 318 |
latents = latents * self.scheduler.init_noise_sigma
|
| 319 |
return latents
|
| 320 |
|
| 321 |
-
@spaces.GPU(enable_queue=True, duration=400)
|
| 322 |
@torch.no_grad()
|
| 323 |
def __call__(
|
| 324 |
self,
|
|
|
|
| 318 |
latents = latents * self.scheduler.init_noise_sigma
|
| 319 |
return latents
|
| 320 |
|
|
|
|
| 321 |
@torch.no_grad()
|
| 322 |
def __call__(
|
| 323 |
self,
|