app.py
CHANGED
@@ -302,7 +302,7 @@ class ImageConductor:
         if isinstance(tracking_points, list):
             input_all_points = tracking_points
         else:
-            input_all_points = tracking_points
+            input_all_points = tracking_points.value
 
 
         resized_all_points = [tuple([tuple([float(e1[0]*self.width/original_width), float(e1[1]*self.height/original_height)]) for e1 in e]) for e in input_all_points]
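Note (illustration, not part of the diff): the hunk above keeps the isinstance check because tracking_points can reach ImageConductor either as a plain Python list or wrapped in a gr.State, whose payload lives in .value. A minimal sketch of that unwrapping, using a hypothetical helper name rather than code from app.py:

import gradio as gr

def unwrap_points(tracking_points):
    # Hypothetical helper mirroring the two branches in the hunk above:
    # a plain list is already the payload, a gr.State wraps it in .value.
    if isinstance(tracking_points, list):
        return tracking_points
    return tracking_points.value

print(unwrap_points([[(1.0, 2.0), (3.0, 4.0)]]))            # plain list passes through
print(unwrap_points(gr.State([[(1.0, 2.0), (3.0, 4.0)]])))  # wrapped list is unwrapped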
@@ -394,8 +394,6 @@ def reset_states(first_frame_path, tracking_points):
 
 
 def preprocess_image(image, tracking_points):
-    if len(tracking_points) != 0:
-        tracking_points = gr.State([])
     image_pil = image2pil(image.name)
     raw_w, raw_h = image_pil.size
     resize_ratio = max(384/raw_w, 256/raw_h)
@@ -404,7 +402,7 @@ def preprocess_image(image, tracking_points):
     id = str(uuid.uuid4())[:4]
     first_frame_path = os.path.join(output_dir, f"first_frame_{id}.jpg")
     image_pil.save(first_frame_path, quality=95)
-    return {input_image: first_frame_path,
+    return {input_image: first_frame_path, first_frame_path_var: first_frame_path, tracking_points_var: gr.State([])}
 
 
 def add_tracking_points(tracking_points, first_frame_path, drag_mode, evt: gr.SelectData): # SelectData is a subclass of EventData
@@ -414,14 +412,13 @@ def add_tracking_points(tracking_points, first_frame_path, drag_mode, evt: gr.SelectData): # SelectData is a subclass of EventData
         color = (0, 0, 255, 255)
 
     print(f"You selected {evt.value} at {evt.index} from {evt.target}")
-    tracking_points[-1].append(evt.index)
-    print(tracking_points)
+    tracking_points.value[-1].append(evt.index)
+    print(tracking_points.value)
 
-    print("first_frame_path", first_frame_path)
     transparent_background = Image.open(first_frame_path).convert('RGBA')
     w, h = transparent_background.size
     transparent_layer = np.zeros((h, w, 4))
-    for track in tracking_points:
+    for track in tracking_points.value:
         if len(track) > 1:
             for i in range(len(track)-1):
                 start_point = track[i]
@@ -442,8 +439,8 @@ def add_tracking_points(tracking_points, first_frame_path, drag_mode, evt: gr.SelectData): # SelectData is a subclass of EventData
 
 
 def add_drag(tracking_points):
-    tracking_points.append([])
-    print(tracking_points)
+    tracking_points.value.append([])
+    print(tracking_points.value)
     return {tracking_points_var: tracking_points}
 
 
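Note (illustration, not part of the diff): as in add_tracking_points above, add_drag now goes through .value because these handlers receive the gr.State object itself, mutate its payload in place, and hand the same object back. In a stock Gradio setup, where a State input arrives in the handler already unwrapped as its raw value, the equivalent accumulate-on-click pattern would look roughly like this (hypothetical component names, not the app's actual layout):

import gradio as gr

with gr.Blocks() as demo:
    tracks = gr.State([])            # list of drag tracks, one sub-list per drag
    add_btn = gr.Button("Add new drag")

    def add_drag(track_list):
        # The State input is delivered as its plain value here, so the
        # handler appends an empty track and returns the updated list.
        track_list.append([])
        return track_list

    add_btn.click(add_drag, [tracks], tracks)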
@@ -452,11 +449,11 @@ def delete_last_drag(tracking_points, first_frame_path, drag_mode):
         color = (255, 0, 0, 255)
     elif drag_mode=='camera':
         color = (0, 0, 255, 255)
-    tracking_points.pop()
+    tracking_points.value.pop()
     transparent_background = Image.open(first_frame_path).convert('RGBA')
     w, h = transparent_background.size
     transparent_layer = np.zeros((h, w, 4))
-    for track in tracking_points:
+    for track in tracking_points.value:
         if len(track) > 1:
             for i in range(len(track)-1):
                 start_point = track[i]
@@ -481,11 +478,11 @@ def delete_last_step(tracking_points, first_frame_path, drag_mode):
         color = (255, 0, 0, 255)
     elif drag_mode=='camera':
         color = (0, 0, 255, 255)
-    tracking_points[-1].pop()
+    tracking_points.value[-1].pop()
    transparent_background = Image.open(first_frame_path).convert('RGBA')
     w, h = transparent_background.size
     transparent_layer = np.zeros((h, w, 4))
-    for track in tracking_points:
+    for track in tracking_points.value:
         if len(track) > 1:
             for i in range(len(track)-1):
                 start_point = track[i]
@@ -622,6 +619,7 @@ if __name__=="__main__":
             fn=process_example,
             run_on_click=True,
             examples_per_page=10,
+            cache_examples=False,
         )
 
 
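Note (illustration, not part of the diff): cache_examples=False keeps gr.Examples from pre-running process_example for every example row when the Space starts; combined with run_on_click=True, the function executes only when a user clicks an example. A minimal sketch of that combination with placeholder components, not the app's real inputs and outputs:

import gradio as gr

def process_example(prompt):
    # Stand-in for the app's real example handler.
    return prompt.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="prompt")
    out = gr.Textbox(label="result")
    gr.Examples(
        examples=[["a sample prompt"]],
        inputs=[inp],
        outputs=[out],
        fn=process_example,
        run_on_click=True,       # run process_example when an example is clicked
        examples_per_page=10,
        cache_examples=False,    # do not execute process_example at startup
    )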
@@ -629,7 +627,7 @@ if __name__=="__main__":
         gr.Markdown(citation)
 
 
-        image_upload_button.upload(preprocess_image,
+        image_upload_button.upload(preprocess_image, image_upload_button, [input_image, first_frame_path_var, tracking_points_var])
 
         add_drag_button.click(add_drag, [tracking_points_var], tracking_points_var)
 
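Note (illustration, not part of the diff): the rewired upload handler and the new return value of preprocess_image go together. The .upload() event now passes the upload button as the input and lists input_image, first_frame_path_var, and tracking_points_var as outputs, and the function returns a dict keyed by those components so the uploaded frame is shown, its path is stored, and the point state is reset in one step. A stripped-down sketch of that wiring, with hypothetical names standing in for the app's components (the diff returns a fresh gr.State([]) for the state slot; the sketch returns the plain empty list, which is the more common form):

import gradio as gr

with gr.Blocks() as demo:
    input_image = gr.Image(type="filepath", label="first frame")
    first_frame_path_var = gr.State("")       # path of the saved first frame
    tracking_points_var = gr.State([])        # accumulated drag tracks
    image_upload_button = gr.UploadButton("Upload image", file_types=["image"])

    def preprocess_image(image_file, tracking_points):
        # app.py reads image.name; depending on the Gradio version the upload
        # value may instead already be a plain path string.
        path = image_file if isinstance(image_file, str) else image_file.name
        # Returning a dict keyed by output components updates all three at once
        # and clears the point state for the new image.
        return {input_image: path, first_frame_path_var: path, tracking_points_var: []}

    image_upload_button.upload(preprocess_image,
                               [image_upload_button, tracking_points_var],
                               [input_image, first_frame_path_var, tracking_points_var])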