import torch
import numpy as np
from PIL import Image

class ConstrainImage:
    """
    A node that constrains an image to a maximum and minimum size while maintaining aspect ratio.
    """
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "max_width": ("INT", {"default": 1024, "min": 0}),
                "max_height": ("INT", {"default": 1024, "min": 0}),
                "min_width": ("INT", {"default": 0, "min": 0}),
                "min_height": ("INT", {"default": 0, "min": 0}),
                "crop_if_required": (["yes", "no"], {"default": "no"}),
            },
        }
| RETURN_TYPES = ("IMAGE",) | |
| FUNCTION = "constrain_image" | |
| CATEGORY = "image" | |
| OUTPUT_IS_LIST = (True,) | |
    def constrain_image(self, images, max_width, max_height, min_width, min_height, crop_if_required):
        crop_if_required = crop_if_required == "yes"
        results = []
        for image in images:
            # Convert the 0-1 float tensor (H, W, C) into an 8-bit RGB PIL image.
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)).convert("RGB")

            current_width, current_height = img.size
            aspect_ratio = current_width / current_height

            # Clamp each dimension into [min, max], then shrink one side so the
            # original aspect ratio is preserved.
            constrained_width = min(max(current_width, min_width), max_width)
            constrained_height = min(max(current_height, min_height), max_height)

            if constrained_width / constrained_height > aspect_ratio:
                constrained_width = max(int(constrained_height * aspect_ratio), min_width)
                if crop_if_required:
                    constrained_height = int(current_height / (current_width / constrained_width))
            else:
                constrained_height = max(int(constrained_width / aspect_ratio), min_height)
                if crop_if_required:
                    constrained_width = int(current_width / (current_height / constrained_height))

            resized_image = img.resize((constrained_width, constrained_height), Image.LANCZOS)

            # When cropping is allowed, centre-crop anything that still exceeds the maximums.
            if crop_if_required and (constrained_width > max_width or constrained_height > max_height):
                left = max((constrained_width - max_width) // 2, 0)
                top = max((constrained_height - max_height) // 2, 0)
                right = min(constrained_width, max_width) + left
                bottom = min(constrained_height, max_height) + top
                resized_image = resized_image.crop((left, top, right, bottom))

            # Back to a (1, H, W, C) float tensor in [0, 1].
            resized_image = np.array(resized_image).astype(np.float32) / 255.0
            resized_image = torch.from_numpy(resized_image)[None,]
            results.append(resized_image)

        return (results,)

NODE_CLASS_MAPPINGS = {
    "ConstrainImage|pysssss": ConstrainImage,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "ConstrainImage|pysssss": "Constrain Image 🐍",
}