from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..modeling_utils import ModelMixin
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .unet_blocks import UNetMidBlock2D, get_down_block, get_up_block

@dataclass
class UNet2DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Hidden states output. Output of the last layer of the model.
    """

    sample: torch.FloatTensor
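
# Usage sketch (illustrative, not part of this module): the forward pass below
# wraps its result in UNet2DOutput, so callers read the prediction via the
# `.sample` attribute, e.g. `noise_pred = model(noisy, t).sample`, or as the
# first tuple element when calling with `return_dict=False`.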

class UNet2DModel(ModelMixin, ConfigMixin):
    r"""
    UNet2DModel is a 2D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all models (such as downloading or saving).

    Parameters:
        sample_size (`int`, *optional*): Input sample size.
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
        freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for Fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
        mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
        downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
        norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization.
        norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization.
    """
    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 3,
        out_channels: int = 3,
        center_input_sample: bool = False,
        time_embedding_type: str = "positional",
        freq_shift: int = 0,
        flip_sin_to_cos: bool = True,
        down_block_types: Tuple[str] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
        up_block_types: Tuple[str] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
        block_out_channels: Tuple[int] = (224, 448, 672, 896),
        layers_per_block: int = 2,
        mid_block_scale_factor: float = 1,
        downsample_padding: int = 1,
        act_fn: str = "silu",
        attention_head_dim: int = 8,
        norm_num_groups: int = 32,
        norm_eps: float = 1e-5,
    ):
        super().__init__()
        self.sample_size = sample_size
        time_embed_dim = block_out_channels[0] * 4

        # input
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]

        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
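
        # Worked example of the embedding widths (traced with the default
        # block_out_channels, so 224 base channels): the positional projection
        # emits a 224-dim vector per timestep, the Fourier projection a
        # 448-dim one (concatenated sin and cos halves), and TimestepEmbedding
        # maps either to time_embed_dim = 4 * 224 = 896, which every resnet
        # block consumes as `temb_channels`.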
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                attn_num_head_channels=attention_head_dim,
                downsample_padding=downsample_padding,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            temb_channels=time_embed_dim,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift="default",
            attn_num_head_channels=attention_head_dim,
            resnet_groups=norm_num_groups,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                attn_num_head_channels=attention_head_dim,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
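
        # Channel bookkeeping above, traced with the default (224, 448, 672,
        # 896): the reversed list is (896, 672, 448, 224), so the up blocks see
        # (prev_output, input, output) triples of (896, 672, 896),
        # (896, 448, 672), (672, 224, 448), and (448, 224, 224).
        # `input_channel` is the width of the skip connections arriving from
        # the matching down block, and only the final block omits its upsampler.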

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet2DOutput, Tuple]:
| """r | |
| Args: | |
| sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor | |
| timestep (`torch.FloatTensor` or `float` or `int): (batch) timesteps | |
| return_dict (`bool`, *optional*, defaults to `True`): | |
| Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. | |
| Returns: | |
| [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True, | |
| otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. | |
| """ | |
        # 0. center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        t_emb = self.time_proj(timesteps)
        emb = self.time_embedding(t_emb)

        # 2. pre-process
        skip_sample = sample
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "skip_conv"):
                sample, res_samples, skip_sample = downsample_block(
                    hidden_states=sample, temb=emb, skip_sample=skip_sample
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, emb)

        # 5. up
        skip_sample = None
        for upsample_block in self.up_blocks:
            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            if hasattr(upsample_block, "skip_conv"):
                sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
            else:
                sample = upsample_block(sample, res_samples, emb)

        # 6. post-process
        # make sure hidden states are in float32
        # when running in half-precision
        sample = self.conv_norm_out(sample.float()).type(sample.dtype)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if skip_sample is not None:
            sample += skip_sample

        if self.config.time_embedding_type == "fourier":
            timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
            sample = sample / timesteps

        if not return_dict:
            return (sample,)

        return UNet2DOutput(sample=sample)
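

# Minimal smoke test (a sketch, not shipped library behavior; the small block
# widths below are arbitrary assumptions). Because this module uses relative
# imports, run it in package context, e.g. `python -m diffusers.models.unet_2d`.
if __name__ == "__main__":
    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        block_out_channels=(32, 64, 64, 64),
        layers_per_block=1,
    )
    noisy = torch.randn(2, 3, 32, 32)
    out = model(noisy, timestep=10)
    # The UNet preserves spatial shape: (2, 3, 32, 32) in, (2, 3, 32, 32) out.
    assert out.sample.shape == noisy.shape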