| """ | |
| Copyright (C) 2019 NVIDIA Corporation. All rights reserved. | |
| Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). | |
| """ | |
| import torch | |
| import torch.nn as nn | |
| import torch.nn.functional as F | |
| import kornia | |
class InstanceNorm(nn.Module):
    def __init__(self, epsilon=1e-8):
        """
        @notice: avoid in-place ops.
        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
        """
        super(InstanceNorm, self).__init__()
        self.epsilon = epsilon

    def forward(self, x):
        x = x - torch.mean(x, (2, 3), True)
        tmp = torch.mul(x, x)  # or x ** 2
        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
        return x * tmp
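
# Note: InstanceNorm above is essentially nn.InstanceNorm2d(affine=False) written out
# explicitly (with a smaller epsilon), kept as a hand-rolled module to avoid the
# in-place-op autograd issue mentioned in its docstring.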
class ApplyStyle(nn.Module):
    """
    @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
    """
    def __init__(self, latent_size, channels):
        super(ApplyStyle, self).__init__()
        self.linear = nn.Linear(latent_size, channels * 2)

    def forward(self, x, latent):
        style = self.linear(latent)  # style => [batch_size, n_channels*2]
        shape = [-1, 2, x.size(1), 1, 1]
        style = style.view(shape)    # [batch_size, 2, n_channels, 1, 1]
        # AdaIN-style modulation: per-channel scale (offset by 1) and bias from the latent.
        x = x * (style[:, 0] + 1.) + style[:, 1]
        return x
class ResnetBlock_Adain(nn.Module):
    def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):
        super(ResnetBlock_Adain, self).__init__()

        p = 0
        conv1 = []
        if padding_type == 'reflect':
            conv1 += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv1 += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
        self.conv1 = nn.Sequential(*conv1)
        self.style1 = ApplyStyle(latent_size, dim)
        self.act1 = activation

        p = 0
        conv2 = []
        if padding_type == 'reflect':
            conv2 += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv2 += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
        self.conv2 = nn.Sequential(*conv2)
        self.style2 = ApplyStyle(latent_size, dim)

    def forward(self, x, dlatents_in_slice):
        y = self.conv1(x)
        y = self.style1(y, dlatents_in_slice)
        y = self.act1(y)
        y = self.conv2(y)
        y = self.style2(y, dlatents_in_slice)
        out = x + y
        return out
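
# Illustrative shape check (a sketch, not executed at import time): the block is residual
# with same-channel 3x3 convs and padding 1, so input and output shapes always match, e.g.
#   blk = ResnetBlock_Adain(dim=512, latent_size=512, padding_type='reflect')
#   out = blk(torch.randn(2, 512, 28, 28), torch.randn(2, 512))   # -> (2, 512, 28, 28)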
class Generator_Adain_Upsample(nn.Module):
    def __init__(self, input_nc, output_nc, latent_size, n_blocks=6, deep=False,
                 norm_layer=nn.BatchNorm2d,
                 padding_type='reflect',
                 mouth_net_param: dict = None,
                 ):
        assert (n_blocks >= 0)
        super(Generator_Adain_Upsample, self).__init__()

        self.latent_size = latent_size
        self.mouth_net_param = mouth_net_param
        # mouth_net_param defaults to None, so guard before reading it; when the mouth
        # branch is enabled, its feature dim is appended to the identity latent size.
        if mouth_net_param is not None and mouth_net_param.get('use'):
            self.latent_size += mouth_net_param.get('feature_dim')
        activation = nn.ReLU(True)
        self.deep = deep

        self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, kernel_size=7, padding=0),
                                         norm_layer(64), activation)
        ### downsample
        self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
                                   norm_layer(128), activation)
        self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
                                   norm_layer(256), activation)
        self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),
                                   norm_layer(512), activation)
        if self.deep:
            self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
                                       norm_layer(512), activation)

        ### resnet blocks
        BN = []
        for i in range(n_blocks):
            BN += [
                ResnetBlock_Adain(512, latent_size=self.latent_size,
                                  padding_type=padding_type, activation=activation)]
        self.BottleNeck = nn.Sequential(*BN)

        if self.deep:
            self.up4 = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
                nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(512), activation
            )
        self.up3 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256), activation
        )
        self.up2 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128), activation
        )
        self.up1 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64), activation
        )
        self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, kernel_size=7, padding=0))
        # Fixed 2x3 affine matrix for face alignment; only consumed by the (currently
        # commented-out) kornia warp in forward().
        self.register_buffer(
            name="trans_matrix",
            tensor=torch.tensor(
                [
                    [
                        [1.07695457, -0.03625215, -1.56352194],
                        [0.03625215, 1.07695457, -5.32134629],
                    ]
                ],
                requires_grad=False,
            ).float(),
        )
    def forward(self, source, target, net_arc, mouth_net=None):
        """
        source: identity image (or a precomputed identity embedding when net_arc is None).
        target: image that is encoded, modulated by the identity vector in the AdaIN
                bottleneck, and decoded.
        net_arc: identity encoder; `source` is resized to 112x112 and the encoder's
                 L2-normalized output is used as the identity vector. Pass None to supply
                 the embedding directly via `source`.
        mouth_net: optional module run on a crop of the resized source (crop given by
                   mouth_net_param['crop_param']); its output is concatenated to the
                   identity vector.
        """
        x = target  # 3*224*224

        if net_arc is None:
            id_vector = source
        else:
            with torch.no_grad():
                ''' 1. get id '''
                # M = self.trans_matrix.repeat(source.size()[0], 1, 1)
                # source = kornia.geometry.transform.warp_affine(source, M, (256, 256))
                resize_input = F.interpolate(source, size=112, mode="bilinear", align_corners=True)
                id_vector = F.normalize(net_arc(resize_input), dim=-1, p=2)

                ''' 2. get mouth feature '''
                if mouth_net is not None:
                    w1, h1, w2, h2 = self.mouth_net_param.get('crop_param')
                    mouth_input = resize_input[:, :, h1:h2, w1:w2]
                    mouth_feat = mouth_net(mouth_input)
                    id_vector = torch.cat([id_vector, mouth_feat], dim=-1)  # (B, dim_id + dim_mouth)

        skip1 = self.first_layer(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        if self.deep:
            skip4 = self.down3(skip3)
            x = self.down4(skip4)
        else:
            x = self.down3(skip3)

        bot = []
        bot.append(x)
        features = []
        for i in range(len(self.BottleNeck)):
            x = self.BottleNeck[i](x, id_vector)
            bot.append(x)

        if self.deep:
            x = self.up4(x)
            features.append(x)
        x = self.up3(x)
        features.append(x)
        x = self.up2(x)
        features.append(x)
        x = self.up1(x)
        features.append(x)
        x = self.last_layer(x)
        # x = (x + 1) / 2

        # return x, bot, features, dlatents
        return x
| if __name__ == "__main__": | |
| import thop | |
| img = torch.randn(1, 3, 256, 256) | |
| latent = torch.randn(1, 512) | |
| net = Generator_Adain_Upsample(input_nc=3, output_nc=3, latent_size=512, n_blocks=9, | |
| mouth_net_param={"use": False}) | |
| flops, params = thop.profile(net, inputs=(latent, img, None, None), verbose=False) | |
| print('#Params=%.2fM, GFLOPS=%.2f' % (params / 1e6, flops / 1e9)) | |
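
# Minimal inference sketch (illustrative; `net_arc` is a hypothetical pretrained identity
# encoder that maps a 112x112 face crop to a 512-D embedding — forward() does the resize
# internally; no such encoder is provided in this file):
#   net = Generator_Adain_Upsample(input_nc=3, output_nc=3, latent_size=512, n_blocks=9,
#                                  mouth_net_param={"use": False}).eval()
#   source = torch.randn(1, 3, 224, 224)   # identity (source) face
#   target = torch.randn(1, 3, 224, 224)   # attribute (target) face
#   with torch.no_grad():
#       swapped = net(source, target, net_arc=net_arc)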