import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim

from utils.criterion import CrossEntropyWithLabelSmooth


def squared_l2_norm(x):
    # Flatten the whole tensor into a single row and return the sum of its
    # squared entries (a one-element tensor: the squared L2 norm of x).
    flattened = x.view(x.unsqueeze(0).shape[0], -1)
    return (flattened ** 2).sum(1)


def l2_norm(x):
    # L2 norm of the whole tensor x.
    return squared_l2_norm(x).sqrt()


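# TRADES objective computed below (per batch B):
#     L = CE(f(x), y) + beta * (1 / |B|) * sum_i KL( f(x_i) || f(x_i') )
# where each adversarial example x_i' approximately maximizes the KL term
# within an epsilon-ball around x_i (l_inf or l2), found by the inner loop.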
def trades_loss(model, x_natural, y, optimizer=None, step_size=0.003, epsilon=0.031,
                perturb_steps=10, beta=1.0, attack='l_inf',
                natural_criterion=nn.CrossEntropyLoss()):
    """
    TRADES training loss (Zhang et al., 2019): cross-entropy on natural examples
    plus a beta-weighted KL robustness term on adversarial examples.
    """
    # Sum the KL divergence over the batch; it is rescaled by 1/batch_size below.
    criterion_kl = nn.KLDivLoss(reduction='sum')
    model.eval()
    batch_size = len(x_natural)

    # Start the attack from a randomly perturbed copy of the natural input.
    x_adv = x_natural.detach() + 0.001 * torch.randn_like(x_natural).detach()
    # Natural predictions are treated as constants during the inner maximization.
    with torch.no_grad():
        p_natural = F.softmax(model(x_natural), dim=1)

    if attack == 'l_inf':
        # PGD ascent on the KL divergence between adversarial and natural predictions.
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1), p_natural)
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            # Project back into the epsilon-ball and the valid pixel range.
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif attack == 'l2':
        delta = 0.001 * torch.randn_like(x_natural).detach()
        delta = Variable(delta.data, requires_grad=True)

        # Optimizer that performs projected gradient ascent on delta.
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)

        for _ in range(perturb_steps):
            adv = x_natural + delta

            # Maximize the KL divergence by minimizing its negative.
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * criterion_kl(F.log_softmax(model(adv), dim=1), p_natural)
            loss.backward()

            # Renormalize the gradient to unit L2 norm per example.
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))

            # Avoid NaN or Inf when the gradient is exactly zero.
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()

            # Project delta so that x_natural + delta stays in [0, 1]
            # and within the L2 ball of radius epsilon.
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        raise ValueError(f'Attack={attack} not supported for TRADES training!')

    model.train()

    x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)

    # Zero any stale parameter gradients before computing the training loss.
    if optimizer is not None:
        optimizer.zero_grad()

    # Calculate the robust loss on the current model state.
    logits_natural = model(x_natural)
    logits_adv = model(x_adv)

    loss_natural = natural_criterion(logits_natural, y)
    loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(logits_adv, dim=1),
                                                    F.softmax(logits_natural, dim=1))
    loss = loss_natural + beta * loss_robust
    return loss
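

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the training pipeline):
# the toy model, batch shape, and hyperparameter values below are assumptions
# chosen so the example runs standalone on CPU; real training code should pass
# its own network, data batch, and optimizer.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Toy classifier standing in for the actual network
    # (assumed 3x32x32 inputs, 10 classes).
    toy_model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d(1),
        nn.Flatten(),
        nn.Linear(8, 10),
    )
    toy_optimizer = optim.SGD(toy_model.parameters(), lr=0.1)

    # Random batch of images in [0, 1] with integer class labels.
    x = torch.rand(4, 3, 32, 32)
    y = torch.randint(0, 10, (4,))

    loss = trades_loss(toy_model, x, y, optimizer=toy_optimizer,
                       step_size=0.003, epsilon=0.031, perturb_steps=10,
                       beta=6.0, attack='l_inf')
    loss.backward()
    toy_optimizer.step()
    print(f'TRADES loss: {loss.item():.4f}')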