# coding=UTF-8
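"""Diverse Inputs Iterative Fast Gradient Sign Method (DI-FGSM).

An iterative FGSM variant that, with probability ``posi``, passes a randomly
transformed copy of the input (here, a slight random affine rescale) through
the model at each step, which improves the transferability of the resulting
adversarial examples. See Xie et al., "Improving Transferability of
Adversarial Examples with Input Diversity", CVPR 2019.
"""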
import random

import numpy as np
import torch
from torchvision import transforms

from .attack import Attack


class DIFGSM(Attack):
    def __init__(self, model=None, device=None, IsTargeted=None, config=None):
        """
        @description: Diverse Inputs Iterative Fast Gradient Sign Method (DIFGSM)
        @param {
            model: the model under attack
            device: the device to run on (e.g. GPU)
            IsTargeted: whether the attack is targeted
            config: user-supplied parameters for the attack
        }
        @return: None
        """
        super(DIFGSM, self).__init__(model, device, IsTargeted)
        self.criterion = torch.nn.CrossEntropyLoss()
        self._parse_params(config)

    def _parse_params(self, config):
        """
        @description: Parse the attack hyperparameters from the config dict.
        @param {
            epsilon: maximum L-inf perturbation budget
            alpha: step size along the gradient sign direction
            num_steps: number of attack iterations
            posi: probability of applying the input-diversity transform
        }
        @return: None
        """
        self.eps = float(config.get("epsilon", 0.03))
        self.alpha = float(config.get("alpha", 0.01))
        self.num_steps = int(config.get("num_steps", 10))
        self.posi = float(config.get("posi", 0.5))

    def T(self, image):
        """Input-diversity transform: with probability ``posi``, apply a random
        affine rescale (95%-100% of the original size), padding with zeros."""
        if random.random() < self.posi:
            # `fillcolor`/`resample` were removed from torchvision's
            # RandomAffine; `fill` is the current keyword.
            transform = transforms.RandomAffine(degrees=0, scale=(0.95, 1.0), fill=0)
            return transform(image)
        return image

    def generate(self, xs=None, ys=None):
        """
        @description: Generate adversarial examples.
        @param {
            xs: the original samples
            ys: the labels of the samples
        }
        @return: adv_xs {torch.Tensor}
        """
        device = self.device
        targeted = self.IsTargeted
        copy_xs = np.copy(xs.numpy())
        var_ys = torch.as_tensor(ys, device=device)
        # Keep the perturbation inside the L-inf ball of radius eps around xs.
        xs_min, xs_max = copy_xs - self.eps, copy_xs + self.eps

        for _ in range(self.num_steps):
            var_xs = torch.tensor(
                copy_xs, dtype=torch.float, device=device, requires_grad=True
            )
            # Apply the diversity transform on the forward pass only; the
            # gradient is taken w.r.t. the untransformed input var_xs.
            outputs = self.model(self.T(var_xs))
            if targeted:
                # Targeted attack: minimise the loss towards the target label.
                loss = -self.criterion(outputs, var_ys)
            else:
                loss = self.criterion(outputs, var_ys)
            grad = torch.autograd.grad(
                loss, var_xs, retain_graph=False, create_graph=False
            )[0]
            grad_sign = grad.data.sign().cpu().numpy()
            delta = self.alpha * grad_sign
            copy_xs = copy_xs + delta
            copy_xs = np.clip(copy_xs, xs_min, xs_max)
            copy_xs = np.clip(copy_xs, 0.0, 1.0)

        adv_xs = torch.from_numpy(copy_xs)
        return adv_xs
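

if __name__ == "__main__":
    # Smoke-test sketch (not part of the original file): a tiny random
    # classifier and random data, just to exercise the attack end to end.
    # It assumes the package's Attack base class stores model/device/IsTargeted
    # as the attributes used above; run it as `python -m <package>.difgsm` so
    # the relative import resolves.
    import torch.nn as nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).to(device).eval()
    images = torch.rand(4, 3, 32, 32)  # inputs assumed to lie in [0, 1]
    labels = torch.randint(0, 10, (4,))
    config = {"epsilon": 0.03, "alpha": 0.01, "num_steps": 10, "posi": 0.5}
    attack = DIFGSM(model=net, device=device, IsTargeted=False, config=config)
    adv = attack.generate(xs=images, ys=labels)
    # The L-inf perturbation should not exceed epsilon.
    print(adv.shape, float((adv - images).abs().max()))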