# pgd_crnn.py
import os
import torch
import shutil
import utils
from crnn import CRNN
from torch.autograd import Variable
from torchvision import utils as vutils
from dataset import test_dataset_builder
import time
import sys
from utils import Logger
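
# PGD-style adversarial attack against a pretrained CRNN text recognizer:
# for every test image the script takes `iters` gradient-sign steps on the
# CTC loss, saves the original and adversarial images, and records every
# sample whose recognized string changes after the attack.
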
# Hyperparameters
eps = 0.13
alpha = 1
iters = 3
Height = 32
Width = 100
batch_size = 1
use_cuda = True
model_path = './checkpoints/crnn.pth'  # path to the English CRNN model weights
out = './PGD-result'
test_img_path = '/data/hyr/dataset/advGAN-data/100'
Accuracy = './PGD-result/accuracy.txt'
output_path = './PGD-result/perdict.txt'
# alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ#*?'
if not os.path.exists(out):
    os.makedirs(out)
# delete all previous output directories and files
del_list = os.listdir(out)
for f in del_list:
    file_path = os.path.join(out, f)
    if os.path.isfile(file_path):
        os.remove(file_path)
    elif os.path.isdir(file_path):
        shutil.rmtree(file_path)
if not os.path.exists('./PGD-result/output-img/org'):
    os.makedirs('./PGD-result/output-img/org')
if not os.path.exists('./PGD-result/output-img/adv'):
    os.makedirs('./PGD-result/output-img/adv')
if not os.path.exists('./PGD-result/attack-suc-img'):
    os.makedirs('./PGD-result/attack-suc-img')
# Define what device we are using
print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
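# The CRNN is built with len(alphabet) + 1 output classes; the extra class is
# reserved for the CTC blank, which CTCLoss below treats as index 0.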
# create model
model = CRNN(32, 3, len(alphabet) + 1, 256).to(device)
model.eval()
# Load the pretrained weights
model.load_state_dict(torch.load(model_path, map_location=device))
# create the converter; at test time it decodes CTC output paths into final label sequences
converter = utils.strLabelConverter(alphabet, ignore_case=False)
# ctc loss
ctc_loss = torch.nn.CTCLoss(blank=0, reduction='mean', zero_infinity=False)
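# Note: torch.nn.CTCLoss expects per-timestep log-probabilities of shape (T, N, C);
# this assumes the CRNN forward pass already applies log_softmax over the classes.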
# test_dataset
test_dataset = test_dataset_builder(Height, Width, test_img_path)
test_dataloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=batch_size,
    shuffle=False,
    num_workers=4)
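# The attack loop below indexes labels[0] and data[2][0]/data[3][0], so it
# relies on batch_size = 1: each batch holds exactly one image.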
def pgd(model, device, test_dataloader, eps, alpha, iters):
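    """Run an iterative gradient-sign (PGD-style) attack on every test image.

    For each image: take `iters` steps of size eps in the direction of the sign
    of the CTC-loss gradient, clip the result back to [0, 1], save the original
    and adversarial images, and record every sample whose decoded string changes.
    Finally, write the per-sample results and the overall attack success rate.
    """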
    result = dict()
    test_len = len(test_dataloader)
    k = 0
    for i, data in enumerate(test_dataloader):
        images = data[0]
        labels = data[1]
        img_index_ori = data[2][0]
        img_index_adv = data[3][0]
        images = images.to(device)
        # images.requires_grad = True
        # save the original image
        vutils.save_image(images, "./PGD-result/output-img/org/org_{}_{}.png".format(labels[0], img_index_ori), normalize=True)
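        # Inner attack loop: each iteration recomputes the gradient of the CTC
        # loss w.r.t. the current image, takes a step of eps * sign(grad)
        # (the clamp to [-alpha, alpha] is a no-op here since eps < alpha), and
        # clips the perturbed image back to the valid range [0, 1].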
        for j in range(iters):
            images = images.clone().detach().requires_grad_(True)
            # CTCLoss arguments: (preds, targets, preds_size, target_lengths)
            torch.backends.cudnn.enabled = False
            preds = model(images)  # CRNN recognition
            preds_len = torch.IntTensor([preds.size(0)] * preds.size(1))
            labels = [str(lab).lower() for lab in labels]
            targets, target_len = converter.encode(labels)
            # Calculate the loss
            loss = ctc_loss(preds, targets, preds_len, target_len)
            # Zero all existing gradients
            model.zero_grad()
            # Calculate gradients of model in backward pass
            loss.backward()
            # Collect the data gradient
            data_grad = images.grad.data
            # Collect the element-wise sign of the data gradient
            sign_data_grad = data_grad.sign()
            perturbation = torch.clamp(eps * sign_data_grad, min=-alpha, max=alpha)
            images = torch.clamp(images + perturbation, min=0, max=1)
        # save the perturbed image
        adv_image = images
        vutils.save_image(adv_image,
                          "./PGD-result/output-img/adv/{}_{}_adv.png".format(img_index_ori, labels[0]), normalize=True)
        output = model(adv_image)
        _, output = output.max(2)  # take the most likely class index at each timestep
        output = output.transpose(1, 0).contiguous().view(-1)  # flatten into a 1-D index sequence
        output_size = torch.IntTensor([output.size(0)])  # sequence length used for decoding into characters
        sim_output = converter.decode(output.data, output_size.data, raw=False)
        if labels[0] != sim_output:
            k = k + 1
            result[img_index_ori] = "{}: {} -> {}\n".format(img_index_adv, labels[0], sim_output)
            vutils.save_image(adv_image,
                              "./PGD-result/attack-suc-img/{}_{}_adv.png".format(img_index_ori, labels[0]), normalize=True)
    result = sorted(result.items(), key=lambda x: x[0])
    # save the post-attack recognition results in index order
    with open(output_path, 'w') as f:
        for item in result:
            f.write(item[1])
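    # k / test_len is the fraction of samples whose prediction changed,
    # i.e. the attack success rate written to the "accuracy" file below.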
    acc = str(k / test_len)
    print(acc)
    with open(Accuracy, "w") as f:
        f.write(output_path + ' ' + 'accuracy:' + acc + '\n')
if __name__ == '__main__':
    time_start = time.time()
    pgd(model, device, test_dataloader, eps, alpha, iters)
    time_end = time.time()
    time_sum = time_end - time_start
    print('144 sample generation time:' + str(time_sum))