# poi_util.py
import numpy as np
import random
import imageio
import torch.nn as nn
def patching(clean_sample, attack, pert=None, dataset_nm='CIFAR'):
    '''
    Conduct a patching procedure to generate a backdoored sample.
    **Please make sure the input sample's label is different from the target label.
    clean_sample: clean input image in [0, 1], shape (32, 32, 3)
    attack: 'badnets' stamps a white square patch; any other name loads the
            additive trigger './triggers/<attack>.png'
    pert, dataset_nm: kept for interface compatibility; unused here
    '''
    output = np.copy(clean_sample)
    if attack == 'badnets':
        # stamp a 4x4 white patch near the bottom-right corner
        pat_size = 4
        output[32 - 1 - pat_size:32 - 1, 32 - 1 - pat_size:32 - 1, :] = 1
    else:
        # load the trigger image and rescale it to [0, 1]
        trimg = imageio.imread('./triggers/' + attack + '.png') / 255
        if attack == 'l0_inv':
            # blend the clean image with the trigger through the inverse mask
            mask = 1 - np.transpose(np.load('./triggers/mask.npy'), (1, 2, 0))
            output = clean_sample * mask + trimg
        else:
            # purely additive trigger
            output = clean_sample + trimg
        # clip the result back into the valid pixel range
        output[output < 0] = 0
        output[output > 1] = 1
    return output
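
# A minimal usage sketch for patching(), assuming a synthetic 32x32x3 image in
# [0, 1] stands in for a real CIFAR sample; only 'badnets' runs without trigger
# files on disk, other attacks expect './triggers/<attack>.png' to exist. The
# helper name _demo_patching is illustrative and not part of the original module.
def _demo_patching():
    x_clean = np.random.rand(32, 32, 3).astype(np.float32)
    x_bd = patching(x_clean, 'badnets')  # stamps the 4x4 white corner patch
    assert x_bd.shape == x_clean.shape and float(x_bd.max()) <= 1.0
    return x_bd
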
def poison_dataset(dataset, label, attack, target_lab=6, portion=0.2, unlearn=False, pert=None, dataset_nm='CIFAR'):
    '''
    Poison a fixed portion of the training dataset, following the original work.
    dataset: shape (-1, 32, 32, 3)
    label: shape (-1,)  *not one-hot labels
    '''
    out_set = np.copy(dataset)
    out_lab = np.copy(label)
    if attack == 'badnets_all2all':
        # all-to-all attack: patch a random subset and shift each label to the next class
        for i in random.sample(range(0, dataset.shape[0]), int(dataset.shape[0] * portion)):
            out_set[i] = patching(dataset[i], 'badnets')
            out_lab[i] = label[i] + 1
            # wrap the label around at the number of classes (10 for CIFAR, 43 for GTSRB)
            if dataset_nm == 'CIFAR':
                if out_lab[i] == 10:
                    out_lab[i] = 0
            elif dataset_nm == 'GTSRB':
                if out_lab[i] == 43:
                    out_lab[i] = 0
    else:
        # all-to-one attack: patch a random subset of non-target samples and relabel them to the target class
        indexs = list(np.asarray(np.where(label != target_lab))[0])
        samples_idx = random.sample(indexs, int(dataset.shape[0] * portion))
        for i in samples_idx:
            out_set[i] = patching(dataset[i], attack, pert, dataset_nm=dataset_nm)
            assert out_lab[i] != target_lab
            out_lab[i] = target_lab
    if unlearn:
        # for unlearning, return the poisoned inputs with their original (clean) labels
        return out_set, label
    return out_set, out_lab
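
# A minimal usage sketch for poison_dataset(), assuming a small synthetic dataset
# with 10 samples per class stands in for CIFAR training data. It poisons 20% of
# the non-target-class samples with the 'badnets' patch and relabels them to
# class 6. The helper name _demo_poison_dataset is illustrative only.
def _demo_poison_dataset():
    data = np.random.rand(100, 32, 32, 3).astype(np.float32)
    labels = np.repeat(np.arange(10), 10)  # 10 classes, 10 samples each
    poi_set, poi_lab = poison_dataset(data, labels, 'badnets', target_lab=6, portion=0.2)
    # 20 poisoned samples now carry the target label, on top of the 10 clean class-6 samples
    assert poi_set.shape == data.shape
    assert (poi_lab == 6).sum() == (labels == 6).sum() + 20
    return poi_set, poi_lab
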
def patching_test(dataset, label, attack, target_lab=6, adversarial=False, dataset_nm='CIFAR'):
    """
    Generate an all-poisoned copy of the dataset for evaluating the attack success rate (ASR).
    """
    out_set = np.copy(dataset)
    out_lab = np.copy(label)
    if attack == 'badnets_all2all':
        # all-to-all attack: patch every sample and shift each label to the next class
        for i in range(out_set.shape[0]):
            out_set[i] = patching(dataset[i], 'badnets')
            out_lab[i] = label[i] + 1
            # wrap the label around at the number of classes (10 for CIFAR, 43 for GTSRB)
            if dataset_nm == 'CIFAR':
                if out_lab[i] == 10:
                    out_lab[i] = 0
            elif dataset_nm == 'GTSRB':
                if out_lab[i] == 43:
                    out_lab[i] = 0
    else:
        # all-to-one attack: patch every sample and relabel it to the target class
        for i in range(out_set.shape[0]):
            out_set[i] = patching(dataset[i], attack, dataset_nm=dataset_nm)
            out_lab[i] = target_lab
    if adversarial:
        # return the patched inputs with their original labels
        return out_set, label
    return out_set, out_lab
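
# A minimal usage sketch for patching_test(), assuming random held-out data stands
# in for a real test set. Every sample is patched and relabeled to the target
# class, so a model's accuracy on (asr_set, asr_lab) is the attack success rate
# (ASR). The helper name _demo_patching_test is illustrative only.
def _demo_patching_test():
    test_data = np.random.rand(50, 32, 32, 3).astype(np.float32)
    test_labels = np.random.randint(0, 10, size=50)
    asr_set, asr_lab = patching_test(test_data, test_labels, 'badnets', target_lab=6)
    assert asr_set.shape == test_data.shape and (asr_lab == 6).all()
    return asr_set, asr_lab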