├── .idea
│ ├── .gitignore
│ ├── .name
│ ├── AMD.iml
│ ├── deployment.xml
│ ├── inspectionProfiles
│ │ ├── Project_Default.xml
│ │ └── profiles_settings.xml
│ ├── misc.xml
│ ├── modules.xml
│ ├── sshConfigs.xml
│ └── webServers.xml
├── AMD.py
├── Attacks
│ ├── AttackMethods
│ │ ├── AttackUtils.py
│ │ ├── BIM.py
│ │ ├── DEEPFOOL.py
│ │ ├── FGSM.py
│ │ ├── PGD.py
│ │ ├── UMIFGSM.py
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── AttackUtils.cpython-37.pyc
│ │ │ ├── DEEPFOOL.cpython-37.pyc
│ │ │ ├── FGSM.cpython-37.pyc
│ │ │ ├── PGD.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── attacks.cpython-37.pyc
│ │ └── attacks.py
│ ├── AutoAttack.py
│ ├── BIM_Generation.py
│ ├── DeepFool_Generation.py
│ ├── FGSM_Generation.py
│ ├── Generation.py
│ ├── PGD_Generation.py
│ ├── README.md
│ ├── UMIFGSM_Generation.py
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── Generation.cpython-37.pyc
│ │ └── __init__.cpython-37.pyc
│ └── autoattack
│   ├── __init__.py
│   ├── autoattack.py
│   ├── autopgd_base.py
│   ├── checks.py
│   ├── fab_base.py
│   ├── fab_projections.py
│   ├── fab_pt.py
│   ├── fab_tf.py
│   ├── other_utils.py
│   ├── square.py
│   ├── state.py
│   ├── utils_tf.py
│   └── utils_tf2.py
├── CleanDatasets
│ ├── CandidatesSelection.py
│ ├── README.md
│ └── __init__.py
├── Defenses
│ ├── DD_Test.py
│ ├── DefenseMethods
│ │ ├── DD.py
│ │ ├── External
│ │ │ ├── InputTransformations.py
│ │ │ └── __init__.py
│ │ ├── PAT.py
│ │ ├── __init__.py
│ │ └── defenses.py
│ ├── PAT_Test.py
│ ├── __init__.py
│ ├── defense.py
│ ├── trades.py
│ └── train_trades.py
├── README.md
├── Utils
│ ├── TrainTest.py
│ ├── __init__.py
│ └── dataset.py
├── __pycache__
│ ├── args.cpython-37.pyc
│ └── utils.cpython-37.pyc
├── args.py
├── kd_losses
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ └── logits.cpython-37.pyc
│ └── logits.py
├── models
│ ├── Alexnet.py
│ ├── CNN1D.py
│ ├── CNN2D.py
│ ├── LeNet.py
│ ├── RRR.py
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── Alexnet.cpython-37.pyc
│ │ ├── CNN1D.cpython-37.pyc
│ │ ├── CNN2D.cpython-37.pyc
│ │ ├── LeNet.cpython-37.pyc
│ │ ├── __init__.cpython-37.pyc
│ │ ├── gru.cpython-37.pyc
│ │ ├── lstm.cpython-37.pyc
│ │ ├── mcldnn.cpython-37.pyc
│ │ ├── network.cpython-37.pyc
│ │ └── vgg16.cpython-37.pyc
│ ├── gru.py
│ ├── lstm.py
│ ├── mcldnn.py
│ ├── mobilenet.py
│ ├── network.py
│ ├── resnet.py
│ ├── train.py
│ ├── train.sh
│ ├── train_par.sh
│ ├── vgg.py
│ └── vgg16.py
├── order
│ ├── ParaTestMob.sh
│ ├── __init__.py
│ ├── def.sh
│ ├── signaldB.sh
│ ├── signaldBr.sh
│ └── testattack.sh
├── raw_model_training
│ ├── Alexnet.py
│ ├── CNN1D.py
│ ├── CNN2D.py
│ ├── LeNet.py
│ ├── RRR.py
│ ├── __init__.py
│ ├── gru.py
│ ├── lstm.py
│ ├── mcldnn.py
│ ├── mobilenet.py
│ ├── mobilenet_2d.py
│ ├── resnet.py
│ ├── test.py
│ ├── test.sh
│ ├── train.py
│ ├── train.sh
│ ├── train_par.sh
│ ├── vgg.py
│ └── vgg16.py
├── requirement.txt
└── utils.py
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/.name:
--------------------------------------------------------------------------------
1 | PGD_Generation.py
--------------------------------------------------------------------------------
/.idea/AMD.iml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/.idea/deployment.xml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/.idea/sshConfigs.xml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/.idea/webServers.xml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/Attacks/AttackMethods/AttackUtils.py:
--------------------------------------------------------------------------------
1 | # import matplotlib.pyplot as plt
2 | import numpy as np
3 | import torch
4 | from torch.autograd import Variable
5 |
6 |
7 | def tensor2variable(x=None, device=None, requires_grad=False):
8 |     """
9 |     Move a tensor onto the target device and wrap it as a Variable.
10 |     :param x: input tensor
11 |     :param device: target torch device
12 |     :param requires_grad: whether the returned Variable tracks gradients
13 |     :return: Variable located on the target device
14 |     """
15 | x = x.to(device)
16 | return Variable(x, requires_grad=requires_grad)
17 |
18 |
19 | def predict(model=None, samples=None, device=None):
20 |     """
21 |     Run the model on a batch of samples in eval mode and return the raw predictions.
22 |     :param model: classifier to evaluate
23 |     :param samples: batch of inputs (NumPy array)
24 |     :param device: torch device to run on
25 |     :return: model outputs (logits) for the batch
26 |     """
27 | model.eval()
28 | model = model.to(device)
29 | copy_samples = np.copy(samples)
30 | var_samples = torch.tensor(copy_samples).to(device)
31 | # var_samples = tensor2variable(torch.from_numpy(copy_samples), device=device, requires_grad=True)
32 | predictions = model(var_samples.float())
33 | return predictions
34 |
--------------------------------------------------------------------------------
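For orientation, a minimal usage sketch of the helper above; the toy model, the (2, 128) I/Q input shape, and the 11-class output are illustrative assumptions, not part of the repository:

```python
import numpy as np
import torch
import torch.nn as nn

from Attacks.AttackMethods.AttackUtils import predict

# Hypothetical stand-in classifier; any nn.Module with a matching input shape works.
model = nn.Sequential(nn.Flatten(), nn.Linear(2 * 128, 11))

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
x_batch = np.random.randn(4, 2, 128).astype(np.float32)  # 4 signals, assumed shape

logits = predict(model=model, samples=x_batch, device=device)  # forward pass in eval mode
pred_classes = torch.max(logits, 1)[1].cpu().numpy()           # hard labels
```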
/Attacks/AttackMethods/BIM.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from Attacks.AttackMethods.AttackUtils import tensor2variable
7 | from Attacks.AttackMethods.attacks import Attack
8 |
9 |
10 | class BIMAttack(Attack):
11 |
12 | def __init__(self, model=None, epsilon=None, eps_iter=None, num_steps=5):
13 |         """
14 |         Basic Iterative Method (Kurakin et al., 2017).
15 |         :param model: target model
16 |         :param epsilon: maximum total perturbation (L-inf budget)
17 |         :param eps_iter: per-iteration step size
18 |         :param num_steps: number of iterations
19 |         """
20 | super(BIMAttack, self).__init__(model)
21 | self.model = model
22 |
23 | self.epsilon = epsilon
24 | self.epsilon_iter = eps_iter
25 | self.num_steps = num_steps
26 |
27 | def perturbation(self, samples, ys, device):
28 |         """
29 |         Iteratively perturb a batch of samples with the BIM update.
30 |         :param samples: clean inputs (NumPy array)
31 |         :param ys: one-hot true labels
32 |         :param device: torch device
33 |         :return: adversarial examples, clipped to the epsilon ball and to [0, 1]
34 |         """
35 | copy_samples = np.copy(samples)
36 | self.model.to(device)
37 | self.model.train()
38 | for index in range(self.num_steps):
39 | var_samples = tensor2variable(torch.from_numpy(copy_samples), device=device, requires_grad=True)
40 | var_ys = tensor2variable(torch.LongTensor(ys), device=device)
41 |
42 | # self.model.eval()
43 | preds = self.model(var_samples)
44 | loss_fun = torch.nn.CrossEntropyLoss()
45 | loss = loss_fun(preds, torch.max(var_ys, 1)[1])
46 | loss.backward()
47 |
48 | gradient_sign = var_samples.grad.data.cpu().sign().numpy()
49 | copy_samples = copy_samples + self.epsilon_iter * gradient_sign
50 |
51 | copy_samples = np.clip(copy_samples, samples - self.epsilon, samples + self.epsilon)
52 | copy_samples = np.clip(copy_samples, 0.0, 1.0)
53 | return copy_samples
54 |
55 | def batch_perturbation(self, xs, ys, batch_size, device):
56 |         """
57 |         Run the attack batch by batch.
58 |         :param xs: all clean inputs
59 |         :param ys: corresponding one-hot labels
60 |         :param batch_size: attack batch size
61 |         :param device: torch device
62 |         :return: (adversarial examples, model predictions on them)
63 |         """
64 | assert len(xs) == len(ys), "The lengths of samples and its ys should be equal"
65 | from Attacks.AttackMethods.AttackUtils import predict
66 |
67 | adv_labels_all = []
68 | adv_sample = []
69 | number_batch = int(math.ceil(len(xs) / batch_size))
70 | for index in range(number_batch):
71 | start = index * batch_size
72 | end = min((index + 1) * batch_size, len(xs))
73 | print('\r===> in batch {:>2}, {:>4} ({:>4} in total) nature examples are perturbed ... '.format(index, end - start, end), end=' ')
74 |
75 | batch_adv_images = self.perturbation(xs[start:end], ys[start:end], device)
76 | adv_sample.extend(batch_adv_images)
77 | adv_labels = predict(model=self.model, samples=batch_adv_images, device=device)
78 | adv_labels = torch.max(adv_labels, 1)[1]
79 | adv_labels = adv_labels.cpu().numpy()
80 | adv_labels_all.extend(adv_labels)
81 |         return np.array(adv_sample), np.array(adv_labels_all)
82 |
--------------------------------------------------------------------------------
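A hedged driver sketch for `BIMAttack.batch_perturbation`; the model, the input shapes, and the one-hot labels below are illustrative assumptions:

```python
import numpy as np
import torch
import torch.nn as nn

from Attacks.AttackMethods.BIM import BIMAttack

model = nn.Sequential(nn.Flatten(), nn.Linear(2 * 128, 11))   # hypothetical classifier
device = torch.device('cpu')

xs = np.random.rand(8, 2, 128).astype(np.float32)             # clean inputs in [0, 1]
ys = np.eye(11, dtype=np.int64)[np.random.randint(0, 11, 8)]  # one-hot labels, as perturbation() expects

attacker = BIMAttack(model=model, epsilon=0.1, eps_iter=0.01, num_steps=15)
adv_xs, adv_preds = attacker.batch_perturbation(xs, ys, batch_size=4, device=device)
print(adv_xs.shape, adv_preds.shape)  # (8, 2, 128) (8,)
```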
/Attacks/AttackMethods/DEEPFOOL.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from torch.autograd.gradcheck import zero_gradients
4 |
5 | from Attacks.AttackMethods.AttackUtils import tensor2variable
6 | from Attacks.AttackMethods.attacks import Attack
7 | from args import args
8 |
9 | class DeepFoolAttack(Attack):
10 |
11 | def __init__(self, model=None, overshoot=0.02, max_iters=50):
12 |         """
13 |         DeepFool attack (Moosavi-Dezfooli et al., 2016).
14 |         :param model: target model
15 |         :param overshoot: factor used to push the sample past the decision boundary
16 |         :param max_iters: maximum number of iterations per sample
17 |         """
18 | super(DeepFoolAttack, self).__init__(model=model)
19 | self.model = model
20 |
21 | self.overshoot = overshoot
22 | self.max_iterations = max_iters
23 |
24 | def perturbation_single(self, sample, device):
25 |         """
26 |         Perturb a single sample until its predicted class changes.
27 |         :param sample: one clean input, batch dimension of size 1
28 |         :param device: torch device
29 |         :return: (adversarial example, accumulated perturbation, iterations used)
30 |         """
31 | assert sample.shape[0] == 1, 'only perturbing one sample'
32 | copy_sample = np.copy(sample)
33 | var_sample = tensor2variable(torch.from_numpy(copy_sample), device=device, requires_grad=True).float()
34 |
35 | # self.model.eval()
36 | self.model.train()
37 | # print(var_sample.shape)
38 | if args.model == "MCLDNN":
39 | prediction = torch.unsqueeze(self.model(var_sample),dim=0)
40 | else:
41 | prediction = self.model(var_sample)
42 | original = torch.max(prediction, 1)[1]
43 | current = original
44 |
45 |         # indices of the prediction in descending order
46 | I = np.argsort(prediction.data.cpu().numpy() * -1)
47 | perturbation_r_tot = np.zeros(copy_sample.shape, dtype=np.float32)
48 | iteration = 0
49 | while (original == current) and (iteration < self.max_iterations):
50 |
51 |             # prediction for the adversarial example in the i-th iteration
52 | zero_gradients(var_sample)
53 | # self.model.eval()
54 | self.model.train()
55 | if args.model == "MCLDNN":
56 | f_kx = torch.unsqueeze(self.model(var_sample),dim=0)
57 | else:
58 | f_kx = self.model(var_sample)
59 | current = torch.max(f_kx, 1)[1]
60 | # gradient of the original example
61 | f_kx[0, I[0, 0]].backward(retain_graph=True)
62 | grad_original = np.copy(var_sample.grad.data.cpu().numpy())
63 |
64 | # calculate the w_k and f_k for every class label
65 | closest_dist = 1e10
66 |             for k in range(1, 10):  # compares against the next 9 most-likely classes; adjust to the model's label count
67 | # gradient of adversarial example for k-th label
68 | zero_gradients(var_sample)
69 | f_kx[0, I[0, k]].backward(retain_graph=True)
70 | grad_current = var_sample.grad.data.cpu().numpy().copy()
71 | # update w_k and f_k
72 | w_k = grad_current - grad_original
73 | f_k = (f_kx[0, I[0, k]] - f_kx[0, I[0, 0]]).detach().data.cpu().numpy()
74 | # find the closest distance and the corresponding w_k
75 | dist_k = np.abs(f_k) / (np.linalg.norm(w_k.flatten()) + 1e-15)
76 | if dist_k < closest_dist:
77 | closest_dist = dist_k
78 | closest_w = w_k
79 |
80 | # accumulation of perturbation
81 | r_i = (closest_dist + 1e-4) * closest_w / np.linalg.norm(closest_w)
82 | perturbation_r_tot = perturbation_r_tot + r_i
83 |
84 | tmp_sample = np.clip((1 + self.overshoot) * perturbation_r_tot + sample, 0.0, 1.0)
85 | var_sample = tensor2variable(torch.from_numpy(tmp_sample), device=device, requires_grad=True)
86 |
87 | iteration += 1
88 |
89 | adv = np.clip(sample + (1 + self.overshoot) * perturbation_r_tot, 0.0, 1.0)
90 | return adv, perturbation_r_tot, iteration
91 |
92 | def perturbation(self, xs, device):
93 | """
94 |
95 | :param xs: batch of samples
96 | :param device:
97 | :return: batch of adversarial samples
98 | """
99 | from Attacks.AttackMethods.AttackUtils import predict
100 | print('\nThe DeepFool attack perturbs the samples one by one ......\n')
101 | adv_samples = []
102 | adv_labels_all = []
103 | for i in range(len(xs)):
104 | adv_image, _, _ = self.perturbation_single(sample=xs[i: i + 1], device=device)
105 | adv_samples.extend(adv_image)
106 |
107 | # adv_labels = predict(model=self.model, samples=adv_samples, device=device)
108 | # adv_labels = torch.max(adv_labels, 1)[1]
109 | # adv_labels = adv_labels.cpu().numpy()
110 | # adv_labels_all.extend(adv_labels)
111 | return np.array(adv_samples)
112 | # , np.array(adv_labels_all)
--------------------------------------------------------------------------------
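Note that `zero_gradients` was removed from `torch.autograd.gradcheck` in PyTorch 1.9, so this file only imports on older versions (consistent with the cpython-37 caches in the tree). On newer PyTorch, a drop-in shim covering the single-tensor use above would be:

```python
# Minimal replacement for torch.autograd.gradcheck.zero_gradients (removed in
# PyTorch >= 1.9). Clears the gradient accumulated on a leaf tensor in place.
def zero_gradients(x):
    if x.grad is not None:
        x.grad.detach_()
        x.grad.zero_()
```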
/Attacks/AttackMethods/FGSM.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from Attacks.AttackMethods.AttackUtils import tensor2variable
7 | from Attacks.AttackMethods.attacks import Attack
8 |
9 |
10 | class FGSMAttack(Attack):
11 | def __init__(self, model=None, epsilon=None):
12 |         """Fast Gradient Sign Method (Goodfellow et al., 2015).
13 |         :param model: target model
14 |         :param epsilon: perturbation magnitude (L-inf)
15 |         """
16 | super(FGSMAttack, self).__init__(model)
17 | self.model = model
18 |
19 | self.epsilon = epsilon
20 |
21 | def perturbation(self, samples, ys, device):
22 |         """
23 |         Generate adversarial examples with a single FGSM step.
24 |         :param samples: clean inputs (NumPy array)
25 |         :param ys: one-hot true labels
26 |         :param device: torch device
27 |         :return: adversarial examples, clipped to [-1, 1]
28 |         """
29 | copy_samples = np.copy(samples)
30 | self.model.train()
31 | var_samples = tensor2variable(torch.from_numpy(copy_samples), device=device, requires_grad=True)
32 | var_ys = tensor2variable(torch.LongTensor(ys), device=device)
33 |
34 | # self.model.eval()
35 | preds = self.model(var_samples)
36 | loss_fun = torch.nn.CrossEntropyLoss()
37 | loss = loss_fun(preds, torch.max(var_ys, 1)[1])
38 |
39 | loss.backward()
40 | gradient_sign = var_samples.grad.data.cpu().sign().numpy()
41 |
42 | adv_samples = copy_samples + self.epsilon * gradient_sign
43 | # print(adv_samples,'grasign')
44 |
45 | adv_samples = np.clip(adv_samples, -1.0, 1.0)
46 | # print(adv_samples, '1111111111111111111')
47 | return adv_samples
48 |
49 | def batch_perturbation(self, xs, ys, batch_size, device):
50 |         """
51 |         Run the attack batch by batch.
52 |         :param xs: all clean inputs
53 |         :param ys: corresponding one-hot labels
54 |         :param batch_size: attack batch size
55 |         :param device: torch device
56 |         :return: (adversarial examples, model predictions on them)
57 |         """
58 | # assert len(xs) == len(ys), "The lengths of samples and its ys should be equal"
59 | #
60 | # adv_sample = []
61 | # number_batch = int(math.ceil(len(xs) / batch_size))
62 | # for index in range(number_batch):
63 | # start = index * batch_size
64 | # end = min((index + 1) * batch_size, len(xs))
65 | # print('\r===> in batch {:>2}, {:>4} ({:>4} in total) nature examples are perturbed ... '.format(index, end - start, end), end=' ')
66 | #
67 | # batch_adv_images = self.perturbation(xs[start:end], ys[start:end], device)
68 | # adv_sample.extend(batch_adv_images)
69 | # return np.array(adv_sample)
70 | from Attacks.AttackMethods.AttackUtils import predict
71 | assert len(xs) == len(ys), "The lengths of samples and its ys should be equal"
72 |
73 | adv_sample = []
74 | adv_labels_all = []
75 | number_batch = int(math.ceil(len(xs) / batch_size))
76 | for index in range(number_batch):
77 | start = index * batch_size
78 | end = min((index + 1) * batch_size, len(xs))
79 | print('\r===> in batch {:>2}, {:>4} ({:>4} in total) nature examples are perturbed ... '.format(index, end - start, end), end=' ')
80 |
81 | batch_adv_images = self.perturbation(xs[start:end], ys[start:end], device)
82 | adv_sample.extend(batch_adv_images)
83 |
84 | adv_labels = predict(model=self.model, samples=batch_adv_images, device=device)
85 | adv_labels = torch.max(adv_labels, 1)[1]
86 | adv_labels = adv_labels.cpu().numpy()
87 | # if adv_labels == ys[index]:
88 | adv_labels_all.extend(adv_labels)
89 |
90 | return np.array(adv_sample), np.array(adv_labels_all)
--------------------------------------------------------------------------------
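Condensed, the update implemented above is `x_adv = clip(x + epsilon * sign(dL/dx))`. A self-contained restatement (tensor inputs and integer class labels are assumed; the function name is ours, not the repository's API):

```python
import torch
import torch.nn.functional as F

def fgsm_step(model, x, y, epsilon):
    """One untargeted FGSM step; x is a float tensor, y holds integer class labels."""
    x = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x), y)
    loss.backward()
    # FGSMAttack.perturbation clips to [-1, 1] (signal data); BIM/UMIFGSM clip to [0, 1].
    return (x + epsilon * x.grad.sign()).clamp(-1.0, 1.0).detach()
```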
/Attacks/AttackMethods/UMIFGSM.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from Attacks.AttackMethods.AttackUtils import tensor2variable
7 | from Attacks.AttackMethods.attacks import Attack
8 |
9 |
10 | class UMIFGSMAttack(Attack):
11 |
12 | def __init__(self, model=None, epsilon=None, eps_iter=None, num_steps=5, decay_factor=1.0):
13 |         """
14 |         :param model: target model
15 |         :param epsilon: maximum total perturbation (L-inf budget)
16 |         :param eps_iter: per-iteration step size
17 |         :param num_steps: number of iterations
18 |         :param decay_factor: momentum decay factor (mu in MI-FGSM)
19 |         """
20 | super(UMIFGSMAttack, self).__init__(model)
21 | self.model = model
22 |
23 | self.epsilon = epsilon
24 | self.epsilon_iter = eps_iter
25 | self.num_steps = num_steps
26 | self.decay_factor = decay_factor
27 |
28 | def perturbation(self, samples, ys, device):
29 |         """
30 |         Iteratively perturb a batch of samples using momentum-accumulated gradients.
31 |         :param samples: clean inputs (NumPy array)
32 |         :param ys: one-hot true labels
33 |         :param device: torch device
34 |         :return: adversarial examples, clipped to the epsilon ball and to [0, 1]
35 |         """
36 |
37 | copy_samples = np.copy(samples)
38 | momentum = 0
39 | self.model.train()
40 | for index in range(self.num_steps):
41 | var_samples = tensor2variable(torch.from_numpy(copy_samples), device=device, requires_grad=True)
42 | var_ys = tensor2variable(torch.LongTensor(ys), device=device)
43 |
44 | # obtain the gradient
45 | # self.model.eval()
46 | preds = self.model(var_samples)
47 | loss_fun = torch.nn.CrossEntropyLoss()
48 | loss = loss_fun(preds, torch.max(var_ys, 1)[1])
49 | loss.backward()
50 | gradient = var_samples.grad.data.cpu().numpy()
51 |
52 | # update the momentum in the gradient direction
53 | momentum = self.decay_factor * momentum + gradient
54 | # update the (t+1) adversarial example
55 | copy_samples = copy_samples + self.epsilon_iter * np.sign(momentum)
56 | copy_samples = np.clip(copy_samples, samples - self.epsilon, samples + self.epsilon)
57 | copy_samples = np.clip(copy_samples, 0.0, 1.0)
58 |
59 | return copy_samples
60 |
61 | def batch_perturbation(self, xs, ys, batch_size, device):
62 |         """
63 |         Run the attack batch by batch.
64 |         :param xs: all clean inputs
65 |         :param ys: corresponding one-hot labels
66 |         :param batch_size: attack batch size
67 |         :param device: torch device
68 |         :return: (adversarial examples, model predictions on them)
69 |         """
70 | assert len(xs) == len(ys), "The lengths of samples and its ys should be equal"
71 | from Attacks.AttackMethods.AttackUtils import predict
72 |
73 | adv_sample = []
74 |         adv_labels_all = []
75 | number_batch = int(math.ceil(len(xs) / batch_size))
76 | for index in range(number_batch):
77 | start = index * batch_size
78 | end = min((index + 1) * batch_size, len(xs))
79 | print('\r===> in batch {:>2}, {:>4} ({:>4} in total) nature examples are perturbed ... '.format(index, end - start, end), end=' ')
80 |
81 | batch_adv_images = self.perturbation(xs[start:end], ys[start:end], device)
82 |
83 | adv_sample.extend(batch_adv_images)
84 | adv_labels = predict(model=self.model, samples=batch_adv_images, device=device)
85 | adv_labels = torch.max(adv_labels, 1)[1]
86 | adv_labels = adv_labels.cpu().numpy()
87 |             adv_labels_all.extend(adv_labels)
88 |         return np.array(adv_sample), np.array(adv_labels_all)
89 |
--------------------------------------------------------------------------------
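One caveat worth noting: MI-FGSM as published (Dong et al., 2018) normalizes each gradient by its L1 norm before accumulating it into the momentum, whereas `perturbation` above accumulates the raw gradient. A sketch of the paper's update, for comparison (the helper name is ours):

```python
import numpy as np

def momentum_update(momentum, gradient, decay_factor=1.0):
    """g_{t+1} = mu * g_t + grad / ||grad||_1, with per-sample L1 normalization."""
    l1 = np.abs(gradient).reshape(gradient.shape[0], -1).sum(axis=1)
    l1 = l1.reshape(-1, *([1] * (gradient.ndim - 1))) + 1e-12  # broadcastable per-sample norms
    return decay_factor * momentum + gradient / l1
```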
/Attacks/AttackMethods/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/AttackMethods/__init__.py
--------------------------------------------------------------------------------
/Attacks/AttackMethods/__pycache__/AttackUtils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/AttackMethods/__pycache__/AttackUtils.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/AttackMethods/__pycache__/DEEPFOOL.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/AttackMethods/__pycache__/DEEPFOOL.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/AttackMethods/__pycache__/FGSM.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/AttackMethods/__pycache__/FGSM.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/AttackMethods/__pycache__/PGD.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/AttackMethods/__pycache__/PGD.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/AttackMethods/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/AttackMethods/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/AttackMethods/__pycache__/attacks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/AttackMethods/__pycache__/attacks.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/AttackMethods/attacks.py:
--------------------------------------------------------------------------------
1 | from abc import ABCMeta
2 | from abc import abstractmethod
3 |
4 |
5 | class Attack(object):
6 | __metaclass__ = ABCMeta
7 |
8 | def __init__(self, model):
9 | self.model = model
10 |
11 | @abstractmethod
12 | def perturbation(self):
13 | print("Abstract Method of Attacks is not implemented")
14 | raise NotImplementedError
15 |
--------------------------------------------------------------------------------
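Every attack in this folder plugs into this base class by implementing `perturbation`; an illustrative (do-nothing) subclass:

```python
from Attacks.AttackMethods.attacks import Attack

class IdentityAttack(Attack):
    """Illustrative placeholder only: returns the inputs unchanged."""

    def perturbation(self, samples, device):
        return samples
```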
/Attacks/AutoAttack.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import pathlib
4 | from pathlib import Path
5 | import warnings
6 |
7 | import numpy as np
8 | import torch
9 | import torch.nn as nn
10 | import torchvision.datasets as datasets
11 | import torch.utils.data as data
12 | import torchvision.transforms as transforms
13 |
14 | import sys
15 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
16 |
17 | from models.network import define_model
18 | from utils import load_pretrained_model
19 | from sklearn.preprocessing import OneHotEncoder
20 | from args import args
21 |
22 | # sys.path.insert(0, '..')
23 |
24 |
25 | # write results to csv
26 | def write_result_to_csv(**kwargs):
27 | results = pathlib.Path("../AdversarialExampleDatasets") / "results.csv"
28 |
29 | if not results.exists():
30 | results.write_text(
31 | "MODEL, "
32 | "DATA, "
33 | "SNR, "
34 | "NOTE, "
35 | "ATTACK, "
36 | "#ADV-DATA, "
37 | "Robust-ACC\n "
38 | )
39 |
40 |     with open(results, "a+") as f:  # "a+": open for appending (read/write)
41 | f.write(
42 | ("{model}, "
43 | "{dataset}, "
44 | "{snr}, "
45 | "{note}, "
46 | "{attack}, "
47 | "{number}, "
48 | "{robustacc}\n"
49 | ).format(**kwargs)
50 | )
51 |
52 |
53 | if __name__ == '__main__':
54 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
55 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
56 | print("CUDA:", args.gpu_index)
57 |
58 | # load model
59 | # raw_model_location = "/home/zjut/public/signal/wzw/KD/DefenseEnhancedModels/PAT/128_CNN1Donly_eps0.06/model_best.pth_epoch199.tar"
60 | # raw_model_location = "/home/zjut/public/signal/wzw/KD/results/Student_model/CNN1D_AMD_200.tar"
61 | raw_model_location = args.location
62 | raw_model = define_model(name=args.model)
63 | # print(raw_model.state_dict().keys())
64 | # raw_model.load_state_dict(torch.load(raw_model_location,map_location=f"cuda:{args.gpu_index }"))#["net"]
65 | checkpoint = torch.load(raw_model_location, map_location='cuda:{}'.format(args.gpu_index))
66 | load_pretrained_model(raw_model, checkpoint['net'])
67 | # load_pretrained_model(raw_model, checkpoint)
68 |
69 | raw_model.eval()
70 |
71 | # load data
72 | transform_list = [transforms.ToTensor()]
73 | transform_chain = transforms.Compose(transform_list)
74 |
75 | # item = datasets.CIFAR10(root=args.data_dir, train=False, transform=transform_chain, download=True)
76 | # test_loader = data.DataLoader(item, batch_size=1000, shuffle=False, num_workers=0)
77 |
78 | # test_loader = get_testloader(batch_size=128, shuffle=False, num_worker=1)
79 |
80 | # create save dir
81 | if not os.path.exists(args.save_dir):
82 | os.makedirs(args.save_dir)
83 |
84 | # load attack
85 | from autoattack import AutoAttack
86 |
87 | adversary = AutoAttack(raw_model, norm=args.norm, eps=args.epsilon, log_path=args.log_path,
88 | version=args.version, device=device)
89 |
90 | x_test = np.load('/home/zjut/public/signal/wzw/KD/CleanDatasets/{}_{}/128/128_inputs.npy'.format(args.model, args.note))
91 | y_test = np.load('/home/zjut/public/signal/wzw/KD/CleanDatasets/{}_{}/128/128_labels.npy'.format(args.model, args.note))
92 | y_test = np.argmax(y_test, axis=1)
93 |
94 | x_test = torch.tensor(x_test).to(device)
95 | y_test = torch.tensor(y_test).to(device)
96 |
97 | # example of custom version
98 | if args.version == 'custom':
99 | adversary.attacks_to_run = ['apgd-ce', 'fab']
100 | adversary.apgd.n_restarts = 2
101 | adversary.fab.n_restarts = 2
102 |
103 | print("AA eps: ", args.epsilon)
104 | # run attack and save images
105 | with torch.no_grad():
106 | if not args.individual:
107 | adv_complete, robust_accuracy = adversary.run_standard_evaluation(x_test[:args.n_ex], y_test[:args.n_ex],
108 | bs=args.batch_size, state_path=args.state_path)
109 |
110 | torch.save({'adv_complete': adv_complete}, '{}/{}_{}_1_{}_eps_{:.3f}.pth'.format(
111 | args.save_dir, 'aa', args.version, adv_complete.shape[0], args.epsilon))
112 |
113 | else:
114 | # individual version, each attack is run on all test points
115 | adv_complete = adversary.run_standard_evaluation_individual(x_test[:args.n_ex],
116 | y_test[:args.n_ex], bs=args.batch_size)
117 |
118 |             torch.save(adv_complete, '{}/{}_{}_individual_1_{}_eps_{:.5f}.pth'.format(
119 |                 args.save_dir, 'aa', args.version, args.n_ex, args.epsilon))
120 |
121 |     if not args.individual:  # robust_accuracy is only computed by the standard evaluation
122 |         write_result_to_csv(
123 |             model=args.model,
124 |             dataset=args.dataset,
125 |             snr=args.db, note=args.note,
126 |             attack="AA",
127 |             number=args.n_ex,
128 |             robustacc=robust_accuracy * 100
129 |         )
130 |
131 |
--------------------------------------------------------------------------------
/Attacks/BIM_Generation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | import sys
5 |
6 | import numpy as np
7 | import torch
8 |
9 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
10 | from Attacks.Generation import Generation
11 | from Attacks.AttackMethods.BIM import BIMAttack
12 | from Attacks.AttackMethods.AttackUtils import predict
13 | from args import args
14 | from Generation import write_result_to_csv
15 |
16 | class BIMGeneration(Generation):
17 |
18 | def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device,
19 | attack_batch_size, eps, eps_iter, num_steps):
20 | super(BIMGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device)
21 | self.attack_batch_size = attack_batch_size
22 |
23 | self.epsilon = eps
24 | self.epsilon_iter = eps_iter
25 | self.num_steps = num_steps
26 |
27 | def generate(self):
28 | attacker = BIMAttack(model=self.raw_model, epsilon=self.epsilon, eps_iter=self.epsilon_iter, num_steps=self.num_steps)
29 | adv_samples, adv_labels = attacker.batch_perturbation(xs=self.nature_samples, ys=self.labels_samples, batch_size=self.attack_batch_size,
30 | device=self.device)
31 |
32 | np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
33 | np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
34 | np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)
35 |
36 | mis = 0
37 | for i in range(len(adv_samples)):
38 | if self.labels_samples[i].argmax(axis=0) != adv_labels[i]:
39 | mis = mis + 1
40 | print('\nFor **{}** on **{}**: misclassification ratio is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset, mis, len(adv_samples),
41 | mis / len(adv_labels) * 100))
42 |         return mis / len(adv_labels) * 100, len(adv_labels)
43 |
44 |
45 | def main():
46 | # Device configuration
47 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
48 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
49 | print("CUDA:", args.gpu_index)
50 | # Set the random seed manually for reproducibility.
51 | torch.backends.cudnn.deterministic = True
52 | torch.backends.cudnn.benchmark = False
53 | torch.manual_seed(args.seed)
54 | if torch.cuda.is_available():
55 | torch.cuda.manual_seed(args.seed)
56 | np.random.seed(args.seed)
57 | random.seed(args.seed)
58 |
59 | name = 'BIM'
60 | targeted = False
61 |
62 | bim = BIMGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
63 | clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device, eps=args.epsilon,
64 | attack_batch_size=args.attack_batch_size, eps_iter=args.epsilon_iter, num_steps=args.num_steps)
65 |     mr, num = bim.generate()
66 | 
67 |     write_result_to_csv(
68 |         model=args.model,
69 |         dataset=args.dataset,
70 |         snr=args.db, note=args.note,
71 |         attack=name, number=num,
72 |         robustacc=100 - mr
73 |     )
74 |
75 |
76 | if __name__ == '__main__':
77 | main()
78 |
--------------------------------------------------------------------------------
/Attacks/DeepFool_Generation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | import sys
5 |
6 | import numpy as np
7 | import torch
8 |
9 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
10 |
11 | from Attacks.AttackMethods.AttackUtils import predict
12 | from Attacks.AttackMethods.DEEPFOOL import DeepFoolAttack
13 | from Attacks.Generation import Generation
14 | from args import args
15 | from Generation import write_result_to_csv
16 |
17 | class DeepFoolGeneration(Generation):
18 | def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device, overshoot, max_iters):
19 | super(DeepFoolGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir,
20 | device)
21 |
22 | self.overshoot = overshoot
23 | self.max_iters = max_iters
24 |
25 | def generate(self):
26 | attacker = DeepFoolAttack(model=self.raw_model, overshoot=self.overshoot, max_iters=self.max_iters)
27 |         adv_samples = attacker.perturbation(xs=self.nature_samples, device=self.device)
28 | # prediction for the adversarial examples , adv_labels
29 | adv_labels = predict(model=self.raw_model, samples=adv_samples, device=self.device)
30 | adv_labels = torch.max(adv_labels, 1)[1]
31 | adv_labels = adv_labels.cpu().numpy()
32 |
33 | np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
34 | np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
35 | np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)
36 |
37 | mis = 0
38 | for i in range(len(adv_samples)):
39 | if self.labels_samples[i].argmax(axis=0) != adv_labels[i]:
40 | mis = mis + 1
41 | print('\nFor **{}** on **{}**: misclassification ratio is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset, mis, len(adv_samples),
42 | mis / len(adv_labels) * 100))
43 | print('\nFor **{}** on **{}**: adv ACC is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset,
44 | int(len(adv_samples) - mis), len(adv_samples),
45 | int(len(adv_samples) - mis) / len(
46 | adv_labels) * 100))
47 | return mis / len(adv_labels) * 100, len(adv_labels)
48 |
49 | def main():
50 | # Device configuration
51 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
52 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
53 | print("CUDA:", args.gpu_index)
54 | # Set the random seed manually for reproducibility.
55 | torch.backends.cudnn.deterministic = True
56 | torch.backends.cudnn.benchmark = False
57 | torch.manual_seed(args.seed)
58 | if torch.cuda.is_available():
59 | torch.cuda.manual_seed(args.seed)
60 | np.random.seed(args.seed)
61 | random.seed(args.seed)
62 |
63 | name = 'DeepFool'
64 | targeted = False
65 |
66 | df = DeepFoolGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
67 | clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device, max_iters=args.max_iters,
68 | overshoot=args.overshoot)
69 |     mr, num = df.generate()
70 | write_result_to_csv(
71 | model=args.model,
72 | dataset=args.dataset,
73 | snr=args.db,
74 | note=args.note,
75 | attack=name,
76 |         number=num,
77 | robustacc = 100-mr
78 | )
79 |
80 | if __name__ == '__main__':
81 | main()
82 |
--------------------------------------------------------------------------------
/Attacks/FGSM_Generation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | import sys
5 |
6 | import numpy as np
7 | import torch
8 |
9 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
10 | from Attacks.AttackMethods.AttackUtils import predict
11 | from Attacks.AttackMethods.FGSM import FGSMAttack
12 | from Attacks.Generation import Generation
13 | from args import args
14 | from Generation import write_result_to_csv
15 |
16 |
17 | class FGSMGeneration(Generation):
18 |
19 | def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device, eps,
20 | attack_batch_size):
21 | super(FGSMGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device)
22 | self.attack_batch_size = attack_batch_size
23 |
24 | self.epsilon = eps
25 |
26 | def generate(self):
27 | attacker = FGSMAttack(model=self.raw_model, epsilon=self.epsilon)
28 |
29 | # generating
30 | adv_samples, adv_labels = attacker.batch_perturbation(xs=self.nature_samples, ys=self.labels_samples, batch_size=self.attack_batch_size,
31 | device=self.device)
32 |
33 | np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
34 | np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
35 | np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)
36 |
37 | mis = 0
38 | for i in range(len(adv_samples)):
39 | if self.labels_samples[i].argmax(axis=0) != adv_labels[i]:
40 | mis = mis + 1
41 | print('\nFor **{}** on **{}**: misclassification ratio is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset, mis, len(adv_samples),
42 | mis / len(adv_labels) * 100))
43 | print('\nFor **{}** on **{}**: adv ACC is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset,
44 | int(len(adv_samples)-mis), len(adv_samples),
45 | int(len(adv_samples)-mis) / len(adv_labels) * 100))
46 | return mis / len(adv_labels) * 100, len(adv_labels)
47 |
48 | def main():
49 | # Device configuration
50 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
51 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
52 | print("CUDA:", args.gpu_index)
53 | # Set the random seed manually for reproducibility.
54 | torch.backends.cudnn.deterministic = True
55 | torch.backends.cudnn.benchmark = False
56 | torch.manual_seed(args.seed)
57 | if torch.cuda.is_available():
58 | torch.cuda.manual_seed(args.seed)
59 | np.random.seed(args.seed)
60 | random.seed(args.seed)
61 |
62 | name = 'FGSM'
63 | targeted = False
64 | fgsm = FGSMGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
65 | clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device, eps=args.epsilon,
66 | attack_batch_size=args.attack_batch_size)
67 |     mr, num = fgsm.generate()
68 |
69 | write_result_to_csv(
70 | model=args.model,
71 | dataset=args.dataset,
72 | snr=args.db,
73 | note=args.note,
74 | attack=name,
75 |         number=num,
76 | robustacc=100-mr
77 | )
78 |
79 |
80 | if __name__ == '__main__':
81 | main()
82 |
--------------------------------------------------------------------------------
/Attacks/Generation.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | from abc import ABCMeta
4 |
5 | import numpy as np
6 | import torch
7 | import pathlib
8 | from args import args
9 | from models.network import define_model
10 | from utils import load_pretrained_model, save_checkpoint
11 |
12 |
13 | class Generation(object):
14 | __metaclass__ = ABCMeta
15 |
16 | def __init__(self, dataset='128', attack_name='FGSM', targeted=False, raw_model_location='../RawModels/',
17 | clean_data_location='../CleanDatasets/', adv_examples_dir='../AdversarialExampleDatasets/',
18 | device=torch.device('cpu')):
19 | # check and set the support data set
20 | self.dataset = dataset.upper()
21 | if self.dataset not in {'128', '512', '1024', '3040'}:
22 | raise ValueError("The data set must be 128 or 512 or 1024 or 3040 ")
23 |
24 |         # check and set the supported attacks
25 | self.model = args.model
26 | self.attack_name = attack_name.upper()
27 | supported = {'FGSM', 'RFGSM', 'BIM', 'PGD', 'UMIFGSM', 'UAP', 'DEEPFOOL', 'OM', 'LLC', "RLLC", 'ILLC',
28 | 'TMIFGSM', 'JSMA', 'BLB', 'CW2',
29 | 'EAD'}
30 | if self.attack_name not in supported:
31 | raise ValueError(
32 |                 self.attack_name + ' is unknown!\nCurrently, our implementation supports the attacks: ' + ', '.join(
33 | supported))
34 |
35 | # load the raw model
36 | # raw_model_location = "/home/zjut/public/signal/wzw/KD/DefenseEnhancedModels/PAT/1024_CNN1Donly_eps0.06/checkpoint.pth_199.tar"
37 | raw_model_location = args.location
38 |         if dataset in {'128', '512', '1024', '3040'}:
39 | self.raw_model = define_model(name=args.model)
40 | # self.raw_model.load_state_dict(torch.load(raw_model_location,map_location=f"cuda:{args.gpu_index }"))
41 |
42 | checkpoint = torch.load(raw_model_location, map_location='cuda:{}'.format(args.gpu_index))
43 | # load_pretrained_model(self.raw_model, checkpoint) # ['net']
44 | load_pretrained_model(self.raw_model, checkpoint['net']) #
45 |
46 | else:
47 | print("Data error")
48 |
49 | # get the clean data sets / true_labels / targets (if the attack is one of the targeted attacks)
50 | print(
51 | 'Loading the prepared clean samples (nature inputs and corresponding labels) that will be attacked ...... ')
52 | self.nature_samples = np.load(
53 | '{}{}/{}/{}_inputs.npy'.format(clean_data_location, self.model, self.dataset, self.dataset))
54 | self.labels_samples = np.load(
55 | '{}{}/{}/{}_labels.npy'.format(clean_data_location, self.model, self.dataset, self.dataset))
56 |
57 | # self.nature_samples = np.load("/home/zjut/public/signal/wzw/KD/CleanDatasets/CNN1D_AMD/128/128_inputs.npy")
58 | # self.labels_samples = np.load("/home/zjut/public/signal/wzw/KD/CleanDatasets/CNN1D_AMD/128/128_labels.npy")
59 |
60 | if targeted:
61 | print('For Targeted Attacks, loading the randomly selected targeted labels that will be attacked ......')
62 | if self.attack_name.upper() in ['LLC', 'RLLC', 'ILLC']:
63 | print('#### Especially, for LLC, RLLC, ILLC, loading the least likely class that will be attacked')
64 | self.targets_samples = np.load(
65 | '{}{}/{}/{}_llc.npy'.format(clean_data_location, self.model, self.dataset, self.dataset))
66 | else:
67 | self.targets_samples = np.load(
68 | '{}{}/{}/{}_targets.npy'.format(clean_data_location, self.model, self.dataset, self.dataset))
69 |
70 | # prepare the directory for the attacker to save their generated adversarial examples
71 | self.adv_examples_dir = adv_examples_dir + self.model + '/' + self.dataset + '/' + self.attack_name + '/'
72 | if self.model not in os.listdir(adv_examples_dir):
73 | os.mkdir(adv_examples_dir + self.model + '/')
74 |
75 | if self.dataset not in os.listdir(adv_examples_dir + self.model + '/'):
76 | os.mkdir(adv_examples_dir + self.model + '/' + self.dataset + '/')
77 |
78 | if self.attack_name not in os.listdir(adv_examples_dir + self.model + '/' + self.dataset + '/'):
79 | os.mkdir(adv_examples_dir + self.model + '/' + self.dataset + '/' + self.attack_name + '/')
80 |
81 | else:
82 | shutil.rmtree('{}'.format(self.adv_examples_dir))
83 | os.mkdir(self.adv_examples_dir)
84 |
85 | # set up device
86 | self.device = device
87 |
88 | # write_result_to_csv(
89 | # model = args.model,
90 | # dataset = args.dataset,
91 | # number = args.number,
92 | # mis = BIMGeneration
93 | # )
94 |
95 | def generate(self):
96 | print("abstract method of Generation is not implemented")
97 | raise NotImplementedError
98 |
99 |
100 | # def calculateSNR()
101 |
102 |
103 | # write results to csv
104 | def write_result_to_csv(**kwargs):
105 | results = pathlib.Path("../AdversarialExampleDatasets") / "results.csv"
106 |
107 | if not results.exists():
108 | results.write_text(
109 | "MODEL, "
110 | "DATA, "
111 | "SNR, "
112 | "NOTE, "
113 | "ATTACK, "
114 | "#ADV-DATA, "
115 | "Robust-ACC\n "
116 | )
117 |
118 |     with open(results, "a+") as f:  # "a+": open for appending (read/write)
119 | f.write(
120 | ("{model}, "
121 | "{dataset}, "
122 | "{snr}, "
123 | "{note}, "
124 | "{attack}, "
125 | "{number}, "
126 | "{robustacc}\n"
127 | ).format(**kwargs)
128 | )
129 |
--------------------------------------------------------------------------------
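For reference, a call that satisfies the CSV header `write_result_to_csv` creates (all field values below are made up):

```python
from Attacks.Generation import write_result_to_csv

write_result_to_csv(
    model='CNN1D', dataset='128', snr=10, note='baseline',
    attack='FGSM', number=1000, robustacc=42.5,
)
```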
/Attacks/PGD_Generation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | import sys
5 |
6 | import numpy as np
7 | import torch
8 |
9 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
10 | from Attacks.Generation import Generation
11 | from Attacks.AttackMethods.PGD import PGDAttack
12 | from Attacks.AttackMethods.AttackUtils import predict
13 | from args import args
14 | from Generation import write_result_to_csv
15 |
16 | class PGDGeneration(Generation):
17 |
18 | def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device, attack_batch_size, eps,
19 | eps_iter, num_steps):
20 | super(PGDGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device)
21 | self.attack_batch_size = attack_batch_size
22 |
23 | self.epsilon = eps
24 | self.epsilon_iter = eps_iter
25 | self.num_steps = num_steps
26 |
27 | def generate(self):
28 | attacker = PGDAttack(model=self.raw_model, epsilon=self.epsilon, eps_iter=self.epsilon_iter, num_steps=self.num_steps)
29 | adv_samples, adv_labels = attacker.batch_perturbation(xs=self.nature_samples, ys=self.labels_samples,
30 | batch_size=self.attack_batch_size, device=self.device)
31 |
32 | np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
33 | np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
34 | np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)
35 |
36 | mis = 0
37 | for i in range(len(adv_samples)):
38 | if self.labels_samples[i].argmax(axis=0) != adv_labels[i]:
39 | mis = mis + 1
40 | print('\nFor **{}** on **{}**: misclassification ratio is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset, mis, len(adv_samples),
41 | mis / len(adv_labels) * 100))
42 | print('\nFor **{}** on **{}**: adv ACC is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset,
43 | int(len(adv_samples) - mis), len(adv_samples),
44 | int(len(adv_samples) - mis) / len(
45 | adv_labels) * 100))
46 | return mis / len(adv_labels) * 100, len(adv_labels)
47 |
48 | def main():
49 | # Device configuration
50 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
51 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
52 | print("CUDA:", args.gpu_index)
53 | # Set the random seed manually for reproducibility.
54 | torch.backends.cudnn.deterministic = True
55 | torch.backends.cudnn.benchmark = False
56 | torch.manual_seed(args.seed)
57 | if torch.cuda.is_available():
58 | torch.cuda.manual_seed(args.seed)
59 | np.random.seed(args.seed)
60 | random.seed(args.seed)
61 |
62 | name = 'PGD'
63 | targeted = False
64 |
65 | pgd = PGDGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
66 | clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device,
67 | eps=args.epsilon, attack_batch_size=args.attack_batch_size, eps_iter=args.epsilon_iter,
68 | num_steps=args.num_steps)
69 |     mr, num = pgd.generate()
70 |
71 | write_result_to_csv(
72 | model=args.model,
73 | dataset=args.dataset,
74 | snr=args.db,
75 | note=args.note,
76 | attack=name,
77 |         number=num,
78 | robustacc=100-mr
79 | )
80 |
81 |
82 | if __name__ == '__main__':
83 | main()
84 |
85 |
--------------------------------------------------------------------------------
/Attacks/README.md:
--------------------------------------------------------------------------------
1 | # Attacks
2 |
3 | Implementations of adversarial attack algorithms that are used to generate adversarial examples.
4 |
5 | ## Description
6 |
7 | For each attack, we first define and implement the attack class (e.g., **`FGSMAttack`** within **`FGSM.py`** for the FGSM attack) in **`Attacks/AttackMethods/`** folder, then we implement the testing code (e.g., **`FGSM_Generation.py`**) to generate corresponding adversarial examples and save them into the directory of [`AdversarialExampleDatasets/`](../AdversarialExampleDatasets/). Therefore, you can generate any adversarial examples you want by specifying their parameters accordingly.
8 |
9 | ## Implemented Attacks
10 | Here, we implement 16 state-of-the-art adversarial attacks, including 8 un-targeted attacks and 8 targeted attacks.
11 |
12 | - [x] **FGSM**: *I. J. Goodfellow, et al., "Explaining and harnessing adversarial examples," in ICLR, 2015.*
13 | - [x] **R+FGSM**: *F. Tramèr, et al., "Ensemble adversarial training: Attacks and defenses," in ICLR, 2018.*
14 | - [x] **BIM**: *A. Kurakin, et al., "Adversarial examples in the physical world," in ICLR, 2017.*
15 | - [x] **PGD**: *A. Madry, et al., "Towards deep learning models resistant to adversarial attacks," in ICLR, 2018.*
16 | - [x] **U-MI-FGSM**: *Y. Dong, et al., "Boosting adversarial attacks with momentum," arXiv:1710.06081, 2017.*
17 | - [x] **DF**: *S.-M. Moosavi-Dezfooli, et al., "Deepfool: A simple and accurate method to fool deep neural networks," in CVPR, 2016.*
18 | - [x] **UAP**: *S.-M. Moosavi-Dezfooli, et al., "Universal adversarial perturbations," in CVPR, 2017.*
19 | - [x] **OM**: *W. He, et al., "Decision boundary analysis of adversarial examples," in ICLR, 2018.*
20 |
21 | - [x] **LLC**: *A. Kurakin, et al., "Adversarial examples in the physical world," in ICLR, 2017.*
22 | - [x] **R+LLC**: *F. Tramèr, et al., "Ensemble adversarial training: Attacks and defenses," in ICLR, 2018.*
23 | - [x] **ILLC**: *A. Kurakin, et al., "Adversarial examples in the physical world," in ICLR, 2017.*
24 | - [x] **T-MI-FGSM**: *Y. Dong, et al., "Boosting adversarial attacks with momentum," arXiv:1710.06081, 2017.*
25 | - [x] **BLB**: *C. Szegedy, et al., "Intriguing properties of neural networks," in ICLR, 2014.*
26 | - [x] **JSMA**: *N. Papernot, et al., "The limitations of deep learning in adversarial settings," in EuroS&P, 2016.*
27 | - [x] **CW2**: *N. Carlini and D. Wagner, "Towards evaluating the robustness of neural networks," in S&P, 2017.*
28 | - [x] **EAD**: *P. Chen, et al., "EAD: elastic-net attacks to deep neural networks via adversarial examples," in AAAI, 2018.*
29 |
30 |
31 | ## Usage
32 |
33 | Generation of adversarial examples with specific attacking parameters that we used in our evaluation.
34 |
35 | | Attacks     | Commands with default parameters |
36 | |:-----------:|----------------------------------|
37 | | **FGSM**    | `python FGSM_Generation.py --dataset=MNIST --epsilon=0.3` <br> `python FGSM_Generation.py --dataset=128 --epsilon=0.1` |
38 | | **RFGSM**   | `python RFGSM_Generation.py --dataset=MNIST --epsilon=0.3 --alpha=0.5` <br> `python RFGSM_Generation.py --dataset=CIFAR10 --epsilon=0.1 --alpha=0.5` |
39 | | **BIM**     | `python BIM_Generation.py --dataset=MNIST --epsilon=0.3 --epsilon_iter=0.05 --num_steps=15` <br> `python BIM_Generation.py --dataset=CIFAR10 --epsilon=0.1 --epsilon_iter=0.01 --num_steps=15` |
40 | | **PGD**     | `python PGD_Generation.py --dataset=MNIST --epsilon=0.3 --epsilon_iter=0.05` <br> `python PGD_Generation.py --dataset=128 --epsilon=0.1 --epsilon_iter=0.01` |
41 | | **UMIFGSM** | `python UMIFGSM_Generation.py --dataset=MNIST --epsilon=0.3 --epsilon_iter=0.05` <br> `python UMIFGSM_Generation.py --dataset=128 --epsilon=0.1 --epsilon_iter=0.01` |
42 | | **UAP**     | `python UAP_Generation.py --dataset=MNIST --fool_rate=0.35 --epsilon=0.3` <br> `python UAP_Generation.py --dataset=128 --fool_rate=0.9 --epsilon=0.1` |
43 | | **DeepFool**| `python DeepFool_Generation.py --dataset=MNIST --max_iters=50 --overshoot=0.02` <br> `python DeepFool_Generation.py --dataset=128 --max_iters=50 --overshoot=0.02` |
44 | | **OM**      | `python OM_Generation.py --dataset=MNIST --initial_const=0.02 --learning_rate=0.2 --noise_count=20 --noise_mag=0.3` <br> `python OM_Generation.py --dataset=CIFAR10 --initial_const=1 --learning_rate=0.02 --noise_count=20 --noise_mag=0.03137255` |
45 | | **LLC**     | `python LLC_Generation.py --dataset=MNIST --epsilon=0.3` <br> `python LLC_Generation.py --dataset=CIFAR10 --epsilon=0.1` |
46 | | **RLLC**    | `python RLLC_Generation.py --dataset=MNIST --epsilon=0.3 --alpha=0.5` <br> `python RLLC_Generation.py --dataset=CIFAR10 --epsilon=0.1 --alpha=0.5` |
47 | | **ILLC**    | `python ILLC_Generation.py --dataset=MNIST --epsilon=0.3 --epsilon_iter=0.05` <br> `python ILLC_Generation.py --dataset=CIFAR10 --epsilon=0.1 --epsilon_iter=0.01` |
48 | | **TMIFGSM** | `python TMIFGSM_Generation.py --dataset=MNIST --epsilon=0.3 --epsilon_iter=0.05` <br> `python TMIFGSM_Generation.py --dataset=128 --epsilon=0.1 --epsilon_iter=0.01` |
49 | | **JSMA**    | `python JSMA_Generation.py --dataset=MNIST --theta=1.0 --gamma=0.1` <br> `python JSMA_Generation.py --dataset=128 --theta=1.0 --gamma=0.1` |
50 | | **BLB**     | `python BLB_Generation.py --dataset=MNIST` <br> `python BLB_Generation.py --dataset=CIFAR10` |
51 | | **CW2**     | `python CW2_Generation.py --dataset=MNIST --confidence=0 --initial_const=0.001` <br> `python CW2_Generation.py --dataset=128 --confidence=0 --initial_const=0.001` |
52 | | **EAD**     | `python EAD_Generation.py --dataset=MNIST --confidence=0 --beta=0.001 --EN=True` <br> `python EAD_Generation.py --dataset=CIFAR10 --confidence=0 --beta=0.001 --EN=True` |
53 | 
--------------------------------------------------------------------------------
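Each `*_Generation.py` script leaves three arrays per attack under `AdversarialExampleDatasets/<model>/<dataset>/<attack>/` (see `Generation.py`); a minimal sketch of consuming them, with a hypothetical model/dataset choice:

```python
import numpy as np

root = '../AdversarialExampleDatasets/CNN1D/128/FGSM/'  # hypothetical model/dataset/attack
adv_x = np.load(root + 'FGSM_AdvExamples.npy')    # perturbed inputs
adv_y = np.load(root + 'FGSM_AdvLabels.npy')      # model predictions on the perturbed inputs
true_y = np.load(root + 'FGSM_TrueLabels.npy')    # one-hot ground-truth labels

mis = (true_y.argmax(axis=1) != adv_y).mean()
print(f'misclassification ratio: {mis:.1%}')
```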
/Attacks/UMIFGSM_Generation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | import sys
5 |
6 | import numpy as np
7 | import torch
8 |
9 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
10 | from Attacks.Generation import Generation
11 | from Attacks.AttackMethods.UMIFGSM import UMIFGSMAttack
12 | from Attacks.AttackMethods.AttackUtils import predict
13 | from args import args
14 | from Generation import write_result_to_csv
15 |
16 |
17 | class UMIFGSMGeneration(Generation):
18 |
19 | def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device, attack_batch_size, eps,
20 | eps_iter, num_steps, decay_factor):
21 | super(UMIFGSMGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir,
22 | device)
23 | self.attack_batch_size = attack_batch_size
24 |
25 | self.epsilon = eps
26 | self.epsilon_iter = eps_iter
27 | self.num_steps = num_steps
28 | self.decay_factor = decay_factor
29 |
30 | def generate(self):
31 |         attacker = UMIFGSMAttack(model=self.raw_model, epsilon=self.epsilon, eps_iter=self.epsilon_iter, num_steps=self.num_steps, decay_factor=self.decay_factor)
32 | adv_samples, adv_labels = attacker.batch_perturbation(xs=self.nature_samples, ys=self.labels_samples,
33 | batch_size=self.attack_batch_size, device=self.device)
34 | # prediction for the adversarial examples
35 | # adv_labels = predict(model=self.raw_model, samples=adv_samples, device=self.device)
36 | # adv_labels = torch.max(adv_labels, 1)[1]
37 | # adv_labels = adv_labels.cpu().numpy()
38 |
39 | np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
40 | np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
41 | np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)
42 |
43 | mis = 0
44 | for i in range(len(adv_samples)):
45 | if self.labels_samples[i].argmax(axis=0) != adv_labels[i]:
46 | mis = mis + 1
47 | print('\nFor **{}** on **{}**: misclassification ratio is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset, mis, len(adv_samples),
48 | mis / len(adv_labels) * 100))
49 | print('\nFor **{}** on **{}**: adv ACC is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset,
50 | int(len(adv_samples) - mis), len(adv_samples),
51 | int(len(adv_samples) - mis) / len(
52 | adv_labels) * 100))
53 |         return mis / len(adv_labels) * 100, len(adv_labels)
54 |
55 | def main():
56 | # Device configuration
57 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
58 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
59 | print("CUDA:", args.gpu_index)
60 | # Set the random seed manually for reproducibility.
61 | torch.backends.cudnn.deterministic = True
62 | torch.backends.cudnn.benchmark = False
63 | torch.manual_seed(args.seed)
64 | if torch.cuda.is_available():
65 | torch.cuda.manual_seed(args.seed)
66 | np.random.seed(args.seed)
67 | random.seed(args.seed)
68 |
69 | name = 'UMIFGSM'
70 | targeted = False
71 |
72 | umifgsm = UMIFGSMGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
73 | clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device,
74 | eps=args.epsilon, attack_batch_size=args.attack_batch_size, eps_iter=args.epsilon_iter,
75 | num_steps=args.num_steps, decay_factor=args.decay_factor)
76 |     mr, num = umifgsm.generate()
77 | write_result_to_csv(
78 | model=args.model,
79 | dataset=args.dataset,
80 | snr=args.db,
81 | note=args.note,
82 | attack=name,
83 |         number=num,
84 | robustacc=100-mr
85 | )
86 |
87 |
88 | if __name__ == '__main__':
89 | main()
90 |
--------------------------------------------------------------------------------
/Attacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/__init__.py
--------------------------------------------------------------------------------
/Attacks/__pycache__/Generation.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/__pycache__/Generation.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Attacks/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/Attacks/autoattack/__init__.py:
--------------------------------------------------------------------------------
1 | from .autoattack import AutoAttack
2 |
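3 | # Minimal usage sketch (assumes the standard upstream AutoAttack interface;
4 | # `model`, `x_test`, and `y_test` are placeholders):
5 | #   adversary = AutoAttack(model, norm='Linf', eps=0.15, version='standard')
6 | #   x_adv = adversary.run_standard_evaluation(x_test, y_test, bs=250)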
--------------------------------------------------------------------------------
/Attacks/autoattack/checks.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import warnings
3 | import math
4 | import sys
5 |
6 | from autoattack.other_utils import L2_norm
7 |
8 |
9 | funcs = {'grad': 0,
10 | 'backward': 0,
11 | #'enable_grad': 0
12 | '_make_grads': 0,
13 | }
14 |
15 | checks_doc_path = 'flags_doc.md'
16 |
17 |
18 | def check_randomized(model, x, y, bs=250, n=5, alpha=1e-4, logger=None):
19 | acc = []
20 | corrcl = []
21 | outputs = []
22 | with torch.no_grad():
23 | for _ in range(n):
24 | output = model(x)
25 | corrcl_curr = (output.max(1)[1] == y).sum().item()
26 | corrcl.append(corrcl_curr)
27 | outputs.append(output / (L2_norm(output, keepdim=True) + 1e-10))
28 | acc = [c != corrcl_curr for c in corrcl]
29 | max_diff = 0.
30 | for c in range(n - 1):
31 | for e in range(c + 1, n):
32 | diff = L2_norm(outputs[c] - outputs[e])
33 | max_diff = max(max_diff, diff.max().item())
34 | #print(diff.max().item(), max_diff)
35 | if any(acc) or max_diff > alpha:
36 | msg = 'it seems to be a randomized defense! Please use version="rand".' + \
37 | f' See {checks_doc_path} for details.'
38 | if logger is None:
39 | warnings.warn(Warning(msg))
40 | else:
41 | logger.log(f'Warning: {msg}')
42 |
43 |
44 | def check_range_output(model, x, alpha=1e-5, logger=None):
45 | with torch.no_grad():
46 | output = model(x)
47 | fl = [output.max() < 1. + alpha, output.min() > -alpha,
48 | ((output.sum(-1) - 1.).abs() < alpha).all()]
49 | if all(fl):
50 | msg = 'it seems that the output is a probability distribution,' +\
51 | ' please be sure that the logits are used!' + \
52 | f' See {checks_doc_path} for details.'
53 | if logger is None:
54 | warnings.warn(Warning(msg))
55 | else:
56 | logger.log(f'Warning: {msg}')
57 | return output.shape[-1]
58 |
59 |
60 | def check_zero_gradients(grad, logger=None):
61 | z = grad.view(grad.shape[0], -1).abs().sum(-1)
62 | #print(grad[0, :10])
63 | if (z == 0).any():
64 | msg = f'there are {(z == 0).sum()} points with zero gradient!' + \
65 | ' This might lead to unreliable evaluation with gradient-based attacks.' + \
66 | f' See {checks_doc_path} for details.'
67 | if logger is None:
68 | warnings.warn(Warning(msg))
69 | else:
70 | logger.log(f'Warning: {msg}')
71 |
72 |
73 | def check_square_sr(acc_dict, alpha=.002, logger=None):
74 | if 'square' in acc_dict.keys() and len(acc_dict) > 2:
75 | acc = min([v for k, v in acc_dict.items() if k != 'square'])
76 | if acc_dict['square'] < acc - alpha:
77 | msg = 'Square Attack has decreased the robust accuracy of' + \
78 | f' {acc - acc_dict["square"]:.2%}.' + \
79 | ' This might indicate that the robustness evaluation using' +\
80 | ' AutoAttack is unreliable. Consider running Square' +\
81 | ' Attack with more iterations and restarts or an adaptive attack.' + \
82 | f' See {checks_doc_path} for details.'
83 | if logger is None:
84 | warnings.warn(Warning(msg))
85 | else:
86 | logger.log(f'Warning: {msg}')
87 |
88 |
89 | ''' from https://stackoverflow.com/questions/26119521/counting-function-calls-python '''
90 | def tracefunc(frame, event, args):
91 | if event == 'call' and frame.f_code.co_name in funcs.keys():
92 | funcs[frame.f_code.co_name] += 1
93 |
94 |
95 | def check_dynamic(model, x, is_tf_model=False, logger=None):
96 | if is_tf_model:
97 | msg = 'the check for dynamic defenses is not currently supported'
98 | else:
99 | msg = None
100 | sys.settrace(tracefunc)
101 | model(x)
102 | sys.settrace(None)
103 | #for k, v in funcs.items():
104 | # print(k, v)
105 | if any([c > 0 for c in funcs.values()]):
106 | msg = 'it seems to be a dynamic defense! The evaluation' + \
107 | ' with AutoAttack might be insufficient.' + \
108 | f' See {checks_doc_path} for details.'
109 | if not msg is None:
110 | if logger is None:
111 | warnings.warn(Warning(msg))
112 | else:
113 | logger.log(f'Warning: {msg}')
114 | #sys.settrace(None)
115 |
116 |
117 | def check_n_classes(n_cls, attacks_to_run, apgd_targets, fab_targets,
118 | logger=None):
119 | msg = None
120 | if 'apgd-dlr' in attacks_to_run or 'apgd-t' in attacks_to_run:
121 | if n_cls <= 2:
122 | msg = f'with only {n_cls} classes it is not possible to use the DLR loss!'
123 | elif n_cls == 3:
124 | msg = f'with only {n_cls} classes it is not possible to use the targeted DLR loss!'
125 | elif 'apgd-t' in attacks_to_run and \
126 | apgd_targets + 1 > n_cls:
127 | msg = f'it seems that more target classes ({apgd_targets})' + \
128 | f' than possible ({n_cls - 1}) are used in {"apgd-t".upper()}!'
129 | if 'fab-t' in attacks_to_run and fab_targets + 1 > n_cls:
130 | if msg is None:
131 | msg = f'it seems that more target classes ({apgd_targets})' + \
132 | f' than possible ({n_cls - 1}) are used in FAB-T!'
133 | else:
134 | msg += f' Also, it seems that too many target classes ({apgd_targets})' + \
135 | f' are used in {"fab-t".upper()} ({n_cls - 1} possible)!'
136 | if not msg is None:
137 | if logger is None:
138 | warnings.warn(Warning(msg))
139 | else:
140 | logger.log(f'Warning: {msg}')
141 |
142 |
143 |
--------------------------------------------------------------------------------
/Attacks/autoattack/fab_projections.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import torch
4 | from torch.nn import functional as F
5 |
6 |
7 | def projection_linf(points_to_project, w_hyperplane, b_hyperplane):
8 | device = points_to_project.device
9 | t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane.clone()
10 |
11 | sign = 2 * ((w * t).sum(1) - b >= 0) - 1
12 | w.mul_(sign.unsqueeze(1))
13 | b.mul_(sign)
14 |
15 | a = (w < 0).float()
16 | d = (a - t) * (w != 0).float()
17 |
18 | p = a - t * (2 * a - 1)
19 | indp = torch.argsort(p, dim=1)
20 |
21 | b = b - (w * t).sum(1)
22 | b0 = (w * d).sum(1)
23 |
24 | indp2 = indp.flip((1,))
25 | ws = w.gather(1, indp2)
26 | bs2 = - ws * d.gather(1, indp2)
27 |
28 | s = torch.cumsum(ws.abs(), dim=1)
29 | sb = torch.cumsum(bs2, dim=1) + b0.unsqueeze(1)
30 |
31 | b2 = sb[:, -1] - s[:, -1] * p.gather(1, indp[:, 0:1]).squeeze(1)
32 | c_l = b - b2 > 0
33 | c2 = (b - b0 > 0) & (~c_l)
34 | lb = torch.zeros(c2.sum(), device=device)
35 | ub = torch.full_like(lb, w.shape[1] - 1)
36 | nitermax = math.ceil(math.log2(w.shape[1]))
37 |
38 | indp_, sb_, s_, p_, b_ = indp[c2], sb[c2], s[c2], p[c2], b[c2]
39 | for counter in range(nitermax):
40 | counter4 = torch.floor((lb + ub) / 2)
41 |
42 | counter2 = counter4.long().unsqueeze(1)
43 | indcurr = indp_.gather(1, indp_.size(1) - 1 - counter2)
44 | b2 = (sb_.gather(1, counter2) - s_.gather(1, counter2) * p_.gather(1, indcurr)).squeeze(1)
45 | c = b_ - b2 > 0
46 |
47 | lb = torch.where(c, counter4, lb)
48 | ub = torch.where(c, ub, counter4)
49 |
50 | lb = lb.long()
51 |
52 | if c_l.any():
53 | lmbd_opt = torch.clamp_min((b[c_l] - sb[c_l, -1]) / (-s[c_l, -1]), min=0).unsqueeze(-1)
54 | d[c_l] = (2 * a[c_l] - 1) * lmbd_opt
55 |
56 | lmbd_opt = torch.clamp_min((b[c2] - sb[c2, lb]) / (-s[c2, lb]), min=0).unsqueeze(-1)
57 | d[c2] = torch.min(lmbd_opt, d[c2]) * a[c2] + torch.max(-lmbd_opt, d[c2]) * (1 - a[c2])
58 |
59 | return d * (w != 0).float()
60 |
61 |
62 | def projection_l2(points_to_project, w_hyperplane, b_hyperplane):
63 | device = points_to_project.device
64 | t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane
65 |
66 | c = (w * t).sum(1) - b
67 | ind2 = 2 * (c >= 0) - 1
68 | w.mul_(ind2.unsqueeze(1))
69 | c.mul_(ind2)
70 |
71 | r = torch.max(t / w, (t - 1) / w).clamp(min=-1e12, max=1e12)
72 | r.masked_fill_(w.abs() < 1e-8, 1e12)
73 | r[r == -1e12] *= -1
74 | rs, indr = torch.sort(r, dim=1)
75 | rs2 = F.pad(rs[:, 1:], (0, 1))
76 | rs.masked_fill_(rs == 1e12, 0)
77 | rs2.masked_fill_(rs2 == 1e12, 0)
78 |
79 | w3s = (w ** 2).gather(1, indr)
80 | w5 = w3s.sum(dim=1, keepdim=True)
81 | ws = w5 - torch.cumsum(w3s, dim=1)
82 | d = -(r * w)
83 | d.mul_((w.abs() > 1e-8).float())
84 | s = torch.cat((-w5 * rs[:, 0:1], torch.cumsum((-rs2 + rs) * ws, dim=1) - w5 * rs[:, 0:1]), 1)
85 |
86 | c4 = s[:, 0] + c < 0
87 | c3 = (d * w).sum(dim=1) + c > 0
88 | c2 = ~(c4 | c3)
89 |
90 | lb = torch.zeros(c2.sum(), device=device)
91 | ub = torch.full_like(lb, w.shape[1] - 1)
92 | nitermax = math.ceil(math.log2(w.shape[1]))
93 |
94 | s_, c_ = s[c2], c[c2]
95 | for counter in range(nitermax):
96 | counter4 = torch.floor((lb + ub) / 2)
97 | counter2 = counter4.long().unsqueeze(1)
98 | c3 = s_.gather(1, counter2).squeeze(1) + c_ > 0
99 | lb = torch.where(c3, counter4, lb)
100 | ub = torch.where(c3, ub, counter4)
101 |
102 | lb = lb.long()
103 |
104 | if c4.any():
105 | alpha = c[c4] / w5[c4].squeeze(-1)
106 | d[c4] = -alpha.unsqueeze(-1) * w[c4]
107 |
108 | if c2.any():
109 | alpha = (s[c2, lb] + c[c2]) / ws[c2, lb] + rs[c2, lb]
110 | alpha[ws[c2, lb] == 0] = 0
111 | c5 = (alpha.unsqueeze(-1) > r[c2]).float()
112 | d[c2] = d[c2] * c5 - alpha.unsqueeze(-1) * w[c2] * (1 - c5)
113 |
114 | return d * (w.abs() > 1e-8).float()
115 |
116 |
117 | def projection_l1(points_to_project, w_hyperplane, b_hyperplane):
118 | device = points_to_project.device
119 | t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane
120 |
121 | c = (w * t).sum(1) - b
122 | ind2 = 2 * (c >= 0) - 1
123 | w.mul_(ind2.unsqueeze(1))
124 | c.mul_(ind2)
125 |
126 | r = (1 / w).abs().clamp_max(1e12)
127 | indr = torch.argsort(r, dim=1)
128 | indr_rev = torch.argsort(indr)
129 |
130 | c6 = (w < 0).float()
131 | d = (-t + c6) * (w != 0).float()
132 | ds = torch.min(-w * t, w * (1 - t)).gather(1, indr)
133 | ds2 = torch.cat((c.unsqueeze(-1), ds), 1)
134 | s = torch.cumsum(ds2, dim=1)
135 |
136 | c2 = s[:, -1] < 0
137 |
138 | lb = torch.zeros(c2.sum(), device=device)
139 | ub = torch.full_like(lb, s.shape[1])
140 | nitermax = math.ceil(math.log2(w.shape[1]))
141 |
142 | s_ = s[c2]
143 | for counter in range(nitermax):
144 | counter4 = torch.floor((lb + ub) / 2)
145 | counter2 = counter4.long().unsqueeze(1)
146 | c3 = s_.gather(1, counter2).squeeze(1) > 0
147 | lb = torch.where(c3, counter4, lb)
148 | ub = torch.where(c3, ub, counter4)
149 |
150 | lb2 = lb.long()
151 |
152 | if c2.any():
153 | indr = indr[c2].gather(1, lb2.unsqueeze(1)).squeeze(1)
154 | u = torch.arange(0, w.shape[0], device=device).unsqueeze(1)
155 | u2 = torch.arange(0, w.shape[1], device=device, dtype=torch.float).unsqueeze(0)
156 | alpha = -s[c2, lb2] / w[c2, indr]
157 | c5 = u2 < lb.unsqueeze(-1)
158 | u3 = c5[u[:c5.shape[0]], indr_rev[c2]]
159 | d[c2] = d[c2] * u3.float()
160 | d[c2, indr] = alpha
161 |
162 | return d * (w.abs() > 1e-8).float()
163 |
--------------------------------------------------------------------------------
/Attacks/autoattack/fab_pt.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019-present, Francesco Croce
2 | # All rights reserved.
3 | #
4 | # This source code is licensed under the license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | #
7 |
8 | from __future__ import absolute_import
9 | from __future__ import division
10 | from __future__ import print_function
11 | from __future__ import unicode_literals
12 |
13 | import time
14 |
15 | import torch
16 |
17 | from autoattack.other_utils import zero_gradients
18 | from autoattack.fab_base import FABAttack
19 |
20 | class FABAttack_PT(FABAttack):
21 | """
22 | Fast Adaptive Boundary Attack (Linf, L2, L1)
23 | https://arxiv.org/abs/1907.02044
24 |
25 | :param predict: forward pass function
26 | :param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported)
27 | :param n_restarts: number of random restarts
28 | :param n_iter: number of iterations
29 | :param eps: epsilon for the random restarts
30 | :param alpha_max: alpha_max
31 | :param eta: overshooting
32 | :param beta: backward step
33 | """
34 |
35 | def __init__(
36 | self,
37 | predict,
38 | norm='Linf',
39 | n_restarts=1,
40 | n_iter=100,
41 | eps=None,
42 | alpha_max=0.1,
43 | eta=1.05,
44 | beta=0.9,
45 | loss_fn=None,
46 | verbose=False,
47 | seed=0,
48 | targeted=False,
49 | device=None,
50 | n_target_classes=9):
51 | """ FAB-attack implementation in pytorch """
52 |
53 | self.predict = predict
54 | super().__init__(norm,
55 | n_restarts,
56 | n_iter,
57 | eps,
58 | alpha_max,
59 | eta,
60 | beta,
61 | loss_fn,
62 | verbose,
63 | seed,
64 | targeted,
65 | device,
66 | n_target_classes)
67 |
68 | def _predict_fn(self, x):
69 | return self.predict(x)
70 |
71 | def _get_predicted_label(self, x):
72 | with torch.no_grad():
73 | outputs = self._predict_fn(x)
74 | _, y = torch.max(outputs, dim=1)
75 | return y
76 |
77 | def get_diff_logits_grads_batch(self, imgs, la):
78 | im = imgs.clone().requires_grad_()
79 | with torch.enable_grad():
80 | y = self.predict(im)
81 |
82 | g2 = torch.zeros([y.shape[-1], *imgs.size()]).to(self.device)
83 | grad_mask = torch.zeros_like(y)
84 | for counter in range(y.shape[-1]):
85 | zero_gradients(im)
86 | grad_mask[:, counter] = 1.0
87 | y.backward(grad_mask, retain_graph=True)
88 | grad_mask[:, counter] = 0.0
89 | g2[counter] = im.grad.data
90 |
91 | g2 = torch.transpose(g2, 0, 1).detach()
92 | #y2 = self.predict(imgs).detach()
93 | y2 = y.detach()
94 | df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
95 | dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
96 | df[torch.arange(imgs.shape[0]), la] = 1e10
97 |
98 | return df, dg
99 |
100 | def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target):
101 | u = torch.arange(imgs.shape[0])
102 | im = imgs.clone().requires_grad_()
103 | with torch.enable_grad():
104 | y = self.predict(im)
105 | diffy = -(y[u, la] - y[u, la_target])
106 | sumdiffy = diffy.sum()
107 |
108 | zero_gradients(im)
109 | sumdiffy.backward()
110 | graddiffy = im.grad.data
111 | df = diffy.detach().unsqueeze(1)
112 | dg = graddiffy.unsqueeze(1)
113 |
114 | return df, dg
115 |
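116 | # Minimal usage sketch (illustrative; `perturb` is inherited from FABAttack in
117 | # fab_base.py, and the model and batch tensors are placeholders):
118 | #   attack = FABAttack_PT(model.forward, norm='Linf', n_restarts=1,
119 | #                         n_iter=100, eps=0.15, device=device)
120 | #   x_adv = attack.perturb(x_batch, y_batch)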
--------------------------------------------------------------------------------
/Attacks/autoattack/fab_tf.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019-present, Francesco Croce
2 | # All rights reserved.
3 | #
4 | # This source code is licensed under the license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | #
7 |
8 | from __future__ import absolute_import
9 | from __future__ import division
10 | from __future__ import print_function
11 | from __future__ import unicode_literals
12 |
13 | import torch
14 | from autoattack.fab_base import FABAttack
15 |
16 |
17 | class FABAttack_TF(FABAttack):
18 | """
19 | Fast Adaptive Boundary Attack (Linf, L2, L1)
20 | https://arxiv.org/abs/1907.02044
21 |
22 | :param model: TF_model
23 | :param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported)
24 | :param n_restarts: number of random restarts
25 | :param n_iter: number of iterations
26 | :param eps: epsilon for the random restarts
27 | :param alpha_max: alpha_max
28 | :param eta: overshooting
29 | :param beta: backward step
30 | """
31 |
32 | def __init__(
33 | self,
34 | model,
35 | norm='Linf',
36 | n_restarts=1,
37 | n_iter=100,
38 | eps=None,
39 | alpha_max=0.1,
40 | eta=1.05,
41 | beta=0.9,
42 | loss_fn=None,
43 | verbose=False,
44 | seed=0,
45 | targeted=False,
46 | device=None,
47 | n_target_classes=9):
48 | """ FAB-attack implementation in TF2 """
49 |
50 | self.model = model
51 | super().__init__(norm,
52 | n_restarts,
53 | n_iter,
54 | eps,
55 | alpha_max,
56 | eta,
57 | beta,
58 | loss_fn,
59 | verbose,
60 | seed,
61 | targeted,
62 | device,
63 | n_target_classes)
64 |
65 | def _predict_fn(self, x):
66 | return self.model.predict(x)
67 |
68 | def _get_predicted_label(self, x):
69 | with torch.no_grad():
70 | outputs = self._predict_fn(x)
71 | _, y = torch.max(outputs, dim=1)
72 | return y
73 |
74 | def get_diff_logits_grads_batch(self, imgs, la):
75 | y2, g2 = self.model.grad_logits(imgs)
76 | df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
77 | dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
78 | df[torch.arange(imgs.shape[0]), la] = 1e10
79 |
80 | return df, dg
81 |
82 | def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target):
83 | df, dg = self.model.get_grad_diff_logits_target(imgs, la, la_target)
84 | df.unsqueeze_(1)
85 | dg.unsqueeze_(1)
86 |
87 | return df, dg
88 |
--------------------------------------------------------------------------------
/Attacks/autoattack/other_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import collections.abc as container_abcs
3 |
4 | import torch
5 |
6 | class Logger():
7 | def __init__(self, log_path):
8 | self.log_path = log_path
9 |
10 | def log(self, str_to_log):
11 | print(str_to_log)
12 | if not self.log_path is None:
13 | with open(self.log_path, 'a') as f:
14 | f.write(str_to_log + '\n')
15 | f.flush()
16 |
17 | def check_imgs(adv, x, norm):
18 | delta = (adv - x).view(adv.shape[0], -1)
19 | if norm == 'Linf':
20 | res = delta.abs().max(dim=1)[0]
21 | elif norm == 'L2':
22 | res = (delta ** 2).sum(dim=1).sqrt()
23 | elif norm == 'L1':
24 | res = delta.abs().sum(dim=1)
25 |
26 | str_det = 'max {} pert: {:.5f}, nan in imgs: {}, max in imgs: {:.5f}, min in imgs: {:.5f}'.format(
27 | norm, res.max(), (adv != adv).sum(), adv.max(), adv.min())
28 | print(str_det)
29 |
30 | return str_det
31 |
32 | def L1_norm(x, keepdim=False):
33 | z = x.abs().view(x.shape[0], -1).sum(-1)
34 | if keepdim:
35 | z = z.view(-1, *[1]*(len(x.shape) - 1))
36 | return z
37 |
38 | def L2_norm(x, keepdim=False):
39 | z = (x ** 2).view(x.shape[0], -1).sum(-1).sqrt()
40 | if keepdim:
41 | z = z.view(-1, *[1]*(len(x.shape) - 1))
42 | return z
43 |
44 | def L0_norm(x):
45 | return (x != 0.).view(x.shape[0], -1).sum(-1)
46 |
47 | def makedir(path):
48 | if not os.path.exists(path):
49 | os.makedirs(path)
50 |
51 | def zero_gradients(x):
52 | if isinstance(x, torch.Tensor):
53 | if x.grad is not None:
54 | x.grad.detach_()
55 | x.grad.zero_()
56 | elif isinstance(x, container_abcs.Iterable):
57 | for elem in x:
58 | zero_gradients(elem)
59 |
--------------------------------------------------------------------------------
/Attacks/autoattack/state.py:
--------------------------------------------------------------------------------
1 | import json
2 | from dataclasses import dataclass, field, asdict
3 | from datetime import datetime
4 | from pathlib import Path
5 | from typing import Optional, Set
6 | import warnings
7 |
8 | import torch
9 |
10 |
11 | @dataclass
12 | class EvaluationState:
13 | _attacks_to_run: Set[str]
14 | path: Optional[Path] = None
15 | _run_attacks: Set[str] = field(default_factory=set)
16 | _robust_flags: Optional[torch.Tensor] = None
17 | _last_saved: datetime = datetime(1, 1, 1)
18 | _SAVE_TIMEOUT: int = 60
19 | _clean_accuracy: float = float("nan")
20 |
21 | def to_disk(self, force: bool = False) -> None:
22 | seconds_since_last_save = (datetime.now() -
23 | self._last_saved).total_seconds()
24 | if self.path is None or (seconds_since_last_save < self._SAVE_TIMEOUT
25 | and not force):
26 | return
27 | self._last_saved = datetime.now()
28 | d = asdict(self)
29 | if self.robust_flags is not None:
30 | d["_robust_flags"] = d["_robust_flags"].cpu().tolist()
31 | d["_run_attacks"] = list(self._run_attacks)
32 |         with self.path.open("w") as f:
33 | json.dump(d, f, default=str)
34 |
35 | @classmethod
36 | def from_disk(cls, path: Path) -> "EvaluationState":
37 | with path.open("r") as f:
38 | d = json.load(f)
39 | d["_robust_flags"] = torch.tensor(d["_robust_flags"], dtype=torch.bool)
40 | d["path"] = Path(d["path"])
41 | if path != d["path"]:
42 | warnings.warn(
43 | UserWarning(
44 | "The given path is different from the one found in the state file."
45 | ))
46 | d["_last_saved"] = datetime.fromisoformat(d["_last_saved"])
47 | return cls(**d)
48 |
49 | @property
50 | def robust_flags(self) -> Optional[torch.Tensor]:
51 | return self._robust_flags
52 |
53 | @robust_flags.setter
54 | def robust_flags(self, robust_flags: torch.Tensor) -> None:
55 | self._robust_flags = robust_flags
56 | self.to_disk(force=True)
57 |
58 | @property
59 | def run_attacks(self) -> Set[str]:
60 | return self._run_attacks
61 |
62 | def add_run_attack(self, attack: str) -> None:
63 | self._run_attacks.add(attack)
64 | self.to_disk()
65 |
66 | @property
67 | def attacks_to_run(self) -> Set[str]:
68 | return self._attacks_to_run
69 |
70 | @attacks_to_run.setter
71 | def attacks_to_run(self, _: Set[str]) -> None:
72 | raise ValueError("attacks_to_run cannot be set outside of the constructor")
73 |
74 | @property
75 | def clean_accuracy(self) -> float:
76 | return self._clean_accuracy
77 |
78 | @clean_accuracy.setter
79 | def clean_accuracy(self, accuracy) -> None:
80 | self._clean_accuracy = accuracy
81 | self.to_disk(force=True)
82 |
83 | @property
84 | def robust_accuracy(self) -> float:
85 | if self.robust_flags is None:
86 | raise ValueError("robust_flags is not set yet. Start the attack first.")
87 | if self.attacks_to_run - self.run_attacks:
88 | warnings.warn("You are checking `robust_accuracy` before all the attacks"
89 | " have been run.")
90 | return self.robust_flags.float().mean().item()
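91 | 
92 | 
93 | # Minimal usage sketch (illustrative names): persist evaluation progress so an
94 | # interrupted AutoAttack run can be resumed from disk later.
95 | #   state = EvaluationState({'apgd-ce', 'square'}, path=Path('state.json'))
96 | #   state.robust_flags = torch.ones(8, dtype=torch.bool)  # setter also saves
97 | #   resumed = EvaluationState.from_disk(Path('state.json'))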
--------------------------------------------------------------------------------
/Attacks/autoattack/utils_tf.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import torch
4 |
5 | class ModelAdapter():
6 | def __init__(self, logits, x, y, sess, num_classes=10):
7 | self.logits = logits
8 | self.sess = sess
9 | self.x_input = x
10 | self.y_input = y
11 | self.num_classes = num_classes
12 |
13 | # gradients of logits
14 | if num_classes <= 10:
15 | self.grads = [None] * num_classes
16 | for cl in range(num_classes):
17 | self.grads[cl] = tf.gradients(self.logits[:, cl], self.x_input)[0]
18 |
19 | # cross-entropy loss
20 | self.xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
21 | logits=self.logits, labels=self.y_input)
22 | self.grad_xent = tf.gradients(self.xent, self.x_input)[0]
23 |
24 | # dlr loss
25 | self.dlr = dlr_loss(self.logits, self.y_input, num_classes=self.num_classes)
26 | self.grad_dlr = tf.gradients(self.dlr, self.x_input)[0]
27 |
28 | # targeted dlr loss
29 | self.y_target = tf.placeholder(tf.int64, shape=[None])
30 | self.dlr_target = dlr_loss_targeted(self.logits, self.y_input, self.y_target, num_classes=self.num_classes)
31 | self.grad_target = tf.gradients(self.dlr_target, self.x_input)[0]
32 |
33 | self.la = tf.placeholder(tf.int64, shape=[None])
34 | self.la_target = tf.placeholder(tf.int64, shape=[None])
35 | la_mask = tf.one_hot(self.la, self.num_classes)
36 | la_target_mask = tf.one_hot(self.la_target, self.num_classes)
37 | la_logit = tf.reduce_sum(la_mask * self.logits, axis=1)
38 | la_target_logit = tf.reduce_sum(la_target_mask * self.logits, axis=1)
39 | self.diff_logits = la_target_logit - la_logit
40 | self.grad_diff_logits = tf.gradients(self.diff_logits, self.x_input)[0]
41 |
42 | def predict(self, x):
43 | x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
44 | y = self.sess.run(self.logits, {self.x_input: x2})
45 |
46 | return torch.from_numpy(y).cuda()
47 |
48 | def grad_logits(self, x):
49 | x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
50 | logits, g2 = self.sess.run([self.logits, self.grads], {self.x_input: x2})
51 | g2 = np.moveaxis(np.array(g2), 0, 1)
52 | g2 = np.transpose(g2, (0, 1, 4, 2, 3))
53 |
54 | return torch.from_numpy(logits).cuda(), torch.from_numpy(g2).cuda()
55 |
56 | def get_grad_diff_logits_target(self, x, y=None, y_target=None):
57 | la = y.cpu().numpy()
58 | la_target = y_target.cpu().numpy()
59 | x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
60 | dl, g2 = self.sess.run([self.diff_logits, self.grad_diff_logits], {self.x_input: x2, self.la: la, self.la_target: la_target})
61 | g2 = np.transpose(np.array(g2), (0, 3, 1, 2))
62 |
63 | return torch.from_numpy(dl).cuda(), torch.from_numpy(g2).cuda()
64 |
65 | def get_logits_loss_grad_xent(self, x, y):
66 | x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
67 | y2 = y.clone().cpu().numpy()
68 | logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.xent, self.grad_xent], {self.x_input: x2, self.y_input: y2})
69 | grad_val = np.moveaxis(grad_val, 3, 1)
70 |
71 | return torch.from_numpy(logits_val).cuda(), torch.from_numpy(loss_indiv_val).cuda(), torch.from_numpy(grad_val).cuda()
72 |
73 | def get_logits_loss_grad_dlr(self, x, y):
74 | x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
75 | y2 = y.clone().cpu().numpy()
76 | logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr, self.grad_dlr], {self.x_input: x2, self.y_input: y2})
77 | grad_val = np.moveaxis(grad_val, 3, 1)
78 |
79 | return torch.from_numpy(logits_val).cuda(), torch.from_numpy(loss_indiv_val).cuda(), torch.from_numpy(grad_val).cuda()
80 |
81 | def get_logits_loss_grad_target(self, x, y, y_target):
82 | x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
83 | y2 = y.clone().cpu().numpy()
84 | y_targ = y_target.clone().cpu().numpy()
85 | logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr_target, self.grad_target], {self.x_input: x2, self.y_input: y2, self.y_target: y_targ})
86 | grad_val = np.moveaxis(grad_val, 3, 1)
87 |
88 | return torch.from_numpy(logits_val).cuda(), torch.from_numpy(loss_indiv_val).cuda(), torch.from_numpy(grad_val).cuda()
89 |
90 | def dlr_loss(x, y, num_classes=10):
91 | x_sort = tf.contrib.framework.sort(x, axis=1)
92 | y_onehot = tf.one_hot(y, num_classes)
93 | ### TODO: adapt to the case when the point is already misclassified
94 | loss = -(x_sort[:, -1] - x_sort[:, -2]) / (x_sort[:, -1] - x_sort[:, -3] + 1e-12)
95 |
96 | return loss
97 |
98 | def dlr_loss_targeted(x, y, y_target, num_classes=10):
99 | x_sort = tf.contrib.framework.sort(x, axis=1)
100 | y_onehot = tf.one_hot(y, num_classes)
101 | y_target_onehot = tf.one_hot(y_target, num_classes)
102 | loss = -(tf.reduce_sum(x * y_onehot, axis=1) - tf.reduce_sum(x * y_target_onehot, axis=1)) / (x_sort[:, -1] - .5 * x_sort[:, -3] - .5 * x_sort[:, -4] + 1e-12)
103 |
104 | return loss
105 |
--------------------------------------------------------------------------------
/CleanDatasets/README.md:
--------------------------------------------------------------------------------
1 | # Clean Datasets
2 |
3 | Here, we randomly select clean samples that are correctly predicted by the corresponding model from the testing set of each dataset (i.e., MNIST and CIFAR10).
4 |
5 | In addition, for targeted attacks, the target class is chosen randomly from the labels other than the ground-truth class.
6 |
7 | These selected clean samples are then attacked by each of the adversarial attack methods.
8 |
9 |
10 | ```
11 | python CandidatesSelection.py --dataset=MNIST/CIFAR10 --number=1000
12 | ```
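13 | 
14 | For reference, the core of the selection step amounts to the following (a
15 | minimal sketch, assuming `model`, `test_loader`, and `device` are already set
16 | up; see `CandidatesSelection.py` for the actual implementation):
17 | 
18 | ```python
19 | import torch
20 | 
21 | candidates, labels = [], []
22 | with torch.no_grad():
23 |     for inputs, targets in test_loader:
24 |         inputs, targets = inputs.to(device), targets.to(device)
25 |         correct = model(inputs).argmax(dim=1) == targets  # keep correct predictions
26 |         candidates.append(inputs[correct].cpu())
27 |         labels.append(targets[correct].cpu())
28 | ```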
--------------------------------------------------------------------------------
/CleanDatasets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/CleanDatasets/__init__.py
--------------------------------------------------------------------------------
/Defenses/DD_Test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | import sys
5 |
6 | import numpy as np
7 | import torch
8 |
9 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
10 | # from RawModels.Utils.dataset import get_mnist_train_validate_loader, get_mnist_test_loader
11 | # from RawModels.Utils.dataset import get_cifar10_train_validate_loader, get_cifar10_test_loader
12 | from Utils.dataset import get_signal_train_validate_loader, get_signal_test_loader, get_alldb_signal_train_validate_loader
13 | from models.network import define_model
14 | from args import args
15 |
16 | from Defenses.DefenseMethods.DD import DistillationDefense
17 |
18 |
19 | def main():
20 | # Device configuration
21 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
22 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
23 | # Set the random seed manually for reproducibility.
24 | torch.backends.cudnn.deterministic = True
25 | torch.backends.cudnn.benchmark = False
26 | torch.manual_seed(args.seed)
27 | if torch.cuda.is_available():
28 | torch.cuda.manual_seed(args.seed)
29 | np.random.seed(args.seed)
30 | random.seed(args.seed)
31 |
32 |     # Get training parameters, set up model frameworks and then get the train_loader and test_loader
33 | dataset = args.dataset.upper()
34 |     if dataset in ('128', '512', '1024', '3040'):
35 | model_framework = define_model(name=args.model).to(device)
36 | # raw train_loader (no augmentation) for constructing the SoftLabelDataset and then used to train the distilled model
37 | # 128 using all db dataset
38 | # train_loader, valid_loader = get_alldb_signal_train_validate_loader(batch_size=args.batch_size, shuffle=True)
39 | # 1024 using >=10db dataset
40 | train_loader, valid_loader = get_signal_train_validate_loader(batch_size=args.batch_size, shuffle=True)
41 |
42 | # testing dataset loader
43 | # test_loader = get_signal_test_loader(batch_size=args.batch_size, shuffle=False)
44 | test_loader = get_signal_test_loader(batch_size=args.batch_size, shuffle=False)
45 |
46 | # raw train_loader (no augmentation) for constructing the SoftLabelDataset and then used to train the distilled model
47 | # raw_train_loader, raw_valid_loader = get_alldb_signal_train_validate_loader(batch_size=args.batch_size, shuffle=True)
48 | raw_train_loader, raw_valid_loader = get_signal_train_validate_loader(batch_size=args.batch_size, shuffle=True)
49 |
50 | else:
51 | print("data error")
52 |
53 | defense_name = 'DD'
54 | dd = DistillationDefense(model=model_framework, defense_name=defense_name, dataset=dataset, temperature=args.temp,
55 | device=device)
56 | dd.defense(initial_flag=args.initial, train_loader=train_loader, validation_loader=valid_loader, raw_train=raw_train_loader,
57 | raw_valid=raw_valid_loader, test_loader=test_loader)
58 |
59 |
60 | if __name__ == '__main__':
61 |
62 | main()
63 |
--------------------------------------------------------------------------------
/Defenses/DefenseMethods/External/InputTransformations.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | It is downloaded from https://raw.githubusercontent.com/anishathalye/obfuscated-gradients/master/inputtransformations/defense.py
5 | """
6 |
7 | from io import BytesIO
8 |
9 | import PIL
10 | import PIL.Image
11 | import numpy as np
12 | from torchvision.transforms import ToPILImage, ToTensor
13 |
14 |
15 | def defend_jpeg(input_tensor, image_mode, quality):
16 | pil_image = ToPILImage(mode=image_mode)(input_tensor)
17 | fd = BytesIO()
18 | pil_image.save(fd, format='jpeg', quality=quality) # quality level specified in paper
19 | jpeg_image = ToTensor()(PIL.Image.open(fd))
20 | return jpeg_image
21 |
22 |
23 | # based on https://github.com/scikit-image/scikit-image/blob/master/skimage/restoration/_denoise_cy.pyx
24 |
25 | # super slow since this is implemented in pure python :'(
26 |
27 | def bregman(image, mask, weight, eps=1e-3, max_iter=100):
28 | rows, cols, dims = image.shape
29 | rows2 = rows + 2
30 | cols2 = cols + 2
31 | total = rows * cols * dims
32 | shape_ext = (rows2, cols2, dims)
33 |
34 | u = np.zeros(shape_ext)
35 | dx = np.zeros(shape_ext)
36 | dy = np.zeros(shape_ext)
37 | bx = np.zeros(shape_ext)
38 | by = np.zeros(shape_ext)
39 |
40 | u[1:-1, 1:-1] = image
41 | # reflect image
42 | u[0, 1:-1] = image[1, :]
43 | u[1:-1, 0] = image[:, 1]
44 | u[-1, 1:-1] = image[-2, :]
45 | u[1:-1, -1] = image[:, -2]
46 |
47 | i = 0
48 | rmse = np.inf
49 | lam = 2 * weight
50 | norm = (weight + 4 * lam)
51 |
52 | while i < max_iter and rmse > eps:
53 | rmse = 0
54 |
55 | for k in range(dims):
56 | for r in range(1, rows + 1):
57 | for c in range(1, cols + 1):
58 | uprev = u[r, c, k]
59 |
60 | # forward derivatives
61 | ux = u[r, c + 1, k] - uprev
62 | uy = u[r + 1, c, k] - uprev
63 |
64 | # Gauss-Seidel method
65 | if mask[r - 1, c - 1]:
66 | unew = (lam * (u[r + 1, c, k] +
67 | u[r - 1, c, k] +
68 | u[r, c + 1, k] +
69 | u[r, c - 1, k] +
70 | dx[r, c - 1, k] -
71 | dx[r, c, k] +
72 | dy[r - 1, c, k] -
73 | dy[r, c, k] -
74 | bx[r, c - 1, k] +
75 | bx[r, c, k] -
76 | by[r - 1, c, k] +
77 | by[r, c, k]
78 | ) + weight * image[r - 1, c - 1, k]
79 | ) / norm
80 | else:
81 | # similar to the update step above, except we take
82 | # lim_{weight->0} of the update step, effectively
83 | # ignoring the l2 loss
84 | unew = (u[r + 1, c, k] +
85 | u[r - 1, c, k] +
86 | u[r, c + 1, k] +
87 | u[r, c - 1, k] +
88 | dx[r, c - 1, k] -
89 | dx[r, c, k] +
90 | dy[r - 1, c, k] -
91 | dy[r, c, k] -
92 | bx[r, c - 1, k] +
93 | bx[r, c, k] -
94 | by[r - 1, c, k] +
95 | by[r, c, k]
96 | ) / 4.0
97 | u[r, c, k] = unew
98 |
99 | # update rms error
100 | rmse += (unew - uprev) ** 2
101 |
102 | bxx = bx[r, c, k]
103 | byy = by[r, c, k]
104 |
105 | # d_subproblem
106 | s = ux + bxx
107 | if s > 1 / lam:
108 | dxx = s - 1 / lam
109 | elif s < -1 / lam:
110 | dxx = s + 1 / lam
111 | else:
112 | dxx = 0
113 | s = uy + byy
114 | if s > 1 / lam:
115 | dyy = s - 1 / lam
116 | elif s < -1 / lam:
117 | dyy = s + 1 / lam
118 | else:
119 | dyy = 0
120 |
121 | dx[r, c, k] = dxx
122 | dy[r, c, k] = dyy
123 |
124 | bx[r, c, k] += ux - dxx
125 | by[r, c, k] += uy - dyy
126 |
127 | rmse = np.sqrt(rmse / total)
128 | i += 1
129 |
130 | return np.asarray(u[1:-1, 1:-1])
131 |
132 |
133 | def defend_tv(input_array, keep_prob=0.5, lambda_tv=0.03):
134 | mask = np.random.uniform(size=input_array.shape[:2])
135 | mask = mask < keep_prob
136 | return bregman(input_array, mask, weight=2.0 / lambda_tv)
137 |
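138 | # Minimal usage sketch (illustrative): both defenses operate on one image at a
139 | # time, so they are applied per-sample before batching.
140 | #   jpeg_img = defend_jpeg(img_tensor, image_mode='RGB', quality=75)
141 | #   tv_img = defend_tv(img_array)  # img_array: HxWxC numpy array in [0, 1]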
--------------------------------------------------------------------------------
/Defenses/DefenseMethods/External/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # **************************************
4 | # @Time : 2018/12/6 17:04
5 | # @Author : Xiang Ling
6 | # @Lab : nesa.zju.edu.cn
7 | # @File : __init__.py.py
8 | # **************************************
--------------------------------------------------------------------------------
/Defenses/DefenseMethods/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # **************************************
4 |
--------------------------------------------------------------------------------
/Defenses/DefenseMethods/defenses.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | from abc import ABCMeta
4 | from abc import abstractmethod
5 |
6 |
7 | class Defense(object):
8 | __metaclass__ = ABCMeta
9 |
10 | def __init__(self, model=None, defense_name=None):
11 | self.model = model
12 | self.defense_name = defense_name
13 |
14 | defense_dir = '../DefenseEnhancedModels/{}'.format(self.defense_name)
15 | if self.defense_name not in os.listdir('../DefenseEnhancedModels/'):
16 | os.mkdir(defense_dir)
17 | print('creating the {} folder for storing the {} defense'.format(defense_dir, self.defense_name))
18 | else:
19 |             print('the {} folder already exists'.format(defense_dir))
20 |
21 | @abstractmethod
22 | def defense(self):
23 | print("abstract method of 'Defenses' is not implemented")
24 | raise NotImplementedError
25 |
--------------------------------------------------------------------------------
/Defenses/PAT_Test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import sys
4 |
5 | import numpy as np
6 | import torch
7 |
8 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
9 |
10 | from Utils.dataset import get_signal_train_validate_loader
11 | from Utils.dataset import get_signal_train_validate_loader, get_signal_test_loader
12 | # from RawModels.Utils.dataset import get_alldb_signal_train_validate_loader
13 | # from RawModels.Utils.dataset import get_single_db_signal_test_loader
14 | # from RawModels.Utils.dataset import get_upper_minus4db_signal_test_loader,get_alldb_signal_train_validate_loader
15 |
16 | from Utils.dataset import get_signal_test_loader
17 | from models.network import define_model
18 | from Defenses.DefenseMethods.PAT import PATDefense
19 | from args import args
20 |
21 |
22 | def main():
23 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
24 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
25 | print("CUDA:", args.gpu_index)
26 | # Set the random seed manually for reproducibility.
27 | torch.backends.cudnn.deterministic = True
28 | torch.backends.cudnn.benchmark = False
29 | torch.manual_seed(args.seed)
30 | if torch.cuda.is_available():
31 | torch.cuda.manual_seed(args.seed)
32 | np.random.seed(args.seed)
33 | random.seed(args.seed)
34 |
35 | # Get training parameters, set up model frameworks and then get the train_loader and test_loader
36 | dataset = args.dataset.upper()
37 |
38 |     if dataset in ('128', '512', '1024', '3040'):
39 | model_framework = define_model(name=args.model).to(device)
40 | # train_loader, valid_loader = get_alldb_signal_train_validate_loader(batch_size=args.batch_size,shuffle=True)
41 | # train_loader, valid_loader = get_alldb_signal_train_validate_loader(batch_size=args.batch_size,shuffle=True)
42 | train_loader, valid_loader = get_signal_train_validate_loader(batch_size=args.batch_size,shuffle=True)
43 | else:
44 | print("data error")
45 |
46 | defense_name = 'PAT'
47 | pat_params = {
48 | 'attack_step_num': args.step_num,
49 | 'step_size': args.step_size,
50 | 'epsilon': args.eps
51 | }
52 |
53 |
54 | pat = PATDefense(model=model_framework, defense_name=defense_name, dataset=dataset, device=device, **pat_params)
55 | pat.defense(train_loader=train_loader, validation_loader=valid_loader)
56 |
57 |
58 |
59 | if __name__ == '__main__':
60 | main()
61 |
--------------------------------------------------------------------------------
/Defenses/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/Defenses/__init__.py
--------------------------------------------------------------------------------
/Defenses/defense.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import random
4 | import sys
5 |
6 | import numpy as np
7 | import torch
8 |
9 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
10 |
11 | from Utils.dataset import get_signal_train_validate_loader, get_signal_test_loader
12 |
13 | from Utils.dataset import get_signal_test_loader
14 | from models.network import define_model
15 | from Defenses.DefenseMethods.PAT import PATDefense
16 | from Defenses.DefenseMethods.DD import DistillationDefense
17 | from args import args
18 |
19 |
20 | def main():
21 | os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
22 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
23 | print("CUDA:", args.gpu_index)
24 | # Set the random seed manually for reproducibility.
25 | torch.backends.cudnn.deterministic = True
26 | torch.backends.cudnn.benchmark = False
27 | torch.manual_seed(args.seed)
28 | if torch.cuda.is_available():
29 | torch.cuda.manual_seed(args.seed)
30 | np.random.seed(args.seed)
31 | random.seed(args.seed)
32 |
33 | # Get training parameters, set up model frameworks and then get the train_loader and test_loader
34 | dataset = args.dataset.upper()
35 |
36 |     if dataset in ('128', '512', '1024', '3040'):
37 | model_framework = define_model(name=args.model).to(device)
38 | train_loader, valid_loader = get_signal_train_validate_loader(batch_size=args.batch_size,shuffle=True)
39 | # testing dataset loader
40 | test_loader = get_signal_test_loader(batch_size=args.batch_size, shuffle=False)
41 | # raw train_loader (no augmentation) for constructing the SoftLabelDataset and then used to train the distilled model
42 | raw_train_loader, raw_valid_loader = get_signal_train_validate_loader(batch_size=args.batch_size, shuffle=True)
43 | else:
44 | print("data error")
45 |
46 | if args.defense_name == 'PAT':
47 | defense_name = 'PAT'
48 | pat_params = {
49 | 'attack_step_num': args.step_num,
50 | 'step_size': args.step_size,
51 | 'epsilon': args.eps
52 | }
53 | pat = PATDefense(model=model_framework, defense_name=defense_name, dataset=dataset, device=device, **pat_params)
54 | pat.defense(train_loader=train_loader, validation_loader=valid_loader)
55 | elif args.defense_name == 'DD':
56 | defense_name = 'DD'
57 | dd = DistillationDefense(model=model_framework, defense_name=defense_name, dataset=dataset,
58 | temperature=args.temp,
59 | device=device)
60 | dd.defense(initial_flag=args.initial, train_loader=train_loader, validation_loader=valid_loader,
61 | raw_train=raw_train_loader,
62 | raw_valid=raw_valid_loader, test_loader=test_loader)
63 | elif args.defense_name == 'VMD':
64 | VMDdefense()
65 |
66 | if __name__ == '__main__':
67 | main()
68 |
--------------------------------------------------------------------------------
/Defenses/trades.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import numpy as np
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | from torch.autograd import Variable
9 | import torch.optim as optim
10 | sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
11 | from args import args
12 |
13 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
14 | def squared_l2_norm(x):
15 | flattened = x.view(x.unsqueeze(0).shape[0], -1)
16 | return (flattened ** 2).sum(1)
17 |
18 |
19 | def l2_norm(x):
20 | return squared_l2_norm(x).sqrt()
21 |
22 |
23 | def trades_loss(model,
24 | x_natural,
25 | y,
26 | optimizer,
27 | step_size=0.03, # 0.003
28 | epsilon=0.06, # 0.031
29 | perturb_steps=5, # 10
30 | beta=1.0,
31 | distance='l_inf'):
32 | # define KL-loss
33 | criterion_kl = nn.KLDivLoss(size_average=False)
34 | model.eval()
35 | batch_size = len(x_natural)
36 | # generate adversarial example
37 | x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).to(device).detach()
38 | # x_adv = x_natural.detach() + np.random.uniform(-epsilon, epsilon, x_natural.detach().shape).astype('float32')
39 | if distance == 'l_inf':
40 | for _ in range(perturb_steps):
41 | x_adv.requires_grad_()
42 | with torch.enable_grad():
43 | loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
44 | F.softmax(model(x_natural), dim=1))
45 | grad = torch.autograd.grad(loss_kl, [x_adv])[0]
46 | x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
47 | x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
48 | # x_adv = torch.clamp(x_adv, 0.0, 1.0)
49 | x_adv = torch.clamp(x_adv, -1.0, 1.0)
50 | elif distance == 'l_2':
51 | delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
52 | delta = Variable(delta.data, requires_grad=True)
53 |
54 | # Setup optimizers
55 | optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
56 |
57 | for _ in range(perturb_steps):
58 | adv = x_natural + delta
59 |
60 | # optimize
61 | optimizer_delta.zero_grad()
62 | with torch.enable_grad():
63 | loss = (-1) * criterion_kl(F.log_softmax(model(adv), dim=1),
64 | F.softmax(model(x_natural), dim=1))
65 | loss.backward()
66 | # renorming gradient
67 | grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
68 | delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
69 | # avoid nan or inf if gradient is 0
70 | if (grad_norms == 0).any():
71 | delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
72 | optimizer_delta.step()
73 |
74 | # projection
75 | delta.data.add_(x_natural)
76 | delta.data.clamp_(0, 1).sub_(x_natural)
77 | delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
78 | x_adv = Variable(x_natural + delta, requires_grad=False)
79 | else:
80 | # x_adv = torch.clamp(x_adv, 0.0, 1.0)
81 | x_adv = torch.clamp(x_adv, 0.0, 1.0)
82 | model.train()
83 |
84 | x_adv = Variable(torch.clamp(x_adv, -1.0, 1.0), requires_grad=False)
85 | # zero gradient
86 | optimizer.zero_grad()
87 | # calculate robust loss
88 | logits = model(x_natural)
89 | loss_natural = F.cross_entropy(logits, y)
90 | loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(model(x_adv), dim=1),
91 | F.softmax(model(x_natural), dim=1))
92 | loss = loss_natural + beta * loss_robust
93 | # loss = beta * loss_robust
94 | return loss
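95 | 
96 | 
97 | # Minimal training-step sketch (illustrative; `model`, `optimizer`, and the
98 | # data loader come from the caller, and the hyperparameters mirror the
99 | # defaults above). trades_loss() zeroes the gradients itself, so the caller
100 | # only backpropagates and steps:
101 | #   for x, y in train_loader:
102 | #       x, y = x.to(device), y.to(device)
103 | #       loss = trades_loss(model, x, y, optimizer,
104 | #                          step_size=0.03, epsilon=0.06, perturb_steps=5)
105 | #       loss.backward()
106 | #       optimizer.step()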
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Learn to Defend: Adversarial Multi-Distillation for Automatic Modulation Recognition Models
2 | ### by Zhuangzhi Chen, Zhangwei Wang, Dongwei Xu, Jiawei Zhu, Weiguo Shen, Shilian Zheng, Qi Xuan, Xiaoniu Yang
3 |
4 |
5 |
6 | ### Main components
7 |
8 | | File name | Explanation |
9 | |:---------------------------------------| :----------------------------------------------------------- |
10 | | `AMD.py`                               | Main entry point; trains the distillation model. |
11 | | `CleanDatasets/CandidatesSelection.py` | Code for extracting samples that the model classifies correctly. |
12 | | `Attacks/*`                            | Code for the adversarial attacks we use. |
13 | | `Defenses/*`                           | Code for generating defense models. |
14 | | `models/*`                             | Code for all model architectures used in the experiments. |
15 | | `raw_model_training/*`                 | Code for model architectures and baseline training scripts. |
16 | | `Utils/dataset.py`                     | Code for dataset preparation. |
17 | | `order/*`                              | Shell scripts for running experiments. |
18 | | `args.py`                              | Code for configuration. |
19 |
20 |
21 |
22 | ## Running Codes
23 |
24 | ### How to train baseline models
25 |
26 | ```bash
27 | python train.py --model CNN1D --dataset 128 --num_workers 8 --epochs 50
28 | ```
29 |
30 | Alternatively, the provided script can be used to batch-train models:
31 |
32 | ```bash
33 | bash raw_model_training/train.sh
34 | ```
35 |
36 | ### How to train adversarial distillation models
37 |
38 | To run the experiments, enter the following command.
40 | ```bash
40 | python AMD.py \
41 | --save_root "model save root" \
42 | --t1_model "clean teacher model root" \
43 | --t2_model "adversarial teacher model root" \
44 | --s_init "student model root" \
45 | --dataset 128 --t1_name MCLDNN --t2_name Vgg16 --s_name mobilenet \
46 | --kd_mode logits --lambda_kd1 1 --lambda_kd2 1 --lambda_kd3 1 \
47 | --note \
48 | --gpu_index
49 | ```
50 |
51 |
52 |
53 | ### How to generate defense models
54 |
55 | | Defenses | Commands with default parameters |
56 | | :--------: | ------------------------------------------------------------ |
57 | | **PAT**    | python PAT_Test.py --dataset=128 --eps=0.06 --step_num=5 --step_size=0.03 --model CNN1D |
58 | | **TRADES** | python train_trades.py --dataset=128 --eps=0.06 --step_num=5 --step_size=0.03 --model CNN1D |
59 | | **DD** | python DD_Test.py --dataset=128 --temp 30.0 --model CNN1D |
60 |
61 |
62 |
63 | ### How to evaluate models with the attack methods
64 |
65 | 1. Obtain samples that the model classifies correctly. The clean samples will be saved in `/CleanDatasets`.
66 |
67 | ```bash
68 | cd ./CleanDatasets
69 | python CandidatesSelection.py --dataset=128 --number=$1 --gpu_index $2 --model $3 --location $4
70 | ```
71 |
72 | 2. Use the attack methods provided below. The adversarial samples will be saved in `/AdversarialExampleDatasets`.
73 |
74 | | Attacks | Commands with default parameters |
75 | | :---------: | ------------------------------------------------------------ |
76 | | **PGD**     | python PGD_Generation.py --dataset=128 --epsilon=0.15 --epsilon_iter=0.03 --num_steps 15 --model=CNN1D |
77 | | **UMIFGSM** | python UMIFGSM_Generation.py --dataset=128 --epsilon=0.15 --model=CNN1D |
78 | | **AA** | python AutoAttack.py --dataset=128 --epsilon=0.15 --model=CNN1D |
79 | | **DF**      | python DeepFool_Generation.py --dataset=128 --max_iters=15 --overshoot=0.02 --model=CNN1D |
80 | | **SQUARE** | python square.py --Linf=0.3 --num_queries=3000 --model CNN1D |
81 |
82 |
83 |
84 | ### Package Requirements
85 |
86 | To ensure the program runs successfully, the versions of the Python packages we used are listed in `requirements.txt`. To align your package versions with this file, simply run:
87 |
88 | ```
89 | pip install -r requirements.txt
90 | ```
91 |
92 |
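93 | ### Example: end-to-end robustness evaluation
94 | 
95 | The two evaluation steps above can be chained as follows (illustrative; the
96 | flags are the defaults from the tables above):
97 | 
98 | ```bash
99 | # 1. select correctly classified clean samples
100 | cd CleanDatasets && python CandidatesSelection.py --dataset=128 --number=1000 --model CNN1D
101 | # 2. craft adversarial examples from them
102 | cd ../Attacks && python PGD_Generation.py --dataset=128 --epsilon=0.15 --epsilon_iter=0.03 --num_steps 15 --model=CNN1D
103 | ```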
--------------------------------------------------------------------------------
/Utils/TrainTest.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 |
4 |
5 | # help functions for training and testing
6 |
7 | def model_weight_decay(model=None):
8 | decay = None
9 | for (name, param) in model.named_parameters():
10 | if name.lower().find('conv') > 0:
11 | if decay is None:
12 | decay = param.norm(2)
13 | else:
14 | decay = decay + param.norm(2)
15 | if decay is None:
16 | decay = 0
17 | return decay
18 |
19 |
20 | # train the model in one epoch
21 | def train_one_epoch(model, train_loader, optimizer, epoch, device):
22 | """
23 |
24 | :param model:
25 | :param train_loader:
26 | :param optimizer:
27 | :param epoch:
28 | :param device:
29 | :return:
30 | """
31 |
32 | # Sets the model in training mode
33 | model.train()
34 | for index, (images, labels) in enumerate(train_loader):
35 | images = images.to(device)
36 | labels = labels.to(device)
37 | # forward the nn
38 | outputs = model(images)
39 | loss = F.cross_entropy(outputs, labels)
40 |
41 | # backward
42 | optimizer.zero_grad()
43 | loss.backward()
44 | optimizer.step()
45 |
46 | print('\rTrain Epoch{:>3}: [batch:{:>4}/{:>4}({:>3.0f}%)] \tLoss: {:.4f} ===> '. \
47 | format(epoch, index, len(train_loader), index / len(train_loader) * 100.0, loss.item()), end=' ')
48 |
49 |
50 | # evaluate the model using validation dataset
51 | def validation_evaluation(model, validation_loader, device):
52 | """
53 |
54 | :param model:
55 | :param validation_loader:
56 | :param device:
57 | :return:
58 | """
59 | model = model.to(device)
60 | model.eval()
61 |
62 | total = 0.0
63 | correct = 0.0
64 | with torch.no_grad():
65 | for index, (inputs, labels) in enumerate(validation_loader):
66 | inputs = inputs.to(device)
67 | labels = labels.to(device)
68 |
69 | outputs = model(inputs)
70 | _, predicted = torch.max(outputs.data, 1)
71 | total = total + labels.size(0)
72 | correct = correct + (predicted == labels).sum().item()
73 | ratio = correct / total
74 | print('validation dataset accuracy is {:.4f}'.format(ratio))
75 | return ratio
76 |
77 |
78 | # evaluate the model using testing dataset
79 | def testing_evaluation(model, test_loader, device):
80 | """
81 |
82 | :param model:
83 | :param test_loader:
84 | :param device:
85 | :return:
86 | """
87 | print('\n#####################################')
88 | print('#### The {} model is evaluated on the testing dataset loader ...... '.format(model.model_name))
89 | # Sets the module in evaluation mode.
90 | model = model.to(device)
91 | model.eval()
92 |
93 | total = 0.0
94 | correct = 0.0
95 | with torch.no_grad():
96 | for images, labels in test_loader:
97 | images = images.to(device)
98 | labels = labels.to(device)
99 |
100 | outputs = model(images)
101 | _, predicted = torch.max(outputs.data, 1)
102 | total = total + labels.size(0)
103 | correct = correct + (predicted == labels).sum().item()
104 | ratio = correct / total
105 | print('#### Accuracy of the loaded model on the testing dataset: {:.1f}/{:.1f} = {:.2f}%'.format(correct, total, ratio * 100))
106 | print('#####################################\n')
107 |
108 | return ratio
109 |
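110 | # Minimal usage sketch (illustrative) tying the helpers above together:
111 | #   for epoch in range(epochs):
112 | #       train_one_epoch(model, train_loader, optimizer, epoch, device)
113 | #       validation_evaluation(model, valid_loader, device)
114 | #   testing_evaluation(model, test_loader, device)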
--------------------------------------------------------------------------------
/Utils/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # **************************************
4 | # @Time : 2018/11/7 22:45
5 | # @Author : Xiang Ling
6 | # @Lab : nesa.zju.edu.cn
7 | # @File : __init__.py.py
8 | # **************************************
9 |
10 |
--------------------------------------------------------------------------------
/__pycache__/args.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/__pycache__/args.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/kd_losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .logits import Logits
2 | from .st import SoftTarget
3 | from .at import AT
4 | from .fitnet import Hint
5 | from .nst import NST
6 | from .pkt import PKTCosSim
7 | from .fsp import FSP
8 | from .ft import FT
9 | from .dml import DML
10 | # from .kdsvd import KDSVD
11 | from .rkd import RKD
12 | from .ab import AB
13 | from .sp import SP
14 | from .sobolev import Sobolev
15 | from .bss import BSS, BSSAttacker
16 | from .cc import CC
17 | from .lwm import LwM
18 | from .irg import IRG
19 | from .vid import VID
20 | from .ofd import OFD
21 | from .afd import AFD
22 | from .crd import CRD
--------------------------------------------------------------------------------
/kd_losses/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/kd_losses/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/kd_losses/__pycache__/logits.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/kd_losses/__pycache__/logits.cpython-37.pyc
--------------------------------------------------------------------------------
/kd_losses/logits.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 |
8 |
9 | class Logits(nn.Module):
10 | '''
11 | Do Deep Nets Really Need to be Deep?
12 | http://papers.nips.cc/paper/5484-do-deep-nets-really-need-to-be-deep.pdf
13 | '''
14 | def __init__(self):
15 | super(Logits, self).__init__()
16 |
17 | def forward(self, out_s, out_t):
18 | loss = F.mse_loss(out_s, out_t)
19 |
20 | return loss
21 |
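22 | # Minimal usage sketch (illustrative): plain logit matching between a student
23 | # and a frozen teacher, as used by the `--kd_mode logits` option in AMD.py.
24 | #   criterion = Logits()
25 | #   kd_loss = criterion(student_logits, teacher_logits.detach())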
--------------------------------------------------------------------------------
/models/Alexnet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import numpy as np
4 | from torch.autograd import Variable
5 | class AlexNet(nn.Module):
6 | def __init__(self, num_calss= None):
7 | super(AlexNet, self).__init__()
8 | self.features=nn.Sequential(
9 | nn.Conv2d(1,64,kernel_size=(3,3),stride=1,padding=2),
10 |
11 | nn.ReLU(inplace=True),
12 | nn.BatchNorm2d(64),
13 | nn.MaxPool2d(kernel_size=2,stride=2),
14 | nn.Conv2d(64,192,kernel_size=3,padding=2),
15 | nn.ReLU(inplace=True),
16 | nn.BatchNorm2d(192),
17 | nn.MaxPool2d(kernel_size=2,stride=2),
18 | nn.Conv2d(192,384,kernel_size=3,padding=2),
19 | nn.ReLU(inplace=True),
20 | nn.BatchNorm2d(384),
21 | nn.Conv2d(384,256,kernel_size=3,padding=2),
22 | nn.ReLU(inplace=True),
23 | nn.Conv2d(256,256,kernel_size=3,padding=2),
24 | nn.ReLU(inplace=True),
25 | nn.BatchNorm2d(256),
26 | nn.MaxPool2d(kernel_size=2,stride=2),
27 | )
28 | self.avgpool=nn.AdaptiveAvgPool2d((6,6))
29 | self.classifier=nn.Sequential(
30 | nn.Dropout(),
31 | nn.Linear(256*6*6,500),
32 | nn.ReLU(inplace=True),
33 | nn.Dropout(),
34 | nn.Linear(500,100),
35 | nn.ReLU(inplace=True),
36 | nn.Linear(100,num_classes)
37 | )
38 | def forward(self,x):
39 | # x = x.unsqueeze(dim=1)
40 | x=self.features(x)
41 | x=self.avgpool(x)
42 | x=torch.flatten(x,1)
43 | x=self.classifier(x)
44 | return x
45 |
46 | class AlexNet_or(nn.Module):
47 | def __init__(self, dataset='128'):
48 | super(AlexNet_or, self).__init__()
49 | if dataset == '128':
50 | num_classes = 11
51 | elif dataset == '512':
52 | num_classes = 12
53 | elif dataset == '1024':
54 | num_classes = 24
55 | elif dataset == '3040':
56 | num_classes = 106
57 | self.features=nn.Sequential(
58 | nn.Conv2d(1,64,kernel_size=(2,3),stride=2,padding=2),
59 | nn.ReLU(inplace=True),
60 | nn.BatchNorm2d(64),
61 | nn.MaxPool2d(kernel_size=2,stride=2),
62 | nn.Conv2d(64,192,kernel_size=2,padding=2),
63 | nn.ReLU(inplace=True),
64 | nn.BatchNorm2d(192),
65 | nn.MaxPool2d(kernel_size=2,stride=2),
66 | nn.Conv2d(192,384,kernel_size=2,padding=1),
67 | nn.ReLU(inplace=True),
68 | nn.Conv2d(384,256,kernel_size=2,padding=1),
69 | nn.ReLU(inplace=True),
70 | nn.Conv2d(256,256,kernel_size=2,padding=1),
71 | nn.ReLU(inplace=True),
72 | nn.MaxPool2d(kernel_size=2,stride=2),
73 | )
74 | self.avgpool=nn.AdaptiveAvgPool2d((6,6))
75 | self.classifier=nn.Sequential(
76 | nn.Dropout(),
77 | nn.Linear(256*6*6,500),
78 | nn.ReLU(inplace=True),
79 | nn.Dropout(),
80 | nn.Linear(500,100),
81 | nn.ReLU(inplace=True),
82 | nn.Linear(100, num_classes)
83 | )
84 | def forward(self,x):
85 | # x = x.unsqueeze(dim=1)
86 | x=self.features(x)
87 | x=self.avgpool(x)
88 | x=torch.flatten(x,1)
89 | x=self.classifier(x)
90 | return x
91 |
92 | # from torchinfo import summary
93 | # model = AlexNet_or(dataset='3040').cuda()
94 | # summary(model, input_size=(128, 1, 2, 3040))
95 |
96 |
97 |
98 |
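99 | # Hypothetical sanity check (added; not part of the original file): AlexNet_or
100 | # expects a 4-D (batch, 1, 2, length) input, e.g. for the 128-length dataset:
101 | # model = AlexNet_or(dataset='128')
102 | # out = model(torch.randn(4, 1, 2, 128))
103 | # print(out.shape)  # expected: torch.Size([4, 11])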
--------------------------------------------------------------------------------
/models/CNN1D.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 |
5 |
6 | # num_classes = 11
7 | # ResNet {{{
8 | class ResNet1D(nn.Module):
9 | def __init__(self, dataset='128'):
10 | super(ResNet1D, self).__init__()
11 | self.conv1 = ResidualStack(1, kernel_size=(2, 3),pool_size=(2, 2),first=True)
12 | self.conv2 = ResidualStack(32, kernel_size=3, pool_size=2)
13 | self.conv3 = ResidualStack(32, kernel_size=3, pool_size=2)
14 | self.conv4 = ResidualStack(32, kernel_size=3, pool_size=2)
15 | self.conv5 = ResidualStack(32, kernel_size=3, pool_size=2)
16 | self.conv6 = ResidualStack(32, kernel_size=3, pool_size=2)
17 | if dataset == '128':
18 | num_classes = 11
19 | self.dense = nn.Linear(64, 128)
20 | elif dataset == '512':
21 | num_classes = 12
22 | self.dense = nn.Linear(256, 128)
23 | elif dataset == '1024':
24 | num_classes = 24
25 | self.dense = nn.Linear(512, 128)
26 | elif dataset == '3040':
27 | num_classes = 106
28 | self.dense = nn.Linear(1504, 128)
29 | self.drop = nn.Dropout(p=0.3)
30 | self.classfier = nn.Linear(128, num_classes)
31 |
32 | def forward(self, x):
33 |
34 | x = self.conv1(x.unsqueeze(dim=1)).squeeze(dim=2)
35 | x = self.conv2(x)
36 | x = self.conv3(x)
37 | x = self.conv4(x)
38 | x = self.conv5(x)
39 | x = self.conv6(x).view(x.size(0),-1)
40 | fea = x
41 | feats = {}
42 | feats["feats"] = fea
43 | # print(fea.shape)
44 | x = self.classfier(self.drop(self.dense(x)))
45 | return x  # , feats
46 |
47 |
48 | class ResidualStack(nn.Module):
49 | def __init__(self, in_channel, kernel_size, pool_size, first=False):
50 | super(ResidualStack, self).__init__()
51 | mid_channel = 32
52 | padding = 1
53 | if first:
54 | conv = nn.Conv2d
55 | pool = nn.MaxPool2d
56 | self.conv1 = conv(in_channel, mid_channel, kernel_size=1, padding=0, bias=False)
57 | self.conv2 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(1, padding), bias=False)
58 | self.conv3 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(0, padding), bias=False)
59 | self.conv4 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(1, padding), bias=False)
60 | self.conv5 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(0, padding), bias=False)
61 | self.pool = pool(kernel_size=pool_size, stride=pool_size)
62 | else:
63 | conv = nn.Conv1d
64 | pool = nn.MaxPool1d
65 | self.conv1 = conv(in_channel, mid_channel, kernel_size=1, padding=0, bias=False)
66 | self.conv2 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
67 | self.conv3 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
68 | self.conv4 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
69 | self.conv5 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
70 | self.pool = pool(kernel_size=pool_size, stride=pool_size)
71 | def forward(self, x):
72 | # residual 1
73 | x = self.conv1(x)
74 | shortcut = x
75 | x = self.conv2(x)
76 | x = F.relu(x)
77 | x = self.conv3(x)
78 | x += shortcut
79 | x = F.relu(x)
80 |
81 | # residual 2
82 | shortcut = x
83 | x = self.conv4(x)
84 | x = F.relu(x)
85 | x = self.conv5(x)
86 | x += shortcut
87 | x = F.relu(x)
88 | x = self.pool(x)
89 |
90 |
91 | return x
92 |
93 | # def resnet1d(**kwargs):
94 | # return ResNet1D(**kwargs)
95 |
96 |
97 | # data = torch.randn(10,2,128)
98 | # model = ResNet1D(dataset='128')
99 | # out,a = model(data)
100 | # print(out.shape)
101 | # print(a.shape,'@@@')
102 | # from torchsummary import summary
103 | # model = resnet1d().cuda()
104 | # summary(model, (2, 128))
105 |
106 | # from torchinfo import summary
107 | # model = resnet1d(dataset='128').cuda()
108 | # summary(model, input_size=(128, 2, 128))
109 | #
110 |
111 | # data = torch.randn(10,2,128)
112 | # model = ResNet1D()
113 | # out = model(data)
114 | # print(out.shape)
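115 | # Note (added): the first ResidualStack is built from Conv2d/MaxPool2d so the
116 | # (2, L) I/Q plane collapses to one row; forward() squeezes dim=2 and the five
117 | # remaining stacks run as Conv1d blocks, halving the sequence length per pool.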
--------------------------------------------------------------------------------
/models/CNN2D.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 |
5 | # num_classes = 11
6 | # ResNet {{{
7 | class CNN2D(nn.Module):
8 | def __init__(self, dataset='128'):
9 | super(CNN2D, self).__init__()
10 | self.conv1 = nn.Conv2d(1, 256, kernel_size=(1, 3), padding=(0, 1), bias=False)
11 | self.drop1 = nn.Dropout(p=0.5)
12 | self.conv2 = nn.Conv2d(256, 80, kernel_size=(2, 3), bias=False)
13 | self.drop2 = nn.Dropout(p=0.5)
14 | self.dataset = dataset
15 | # if num_classes == 11:
16 | if dataset == '128':
17 | num_classes = 11
18 | self.fc_pool = nn.Linear(126, 128)
19 | self.dense = nn.Linear(10240, 256)
20 | elif dataset == '512':
21 | self.fc_pool = nn.Linear(510, 256)
22 | num_classes = 12
23 | self.dense = nn.Linear(20480, 256)
24 | elif dataset == '1024':
25 | self.fc_pool = nn.Linear(1022, 256)
26 | num_classes = 24
27 | self.dense = nn.Linear(20480, 256)
28 | elif dataset == '3040':
29 | num_classes = 106
30 | self.fc_pool = nn.Linear(3038, 128)
31 | self.dense = nn.Linear(10240, 256)
32 | # self.dense = nn.Linear(10080, 256)
33 | self.drop3 = nn.Dropout(p=0.5)
34 | self.classfier = nn.Linear(256, num_classes)
35 |
36 |
37 | def forward(self, x):
38 | x = x.unsqueeze(dim=1)
39 | x = F.relu(self.conv1(x))
40 | x = self.drop1(x)
41 | x = F.relu(self.conv2(x)).squeeze(dim=2)
42 | x = self.fc_pool(x)
43 | x = self.drop2(x).view(x.size(0), -1)
44 | x = F.relu(self.dense(x))
45 |
46 | x = self.drop3(x)
47 | x = self.classfier(x)
48 | return x
49 |
50 |
51 |
52 |
53 | # def cnn2d(**kwargs):
54 | # return CNN2D(**kwargs)
55 | # data = torch.randn(10,2,512)
56 | # model = cnn2d()
57 | # out = model(data)
58 | # print(out.shape)
59 | # from torchsummary import summary
60 | # model = cnn2d().cuda()
61 | # summary(model, (2, 128))
62 |
63 | # from torchinfo import summary
64 | # model = CNN2D(dataset='512').cuda()
65 | # summary(model, input_size=(128, 2, 512))
66 | #
67 |
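68 | # Hypothetical sanity check (added; not part of the original file): CNN2D
69 | # unsqueezes its own channel dim, so it takes raw (batch, 2, length) I/Q data:
70 | # model = CNN2D(dataset='128')
71 | # out = model(torch.randn(4, 2, 128))
72 | # print(out.shape)  # expected: torch.Size([4, 11])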
--------------------------------------------------------------------------------
/models/LeNet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | class LeNet(nn.Module):
4 | def __init__(self,num_class=None):
5 | super(LeNet, self).__init__()
6 | self.conv1=nn.Conv2d(1,64,3,padding=2)
7 | self.bath1 = nn.BatchNorm2d(64)
8 | self.pool1=nn.MaxPool2d(2,2)
9 | self.conv2=nn.Conv2d(64,128,3,padding=2)
10 | self.bath2=nn.BatchNorm2d(128)
11 | self.pool2=nn.MaxPool2d(2,2)
12 | self.drop=nn.Dropout(p=0.5)
13 | # self.conv3 = nn.Conv2d(16, 32, 3)
14 | # self.bath3 = nn.BatchNorm2d(32)
15 | # self.pool3 = nn.MaxPool2d(2, 2)
16 | self.fc3=nn.Linear(512,120)
17 | self.fc4=nn.Linear(120,84)
18 | self.fc5=nn.Linear(84,num_class)
19 |
20 | def forward(self,x):
21 | # x = x.unsqueeze(dim=1)
22 | x=self.pool1(self.bath1(torch.relu(self.conv1(x))))
23 | x=self.pool2(self.bath2(torch.relu(self.conv2(x))))
24 | # x =self.pool3(self.bath3(torch.relu(self.conv3(x))))
25 | # print(x.size)
26 | x=x.view(-1,self.num_flat_feature(x))
27 |
28 | x=torch.relu(self.fc3(x))
29 | x = self.drop(x)
30 | x=torch.relu(self.fc4(x))
31 | x=self.fc5(x)
32 | return x
33 | def num_flat_feature(self,x):
34 | size=x.size()[1:]
35 | num_feature=1
36 |
37 | for s in size:
38 | num_feature*=s
39 | print("num_feature",num_feature)
40 | return num_feature
41 |
42 | class LeNet_or(nn.Module):
43 | def __init__(self, dataset='128'):
44 | super(LeNet_or, self).__init__()
45 | self.conv1=nn.Conv2d(1,6,kernel_size=(2,3),padding=2)
46 | self.pool1=nn.MaxPool2d(2,2)
47 | self.drop=nn.Dropout(0.5)
48 | self.conv2=nn.Conv2d(6,16,kernel_size=(2,3),padding=2)
49 | self.pool2=nn.MaxPool2d(2,2)
50 | if dataset == '128':
51 | num_classes = 11
52 | self.fc_pool = nn.Linear(33, 33) # modified; otherwise training degrades on datasets longer than 128
53 | self.fc3 = nn.Linear(1056, 500)
54 | elif dataset == '512':
55 | num_classes = 12
56 | self.fc_pool = nn.Linear(129, 128) # modified; otherwise training degrades on datasets longer than 128
57 | self.fc3 = nn.Linear(4096, 500)
58 | elif dataset == '1024':
59 | num_classes = 24
60 | self.fc_pool = nn.Linear(257, 128) # modified; otherwise training degrades on datasets longer than 128
61 | self.fc3 = nn.Linear(4096, 500)
62 | elif dataset == '3040':
63 | num_classes = 106
64 | self.fc_pool = nn.Linear(761, 128) # modified; otherwise training degrades on datasets longer than 128
65 | self.fc3 = nn.Linear(4096, 500)
66 | self.fc4=nn.Linear(500,84)
67 | self.fc5=nn.Linear(84, num_classes)
68 |
69 | def forward(self,x):
70 | # x = x.unsqueeze(dim=1)
71 | x=self.pool1(torch.relu(self.conv1(x)))
72 | x=self.pool2(torch.relu(self.conv2(x)))
73 | # print(x.size)
74 | x=self.fc_pool(x)
75 | x=x.view(-1,self.num_flat_feature(x))
76 | # print(x.shape)
77 | x=torch.relu(self.fc3(x))
78 | x=self.drop(x)
79 | x=torch.relu(self.fc4(x))
80 | x=self.fc5(x)
81 | return x
82 | def num_flat_feature(self,x):
83 | size=x.size()[1:]
84 | num_feature=1
85 | for s in size:
86 | num_feature*=s
87 | # print("num_feature",num_feature)
88 | return num_feature
89 |
90 | # from torchinfo import summary
91 | # model = LeNet_or(dataset='1024').cuda()
92 | # summary(model, input_size=(128, 1, 2, 1024))
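93 | # Hypothetical sanity check (added; not part of the original file): LeNet_or
94 | # also takes 4-D (batch, 1, 2, length) input, e.g. for the 128-length dataset:
95 | # model = LeNet_or(dataset='128')
96 | # out = model(torch.randn(4, 1, 2, 128))
97 | # print(out.shape)  # expected: torch.Size([4, 11])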
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | from models.lstm import lstm2
2 | from models.mcldnn import MCLDNN
3 |
4 |
5 | __all__ = [
6 | "lstm2",
7 | "MCLDNN"
8 |
9 | ]
10 |
--------------------------------------------------------------------------------
/models/__pycache__/Alexnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/Alexnet.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/CNN1D.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/CNN1D.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/CNN2D.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/CNN2D.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/LeNet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/LeNet.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/gru.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/gru.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/lstm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/lstm.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/mcldnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/mcldnn.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/network.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/network.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/vgg16.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/models/__pycache__/vgg16.cpython-37.pyc
--------------------------------------------------------------------------------
/models/gru.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 | import torch
4 | import torch.nn.functional as F
5 |
6 | # num_classes = 11
7 | class gru2(nn.Module):
8 | def __init__(self, dataset='128'):
9 | super(gru2, self).__init__()
10 |
11 | self.gru1 = nn.GRU(
12 | input_size=2,
13 | hidden_size=128,
14 | num_layers=1,
15 | bias=False,
16 | batch_first=True
17 | )
18 | self.gru2 = nn.GRU(
19 | input_size=128,
20 | hidden_size=64,
21 | num_layers=1,
22 | bias=False,
23 | batch_first=True
24 | )
25 |
26 | if dataset == '128':
27 | num_classes = 11
28 | self.fc1 = nn.Linear(128*64, 64)
29 | self.fc2 = nn.Linear(64, num_classes)
30 | elif dataset == '512':
31 | num_classes = 12
32 | self.fc1 = nn.Linear(512*64, 64)
33 | self.fc2 = nn.Linear(64, num_classes)
34 | elif dataset == '1024':
35 | num_classes = 24
36 | self.fc1 = nn.Linear(1024*64, 64)
37 | self.fc2 = nn.Linear(64, num_classes)
38 | elif dataset == '3040':
39 | num_classes = 106
40 | self.fc1 = nn.Linear(3040*64, 64)
41 | self.fc2 = nn.Linear(64, num_classes)
42 |
43 |
44 | def forward(self, x):
45 | x, _ = self.gru1(x.transpose(2,1))
46 | x = F.relu(x)
47 | x, _ = self.gru2(x)
48 | x = torch.reshape(x, [x.shape[0],-1])
49 | x = self.fc1(x)
50 | x = self.fc2(x)
51 |
52 | return x
53 |
54 | # from torchinfo import summary
55 | # model = gru2(dataset='128').cuda()
56 | # summary(model, input_size=(128, 2, 128))
57 |
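58 | # Hypothetical sanity check (added; not part of the original file): gru2 takes
59 | # (batch, 2, length) and transposes it to batch-first (batch, length, 2):
60 | # model = gru2(dataset='128')
61 | # print(model(torch.randn(4, 2, 128)).shape)  # expected: torch.Size([4, 11])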
--------------------------------------------------------------------------------
/models/lstm.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 | import torch
4 |
5 | # num_classes = 11
6 | class lstm2(nn.Module):
7 | def __init__(self, dataset='128'):
8 | super(lstm2, self).__init__()
9 |
10 | self.lstm1 = nn.LSTM(
11 | input_size=2,
12 | hidden_size=128,
13 | num_layers=1,
14 | bias=False,
15 | batch_first=True
16 | )
17 | self.lstm2 = nn.LSTM(
18 | input_size=128,
19 | hidden_size=64,
20 | num_layers=1,
21 | bias=False,
22 | batch_first=True
23 | )
24 |
25 | if dataset == '128':
26 | num_classes = 11
27 | self.fc = nn.Linear(128*64, num_classes)
28 | # self.fc = nn.Linear(128*128, num_classes)
29 | elif dataset == '512':
30 | num_classes = 12
31 | self.fc = nn.Linear(512*64, num_classes)
32 | elif dataset == '1024':
33 | num_classes = 24
34 | self.fc = nn.Linear(1024*64, num_classes)
35 | elif dataset == '3040':
36 | num_classes = 106
37 | self.fc = nn.Linear(3040*64, num_classes)
38 |
39 | # if num_classes == 10:
40 | # self.fc = nn.Linear(128*64, num_classes)
41 | # if num_classes == 11:
42 | # self.fc = nn.Linear(128*64, num_classes)
43 | # if num_classes == 12:
44 | # self.fc = nn.Linear(512*64, num_classes)
45 |
46 | def forward(self, x):
47 |
48 | x, _ = self.lstm1(x.transpose(2,1))
49 | # print('1:',x.shape)
50 | x, _ = self.lstm2(x)
51 | # print('2:', x.shape)
52 | x = torch.reshape(x, [x.shape[0],-1])
53 | # print(x.shape)
54 | x = self.fc(x)
55 |
56 | return x
57 |
58 | # data = torch.randn(20,2,128)
59 | # model = lstm2()
60 | # print(model(data).shape)
61 |
62 | # from torchinfo import summary
63 | # model = lstm2(dataset='3040').cuda()
64 | # summary(model, input_size=(128, 2, 3040))
65 |
66 |
--------------------------------------------------------------------------------
/models/mcldnn.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 | from torch.nn import Sequential
5 |
6 | # num_classes = 11
7 | # BasicBlock {{{
8 | class MCLDNN(nn.Module):
9 |
10 | def __init__(self, dataset='128'):
11 | super(MCLDNN, self).__init__()
12 | if dataset == '128':
13 | num_classes = 11
14 | elif dataset == '512':
15 | num_classes = 12
16 | elif dataset == '1024':
17 | num_classes = 24
18 | elif dataset == '3040':
19 | num_classes = 106
20 | self.conv1 = nn.Conv1d(
21 | in_channels=2,
22 | out_channels=50,
23 | kernel_size=7,
24 | bias=False,
25 | padding=3,
26 | )
27 | self.conv2 = Sequential(
28 | nn.Conv1d(
29 | in_channels=2,
30 | out_channels=100,
31 | kernel_size=7,
32 | bias=False,
33 | padding=3,
34 | groups=2
35 | ),
36 | nn.ReLU(True),
37 | nn.Conv1d(
38 | in_channels=100,
39 | out_channels=50,
40 | kernel_size=7,
41 | bias=False,
42 | padding=3,
43 | ))
44 | self.conv3 = nn.Conv1d(
45 | in_channels=100,
46 | out_channels=100,
47 | kernel_size=5,
48 | bias=False
49 | )
50 | self.lstm1 = nn.LSTM(
51 | input_size=100,
52 | hidden_size=128,
53 | num_layers=1,
54 | bias=False,
55 | batch_first=True
56 | )
57 | self.lstm2 = nn.LSTM(
58 | input_size=128,
59 | hidden_size=128,
60 | num_layers=1,
61 | bias=False,
62 | batch_first=True
63 | )
64 | self.fc = Sequential(
65 | nn.Linear(128, 128),
66 | nn.SELU(True),
67 | nn.Dropout(0.5),
68 | nn.Linear(128, 128),
69 | nn.SELU(True),
70 | nn.Dropout(0.5),
71 | nn.Linear(128, num_classes)
72 | )
73 | def forward(self, x):
74 | assert len(x.shape)==3 and x.shape[1]==2
75 | x1 = self.conv1(x)
76 | x2 = self.conv2(x)
77 | x3 = F.relu(torch.cat([x1,x2],dim=1))
78 | x3 = F.relu(self.conv3(x3))
79 | x3, _ = self.lstm1(x3.transpose(2,1))
80 | _, (x3, __) = self.lstm2(x3)
81 | x3 = self.fc(x3.squeeze(dim=0))  # drop only the num_layers dim, keep batch
82 |
83 | return x3
84 |
85 |
86 | # model = MCLDNN(dataset='512')
87 | # data = torch.randn(10,2,512)
88 | # out = model(data)
89 | # print(out.shape)
90 |
91 | # from torchinfo import summary
92 | # model = MCLDNN(dataset='3040').cuda()
93 | # summary(model, input_size=(128, 2, 3040))
94 |
95 |
96 |
97 |
98 |
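99 | # Note (added): forward() runs two parallel front-ends over the raw I/Q input
100 | # (conv1 mixes both channels; conv2 uses groups=2, one filter bank per channel),
101 | # concatenates them to 100 channels, and classifies the final LSTM hidden state.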
--------------------------------------------------------------------------------
/models/network.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | from args import args
4 | from models.lstm import lstm2
5 | from models.mcldnn import MCLDNN
6 | from models.Alexnet import AlexNet_or
7 | from models.CNN1D import ResNet1D
8 | from models.CNN2D import CNN2D
9 | from models.gru import gru2
10 | from models.LeNet import LeNet_or
11 | from models.vgg16 import VGG16_or
12 | from models.mobilenet import mobilenet
13 | from models.resnet import resnet
14 | from models.RRR import resnet as r8
15 | from models.vgg import vgg11_bn
16 |
17 | os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3,4,5,6,7'
18 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
19 |
20 | def define_model(name):
21 | if name == 'LSTM':
22 | net = lstm2(dataset=args.dataset).to(device)
23 | elif name == 'MCLDNN':
24 | net = MCLDNN(dataset=args.dataset).to(device)
25 | elif name == 'Alexnet':
26 | net = AlexNet_or(dataset=args.dataset).to(device)
27 | elif name == 'CNN1D':
28 | net = ResNet1D(dataset=args.dataset).to(device)
29 | elif name == 'CNN2D':
30 | net = CNN2D(dataset=args.dataset).to(device)
31 | elif name == 'Lenet':
32 | net = LeNet_or(dataset=args.dataset).to(device)
33 | elif name == 'Vgg16':
34 | net = VGG16_or(dataset=args.dataset).to(device)
35 | elif name == 'GRU':
36 | net = gru2(dataset=args.dataset).to(device)
37 | elif name == 'mobilenet':
38 | net = mobilenet().to(device)
39 | # elif name == 'resnet8':
40 | # net = resnet(depth=8).to(device)
41 | elif name == 'r8conv1':
42 | net = r8(depth=8).to(device)
43 | elif name == 'vgg11_bn':
44 | net = vgg11_bn().to(device)
45 | else:
46 | raise Exception('model name does not exist.')
47 | # if True:
48 | # # net = torch.nn.DataParallel(net).cuda()
49 | # net = net.to(device)
50 | # else:
51 | # net = torch.nn.DataParallel(net)
52 |
53 | return net
--------------------------------------------------------------------------------
/models/resnet.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import math
3 |
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 | from functools import partial
8 | from torch.autograd import Variable
9 | import os
10 | from args import args
11 | os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3,4,5,6,7'
12 | device = torch.device(f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
13 |
14 | __all__ = ['resnet']
15 |
16 | def conv3x3(in_planes, out_planes, stride=1):
17 | "3x3 convolution with padding"
18 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
19 | padding=1, bias=False)
20 |
21 |
22 | class BasicBlock(nn.Module):
23 | expansion = 1
24 |
25 | def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):
26 | # cfg should be a number in this case
27 | super(BasicBlock, self).__init__()
28 | self.conv1 = conv3x3(inplanes, cfg, stride)
29 | self.bn1 = nn.BatchNorm2d(cfg)
30 | self.relu = nn.ReLU(inplace=True)
31 | self.conv2 = conv3x3(cfg, planes)
32 | self.bn2 = nn.BatchNorm2d(planes)
33 | self.downsample = downsample
34 | self.stride = stride
35 |
36 | def forward(self, x):
37 | residual = x
38 |
39 | out = self.conv1(x)
40 | out = self.bn1(out)
41 | out = self.relu(out)
42 |
43 | out = self.conv2(out)
44 | out = self.bn2(out)
45 |
46 | if self.downsample is not None:
47 | residual = self.downsample(x)
48 |
49 | out += residual
50 | out = self.relu(out)
51 |
52 | return out
53 |
54 | def downsample_basic_block(x, planes):
55 | x = F.adaptive_avg_pool2d(x, (1, 1))
56 | #x = nn.AvgPool2d(2,2)(x)
57 | zero_pads = torch.Tensor(
58 | x.size(0), planes - x.size(1), x.size(2), x.size(3)).zero_()
59 | if isinstance(x.data, torch.cuda.FloatTensor):
60 | # zero_pads = zero_pads.cuda()
61 | zero_pads = zero_pads.to(device)
62 |
63 | out = Variable(torch.cat([x.data, zero_pads], dim=1))
64 |
65 | return out
66 |
67 | class ResNet(nn.Module):
68 |
69 | def __init__(self, depth, dataset='cifar10', cfg=None):
70 | super(ResNet, self).__init__()
71 | # Model type specifies number of layers for CIFAR-10 model
72 | assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
73 | n = (depth - 2) // 6
74 |
75 | block = BasicBlock
76 | if cfg is None:
77 | cfg = [[16]*n, [32]*n, [64]*n]
78 | cfg = [item for sub_list in cfg for item in sub_list]
79 |
80 | self.cfg = cfg
81 |
82 | self.inplanes = 16
83 | #self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,bias=False)
84 | self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1,bias=False)
85 | self.bn1 = nn.BatchNorm2d(16)
86 | self.relu = nn.ReLU(inplace=True)
87 | self.layer1 = self._make_layer(block, 16, n, cfg=cfg[0:n])
88 | self.layer2 = self._make_layer(block, 32, n, cfg=cfg[n:2*n], stride=2)
89 | self.layer3 = self._make_layer(block, 64, n, cfg=cfg[2*n:3*n], stride=2)
90 | self.avgpool = nn.AvgPool2d(8)
91 | if args.dataset == '128':
92 | num_classes = 11
93 | elif args.dataset == '1024':
94 | num_classes = 24
95 | self.fc = nn.Linear(64 * block.expansion, num_classes)
96 |
97 | for m in self.modules():
98 | if isinstance(m, nn.Conv2d):
99 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
100 | m.weight.data.normal_(0, math.sqrt(2. / n))
101 | elif isinstance(m, nn.BatchNorm2d):
102 | m.weight.data.fill_(1)
103 | m.bias.data.zero_()
104 |
105 | def _make_layer(self, block, planes, blocks, cfg, stride=1):
106 | downsample = None
107 | if stride != 1 or self.inplanes != planes * block.expansion:
108 | downsample = partial(downsample_basic_block, planes=planes*block.expansion)
109 |
110 | layers = []
111 | layers.append(block(self.inplanes, planes, cfg[0], stride, downsample))
112 | self.inplanes = planes * block.expansion
113 | for i in range(1, blocks):
114 | layers.append(block(self.inplanes, planes, cfg[i]))
115 |
116 | return nn.Sequential(*layers)
117 |
118 | def forward(self, x):
119 | x = self.conv1(x)
120 | x = self.bn1(x)
121 | x = self.relu(x) # 32x32
122 |
123 | x = self.layer1(x) # 32x32
124 | x = self.layer2(x) # 16x16
125 | x = self.layer3(x) # 8x8
126 | x = F.adaptive_avg_pool2d(x, (1, 1))
127 | #x = self.avgpool(x)
128 | x = x.view(x.size(0), -1)
129 | x = self.fc(x)
130 |
131 | return x
132 |
133 | def resnet(**kwargs):
134 | """
135 | Constructs a ResNet model.
136 | """
137 | return ResNet(**kwargs)
138 |
139 | # if __name__ == '__main__':
140 | # net = resnet(depth=56)
141 | # x=Variable(torch.FloatTensor(16, 3, 32, 32))
142 | # y = net(x)
143 | # print(y.data.shape)
144 |
145 | # data = torch.randn(10,1,2,128)
146 | # model = resnet(depth=8)
147 | # out = model(data)
148 | # print(out.shape)
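149 | # Note (added): this ResNet only maps args.dataset '128' -> 11 classes and
150 | # '1024' -> 24 classes; any other dataset value leaves num_classes unbound and
151 | # __init__ fails with UnboundLocalError before building the fc layer.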
--------------------------------------------------------------------------------
/models/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for model in CNN1D CNN2D LSTM GRU MCLDNN Lenet Vgg16 Alexnet;
3 | do
4 | for dataset in 128 1024 3040;
5 | do
6 | CUDA_VISIBLE_DEVICES=3 python train.py --model $model --dataset $dataset --num_workers 8 --epochs 1;
7 | done
8 | done
9 |
10 |
11 |
--------------------------------------------------------------------------------
/models/train_par.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for dataset in 1024;
3 | do
4 | CUDA_VISIBLE_DEVICES=2 python train.py --model CNN1D --dataset $dataset --num_workers 8 --epochs 50 &
5 | CUDA_VISIBLE_DEVICES=3 python train.py --model CNN2D --dataset $dataset --num_workers 8 --epochs 50 &
6 | CUDA_VISIBLE_DEVICES=4 python train.py --model LSTM --dataset $dataset --num_workers 8 --epochs 50 &
7 | CUDA_VISIBLE_DEVICES=5 python train.py --model GRU --dataset $dataset --num_workers 8 --epochs 50 ;
8 | CUDA_VISIBLE_DEVICES=2 python train.py --model MCLDNN --dataset $dataset --num_workers 8 --epochs 50 &
9 | CUDA_VISIBLE_DEVICES=3 python train.py --model Lenet --dataset $dataset --num_workers 8 --epochs 50 &
10 | CUDA_VISIBLE_DEVICES=4 python train.py --model Vgg16 --dataset $dataset --num_workers 8 --epochs 50 &
11 | CUDA_VISIBLE_DEVICES=5 python train.py --model Alexnet --dataset $dataset --num_workers 8 --epochs 50 ;
12 | done
13 |
14 | for dataset in 3040;
15 | do
16 | CUDA_VISIBLE_DEVICES=2 python train.py --model CNN2D --dataset $dataset --num_workers 8 --epochs 50 &
17 | CUDA_VISIBLE_DEVICES=4 python train.py --model Lenet --dataset $dataset --num_workers 8 --epochs 50 ;
18 | done
19 |
20 | for dataset in 512;
21 | do
22 | CUDA_VISIBLE_DEVICES=2 python train.py --model CNN2D --dataset $dataset --num_workers 8 --epochs 50;
23 | done
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/models/vgg.py:
--------------------------------------------------------------------------------
1 | """vgg in pytorch
2 |
3 |
4 | [1] Karen Simonyan, Andrew Zisserman
5 |
6 | Very Deep Convolutional Networks for Large-Scale Image Recognition.
7 | https://arxiv.org/abs/1409.1556v6
8 | """
9 | '''VGG11/13/16/19 in Pytorch.'''
10 |
11 | import torch
12 | import torch.nn as nn
13 |
14 | cfg = {
15 | 'A' : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
16 | 'B' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
17 | 'D' : [16, 16, 'M', 32, 32, 'M', 64, 64, 64, 'M', 128, 128, 128, 'M', 128, 128, 128, 'M'],
18 | 'E' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
19 | }
20 |
21 | class VGG(nn.Module):
22 |
23 | def __init__(self, features, num_class=24):
24 | super().__init__()
25 | self.features = features
26 |
27 | self.classifier = nn.Sequential(
28 | nn.Linear(512, 2048),
29 | nn.ReLU(inplace=True),
30 | nn.Dropout(),
31 | nn.Linear(2048, 2048),
32 | nn.ReLU(inplace=True),
33 | nn.Dropout(),
34 | nn.Linear(2048, num_class)
35 | )
36 |
37 | self.pooling = nn.Sequential(
38 | nn.AdaptiveAvgPool1d(1),
39 | nn.Flatten()
40 | )
41 |
42 | def forward(self, x):
43 | output = self.features(x)
44 | # output = output.view(output.size()[0], -1)
45 | output = self.pooling(output)
46 | output = self.classifier(output)
47 |
48 | return output
49 |
50 | def make_layers(cfg, batch_norm=False):
51 | layers = []
52 |
53 | input_channel = 2
54 | for l in cfg:
55 | if l == 'M':
56 | layers += [nn.MaxPool1d(kernel_size=2, stride=2)]
57 | continue
58 | # print(l)
59 | layers += [nn.Conv1d(input_channel, l, kernel_size=3, padding=1)]
60 |
61 | if batch_norm:
62 | layers += [nn.BatchNorm1d(l)]
63 |
64 | layers += [nn.ReLU(inplace=True)]
65 | input_channel = l
66 |
67 | return nn.Sequential(*layers)
68 |
69 | def vgg11_bn():
70 | return VGG(make_layers(cfg['A'], batch_norm=True))
71 |
72 | def vgg13_bn():
73 | return VGG(make_layers(cfg['B'], batch_norm=True))
74 |
75 | def vgg16_bn():
76 | return VGG(make_layers(cfg['D'], batch_norm=True))
77 |
78 | def vgg19_bn():
79 | return VGG(make_layers(cfg['E'], batch_norm=True))
80 |
81 | # from torchsummary import summary
82 | # summary(vgg16_bn().cuda(), (2, 1024))
83 |
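84 | # Note (added): these blocks use Conv1d over the 2-channel I/Q sequence, and
85 | # cfg 'D' is a slimmed variant (16-128 filters) of the standard VGG16 widths,
86 | # so vgg16_bn() ends at 128 channels and does not match the fixed
87 | # Linear(512, 2048) classifier head; vgg11_bn() (cfg 'A', 512 channels) is the
88 | # variant used by models/network.py.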
--------------------------------------------------------------------------------
/models/vgg16.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import numpy as np
4 | from torch.autograd import Variable
5 |
6 | def conv_layer(chann_in, chann_out, k_size, p_size):
7 | layer = nn.Sequential(
8 | nn.Conv2d(chann_in, chann_out, kernel_size=k_size, padding=p_size),
9 | nn.BatchNorm2d(chann_out),
10 | nn.ReLU()
11 | )
12 | return layer
13 |
14 | def vgg_conv_block(in_list, out_list, k_list, p_list, pooling_k, pooling_s):
15 | layers = [ conv_layer(in_list[i], out_list[i], k_list[i], p_list[i]) for i in range(len(in_list)) ]
16 | layers += [ nn.MaxPool2d(kernel_size = pooling_k, stride = pooling_s)]
17 | return nn.Sequential(*layers)
18 |
19 | def vgg_fc_layer(size_in, size_out):
20 | layer = nn.Sequential(
21 | nn.Linear(size_in, size_out),
22 | nn.BatchNorm1d(size_out),
23 | nn.ReLU()
24 | )
25 | return layer
26 |
27 |
28 | class VGG16_or(nn.Module):
29 | def __init__(self, dataset='128'):
30 | super(VGG16_or, self).__init__()
31 | # Conv blocks (BatchNorm + ReLU activation added in each block)
32 | self.layer1 = vgg_conv_block([1,64], [64,64], [3,3], [3,3], 2, 2)
33 | self.layer2 = vgg_conv_block([64,128], [128,128], [3,3], [3,3], 2, 2)
34 | self.layer3 = vgg_conv_block([128,256,256], [256,256,256], [3,3,3], [1,1,1], 2, 2)
35 | self.layer4 = vgg_conv_block([256,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
36 | # self.layer5 = vgg_conv_block([512,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
37 |
38 | # FC layers
39 | if dataset == '128':
40 | num_classes = 11
41 | self.layer5 = vgg_fc_layer(4608, 256)
42 | elif dataset == '512':
43 | num_classes = 12
44 | self.layer5 = vgg_fc_layer(16896, 256)
45 | elif dataset == '1024':
46 | num_classes = 24
47 | self.layer5 = vgg_fc_layer(33280, 256)
48 | elif dataset == '3040':
49 | num_classes = 106
50 | self.layer5 = vgg_fc_layer(97792, 256)
51 | self.layer6 = vgg_fc_layer(256, 128)
52 |
53 | # Final layer
54 | self.layer7 = nn.Linear(128, num_classes)
55 |
56 | def forward(self, x):
57 |
58 | out = self.layer1(x)
59 | out = self.layer2(out)
60 | out = self.layer3(out)
61 | out = self.layer4(out)
62 |
63 | out = out.view(-1, self.num_flat_feature(out))
64 | # print(out.shape)
65 | out = self.layer5(out)
66 | out = self.layer6(out)
67 | out = self.layer7(out)
68 |
69 | return out
70 |
71 | def num_flat_feature(self,x):
72 | size=x.size()[1:]
73 | num_feature=1
74 | for s in size:
75 | num_feature*=s
76 | # print("num_feature",num_feature)
77 | return num_feature
78 |
79 |
80 |
81 |
82 |
83 | class VGG16(nn.Module):
84 | def __init__(self, n_classes=11):
85 | super(VGG16, self).__init__()
86 |
87 | # Conv blocks (BatchNorm + ReLU activation added in each block)
88 | self.layer1 = vgg_conv_block([1,64], [64,64], [3,3], [3,3], 2, 2)
89 | self.layer2 = vgg_conv_block([64,128], [128,128], [3,3], [3,3], 2, 2)
90 | self.layer3 = vgg_conv_block([128,256,256], [256,256,256], [3,3,3], [1,1,1], 2, 2)
91 | self.layer4 = vgg_conv_block([256,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
92 | self.layer5 = vgg_conv_block([512,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
93 |
94 | # FC layers
95 | self.layer6 = vgg_fc_layer(512, 256)
96 | self.layer7 = vgg_fc_layer(256, 128)
97 |
98 | # Final layer
99 | self.layer8 = nn.Linear(128, n_classes)
100 |
101 | def forward(self, x):
102 |
103 | out = self.layer1(x)
104 | out = self.layer2(out)
105 | out = self.layer3(out)
106 | out = self.layer4(out)
107 | vgg16_features = self.layer5(out)
108 | out = vgg16_features.view(-1, self.num_flat_feature(vgg16_features))
109 | out = self.layer6(out)
110 | out = self.layer7(out)
111 | out = self.layer8(out)
112 |
113 | return out
114 |
115 |
116 | # from torchinfo import summary
117 | # model = VGG16_or(dataset='3040').cuda()
118 | # summary(model, input_size=(128, 1, 2, 3040))
--------------------------------------------------------------------------------
/order/ParaTestMob.sh:
--------------------------------------------------------------------------------
1 | #00.010.01 0.5 1
2 | cd ..
3 | for K in 0;
4 | do
5 | python AMD.py \
6 | --save_root "./results/para/0.1/mobilenet128" \
7 | --t1_model "" \
8 | --t2_model "" \
9 | --s_init "/base-mobilenet128/initial_rmobilenet.pth.tar" \
10 | --dataset 128 --t1_name MCLDNN --t2_name Vgg16 --s_name mobilenet \
11 | --kd_mode logits --lambda_kd1 0.5 --lambda_kd2 1.2 --lambda_kd3 6 \
12 | --epsilon 0.06 --step 5 --step_size 0.03 \
13 | --note 0.5-1.2-6 \
14 | --gpu_index 4 &
15 |
16 |
17 |
18 |
19 | done
--------------------------------------------------------------------------------
/order/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/order/__init__.py
--------------------------------------------------------------------------------
/order/def.sh:
--------------------------------------------------------------------------------
1 | for ((i=0;i<1;i++))
2 | do
3 | cd ../Defenses
4 | # python PAT_Test.py --dataset=128 --eps=0.031 --step_num=40 --step_size=0.01 --model Vgg16 --name Vgg16-0.031-40-0.01-e100 --gpu_index $1
5 |
6 | # python PAT_Test.py --dataset=128 --eps=0.062 --step_num=40 --step_size=0.007843 --model CNN1D --name alldb-CNN1D-0.062-40-0.007 --gpu_index $1
7 | # python PAT_Test.py --dataset=128 --eps=0.062 --step_num=40 --step_size=0.007843 --model LSTM --name alldb-LSTM-0.062-40-0.007 --gpu_index $1
8 |
9 |
10 | done
--------------------------------------------------------------------------------
/order/signaldB.sh:
--------------------------------------------------------------------------------
1 | location=(
2 | "/results/para/0.1/mobilenet128/1-1-1/checkpoint.pth_200.tar"
3 | ""
4 |
5 | )
6 |
7 | for modelL in ${location[@]}
8 | do
9 | for ((i=-4;i<19;i=i+2))
10 | do
11 | j=$i
12 | cd ../CleanDatasets
13 |
14 | python CandidatesSelection.py --dataset=128 --number=1100 --gpu_index $1 --model $2 --db $j --note $modelL --location $modelL
15 | cd ../Attacks
16 | # pgd
17 | python PGD_Generation.py --dataset=128 --epsilon=0.15 --epsilon_iter=0.03 --num_steps 15 --gpu_index $1 --model $2 --db $j --note $modelL --location $modelL
18 | # UMIfgsm
19 | # python UMIFGSM_Generation.py --dataset=128 --epsilon=0.15 --gpu_index $1 --model $2 --db $j --note $modelL --location $modelL
20 | # AA
21 | # python AutoAttack.py --dataset=128 --epsilon=0.15 --gpu_index $1 --model $2 --location $modelL --note $modelL --db $j
22 | # df
23 | # python DeepFool_Generation.py --dataset=128 --gpu_index $1 --model CNN1D --location $modelL --note $modelL --db $j
24 |
25 | done
26 |
27 | done
--------------------------------------------------------------------------------
/order/signaldBr.sh:
--------------------------------------------------------------------------------
1 | location=(
2 | #"/home/zjut/public/signal/wzw/KD/results/newMV/0.1/para/r8conv1/1-0.01-1/checkpoint.pth_200.tar"
3 | #"/home/zjut/public/signal/wzw/KD/DefenseEnhancedModels/PAT/128_r8conv1only_eps0.06_batch128/model_best.pth_epoch199.tar"
4 | #"/home/zjut/public/signal/wzw/KD/DefenseEnhancedModels/DD/128_r8conv1_temp30.0/checkpoint.pth_199.tar"
5 | #"/home/zjut/public/signal/wzw/KD/raw_model_training/result/model/128_r8conv1_best_lr=0.001.pth"
6 | #"/home/zjut/public/signal/wzw/KD/DefenseEnhancedModels/TRADESr8conv1128/r8conv1-epoch199.pt"
7 | "/home/zjut/public/signal/wzw/KD/results/para/0.1/mobilenet128/1-1-1/checkpoint.pth_200.tar"
8 | )
9 |
10 | for modelL in ${location[@]}
11 | do
12 | for ((i=-4;i<19;i=i+2))
13 | do
14 | j=$i
15 | cd ../CleanDatasets
16 |
17 | python CandidatesSelection.py --dataset=128 --number=1100 --gpu_index $1 --model $2 --db $j --note $modelL --location $modelL
18 | cd ../Attacks
19 | # pgd
20 | # python PGD_Generation.py --dataset=128 --epsilon=0.15 --epsilon_iter=0.03 --num_steps 15 --gpu_index $1 --model $2 --db $j --note $modelL
21 | # UMIfgsm
22 | python UMIFGSM_Generation.py --dataset=128 --epsilon=0.15 --gpu_index $1 --model $2 --db $j --note $modelL --location $modelL
23 | # AA
24 | python AutoAttack.py --dataset=128 --epsilon=0.15 --gpu_index $1 --model $2 --location $modelL --note $modelL --db $j
25 | # df
26 | # python DeepFool_Generation.py --dataset=128 --gpu_index $1 --model CNN1D --location $modelL --note $modelL --db $j
27 |
28 | done
29 |
30 | done
--------------------------------------------------------------------------------
/order/testattack.sh:
--------------------------------------------------------------------------------
1 | for ((i=0;i<1;i++))
2 | #for ((i=-4;i<19;i=i+2))
3 | do
4 | j=$i
5 | cd ../CleanDatasets
6 | # python CandidatesSelection.py --dataset=128 --number=22000 --gpu_index $1 --model $2
7 |
8 | python CandidatesSelection.py --dataset=128 --number=19000 --gpu_index $1 --model $2 # 11
9 | # python CandidatesSelection.py --dataset=128 --number=1100 --gpu_index $1 --model $2 --db $j
10 | cd ../Attacks
11 | # python PGD_Generation.py --dataset=128 --epsilon=0.15 --epsilon_iter=0.03 --num_steps 15 --gpu_index $1 --model $2 --db $j
12 | # pgd
13 | for eps in 0.02 0.04 0.06 0.08 0.10 0.12 0.14 0.16 0.18 0.20 0.22 0.24 0.26 0.28 0.30
14 | do
15 | python PGD_Generation.py --dataset=128 --epsilon=$eps --epsilon_iter=0.03 --num_steps 15 --gpu_index $1 --model $2
16 | done
17 |
18 | # python FGSM_Generation.py --dataset=128 --epsilon=0.15 --gpu_index $1 --model $2 --db $j
19 | for eps in 0.02 0.04 0.06 0.08 0.10 0.12 0.14 0.16 0.18 0.20 0.22 0.24 0.26 0.28 0.30
20 | do
21 | python FGSM_Generation.py --dataset=128 --epsilon=$eps --gpu_index $1 --model $2
22 | done
23 |
24 | # UMIfgsm
25 | # python UMIFGSM_Generation.py --dataset=128 --epsilon=0.15 --gpu_index $1 --model $2 --db $j
26 | for eps in 0.02 0.04 0.06 0.08 0.10 0.12 0.14 0.16 0.18 0.20 0.22 0.24 0.26 0.28 0.30
27 | do
28 | python UMIFGSM_Generation.py --dataset=128 --epsilon=$eps --gpu_index $1 --model $2
29 | done
30 |
31 | done
--------------------------------------------------------------------------------
/raw_model_training/Alexnet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import numpy as np
4 | from torch.autograd import Variable
5 | class AlexNet(nn.Module):
6 | def __init__(self, num_classes=None):
7 | super(AlexNet, self).__init__()
8 | self.features=nn.Sequential(
9 | nn.Conv2d(1,64,kernel_size=(3,3),stride=1,padding=2),
10 |
11 | nn.ReLU(inplace=True),
12 | nn.BatchNorm2d(64),
13 | nn.MaxPool2d(kernel_size=2,stride=2),
14 | nn.Conv2d(64,192,kernel_size=3,padding=2),
15 | nn.ReLU(inplace=True),
16 | nn.BatchNorm2d(192),
17 | nn.MaxPool2d(kernel_size=2,stride=2),
18 | nn.Conv2d(192,384,kernel_size=3,padding=2),
19 | nn.ReLU(inplace=True),
20 | nn.BatchNorm2d(384),
21 | nn.Conv2d(384,256,kernel_size=3,padding=2),
22 | nn.ReLU(inplace=True),
23 | nn.Conv2d(256,256,kernel_size=3,padding=2),
24 | nn.ReLU(inplace=True),
25 | nn.BatchNorm2d(256),
26 | nn.MaxPool2d(kernel_size=2,stride=2),
27 | )
28 | self.avgpool=nn.AdaptiveAvgPool2d((6,6))
29 | self.classifier=nn.Sequential(
30 | nn.Dropout(),
31 | nn.Linear(256*6*6,500),
32 | nn.ReLU(inplace=True),
33 | nn.Dropout(),
34 | nn.Linear(500,100),
35 | nn.ReLU(inplace=True),
36 | nn.Linear(100,num_classes)
37 | )
38 | def forward(self,x):
39 | # x = x.unsqueeze(dim=1)
40 | x=self.features(x)
41 | x=self.avgpool(x)
42 | x=torch.flatten(x,1)
43 | x=self.classifier(x)
44 | return x
45 |
46 | class AlexNet_or(nn.Module):
47 | def __init__(self, dataset='128'):
48 | super(AlexNet_or, self).__init__()
49 | if dataset == '128':
50 | num_classes = 11
51 | elif dataset == '512':
52 | num_classes = 12
53 | elif dataset == '1024':
54 | num_classes = 24
55 | elif dataset == '3040':
56 | num_classes = 106
57 | self.features=nn.Sequential(
58 | nn.Conv2d(1,64,kernel_size=(2,3),stride=2,padding=2),
59 | nn.ReLU(inplace=True),
60 | nn.BatchNorm2d(64),
61 | nn.MaxPool2d(kernel_size=2,stride=2),
62 | nn.Conv2d(64,192,kernel_size=2,padding=2),
63 | nn.ReLU(inplace=True),
64 | nn.BatchNorm2d(192),
65 | nn.MaxPool2d(kernel_size=2,stride=2),
66 | nn.Conv2d(192,384,kernel_size=2,padding=1),
67 | nn.ReLU(inplace=True),
68 | nn.Conv2d(384,256,kernel_size=2,padding=1),
69 | nn.ReLU(inplace=True),
70 | nn.Conv2d(256,256,kernel_size=2,padding=1),
71 | nn.ReLU(inplace=True),
72 | nn.MaxPool2d(kernel_size=2,stride=2),
73 | )
74 | self.avgpool=nn.AdaptiveAvgPool2d((6,6))
75 | self.classifier=nn.Sequential(
76 | nn.Dropout(),
77 | nn.Linear(256*6*6,500),
78 | nn.ReLU(inplace=True),
79 | nn.Dropout(),
80 | nn.Linear(500,100),
81 | nn.ReLU(inplace=True),
82 | nn.Linear(100, num_classes)
83 | )
84 | def forward(self,x):
85 | # x = x.unsqueeze(dim=1)
86 | x=self.features(x)
87 | x=self.avgpool(x)
88 | x=torch.flatten(x,1)
89 | x=self.classifier(x)
90 | return x
91 |
92 | # from torchinfo import summary
93 | # model = AlexNet_or(dataset='3040').cuda()
94 | # summary(model, input_size=(128, 1, 2, 3040))
95 |
96 |
97 |
98 |
--------------------------------------------------------------------------------
/raw_model_training/CNN1D.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 | # from torch.fftpack import fft, rfft, ifft
5 | from torch.fft import fft,ifft
6 |
7 | # num_classes = 11
8 | # ResNet {{{
9 | class ResNet1D(nn.Module):
10 | def __init__(self, dataset='128'):
11 | super(ResNet1D, self).__init__()
12 | self.conv1 = ResidualStack(1, kernel_size=(2, 3),pool_size=(2, 2),first=True)
13 | self.conv2 = ResidualStack(32, kernel_size=3, pool_size=2)
14 | self.conv3 = ResidualStack(32, kernel_size=3, pool_size=2)
15 | self.conv4 = ResidualStack(32, kernel_size=3, pool_size=2)
16 | self.conv5 = ResidualStack(32, kernel_size=3, pool_size=2)
17 | self.conv6 = ResidualStack(32, kernel_size=3, pool_size=2)
18 | if dataset == '128':
19 | num_classes = 11
20 | self.dense = nn.Linear(64, 128)
21 | elif dataset == '512':
22 | num_classes = 12
23 | self.dense = nn.Linear(256, 128)
24 | elif dataset == '1024':
25 | num_classes = 24
26 | self.dense = nn.Linear(512, 128)
27 | elif dataset == '3040':
28 | num_classes = 106
29 | self.dense = nn.Linear(1504, 128)
30 | self.drop = nn.Dropout(p=0.3)
31 | self.classfier = nn.Linear(128, num_classes)
32 |
33 | def forward(self, x):
34 |
35 | x = self.conv1(x.unsqueeze(dim=1)).squeeze(dim=2)
36 | x = self.conv2(x)
37 | x = self.conv3(x)
38 | x = self.conv4(x)
39 | x = self.conv5(x)
40 | x = self.conv6(x).view(x.size(0),-1)
41 | x = self.classfier(self.drop(self.dense(x)))
42 | return x
43 |
44 |
45 | class ResidualStack(nn.Module):
46 | def __init__(self, in_channel, kernel_size, pool_size, first=False):
47 | super(ResidualStack, self).__init__()
48 | mid_channel = 32
49 | padding = 1
50 | if first:
51 | conv = nn.Conv2d
52 | pool = nn.MaxPool2d
53 | self.conv1 = conv(in_channel, mid_channel, kernel_size=1, padding=0, bias=False)
54 | self.conv2 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(1, padding), bias=False)
55 | self.conv3 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(0, padding), bias=False)
56 | self.conv4 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(1, padding), bias=False)
57 | self.conv5 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=(0, padding), bias=False)
58 | self.pool = pool(kernel_size=pool_size, stride=pool_size)
59 | else:
60 | conv = nn.Conv1d
61 | pool = nn.MaxPool1d
62 | self.conv1 = conv(in_channel, mid_channel, kernel_size=1, padding=0, bias=False)
63 | self.conv2 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
64 | self.conv3 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
65 | self.conv4 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
66 | self.conv5 = conv(mid_channel, mid_channel, kernel_size=kernel_size, padding=padding, bias=False)
67 | self.pool = pool(kernel_size=pool_size, stride=pool_size)
68 | def forward(self, x):
69 | # residual 1
70 | x = self.conv1(x)
71 | shortcut = x
72 | x = self.conv2(x)
73 | x = F.relu(x)
74 | x = self.conv3(x)
75 | x += shortcut
76 | x = F.relu(x)
77 |
78 | # residual 2
79 | shortcut = x
80 | x = self.conv4(x)
81 | x = F.relu(x)
82 | x = self.conv5(x)
83 | x += shortcut
84 | x = F.relu(x)
85 | x = self.pool(x)
86 |
87 | return x
88 |
89 | # def resnet1d(**kwargs):
90 | # return ResNet1D(**kwargs)
91 |
92 |
93 | # data = torch.randn(10,2,512)
94 | # print(len(data))
95 | # # model = resnet1d()
96 | # # out = model(data)
97 | # # print(out.shape)
98 | # from torchsummary import summary
99 | # model = resnet1d().cuda()
100 | # summary(model, (2, 128))
101 |
102 | # from torchinfo import summary
103 | # model = resnet1d(dataset='128').cuda()
104 | # summary(model, input_size=(128, 2, 128))
105 | #
106 |
--------------------------------------------------------------------------------
/raw_model_training/CNN2D.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 |
5 | # num_classes = 11
6 | # ResNet {{{
7 | class CNN2D(nn.Module):
8 | def __init__(self, dataset='128'):
9 | super(CNN2D, self).__init__()
10 | self.conv1 = nn.Conv2d(1, 256, kernel_size=(1, 3), padding=(0, 1), bias=False)
11 | self.drop1 = nn.Dropout(p=0.5)
12 | self.conv2 = nn.Conv2d(256, 80, kernel_size=(2, 3), bias=False)
13 | self.drop2 = nn.Dropout(p=0.5)
14 | self.dataset = dataset
15 | # if num_classes == 11:
16 | if dataset == '128':
17 | num_classes = 11
18 | self.fc_pool = nn.Linear(126, 128)
19 | self.dense = nn.Linear(10240, 256)
20 | elif dataset == '512':
21 | self.fc_pool = nn.Linear(510, 256)
22 | num_classes = 12
23 | self.dense = nn.Linear(20480, 256)
24 | elif dataset == '1024':
25 | self.fc_pool = nn.Linear(1022, 256)
26 | num_classes = 24
27 | self.dense = nn.Linear(20480, 256)
28 | elif dataset == '3040':
29 | num_classes = 106
30 | self.fc_pool = nn.Linear(3038, 128)
31 | self.dense = nn.Linear(10240, 256)
32 | # self.dense = nn.Linear(10080, 256)
33 | self.drop3 = nn.Dropout(p=0.5)
34 | self.classfier = nn.Linear(256, num_classes)
35 |
36 |
37 | def forward(self, x):
38 | x = x.unsqueeze(dim=1)
39 | x = F.relu(self.conv1(x))
40 | x = self.drop1(x)
41 | x = F.relu(self.conv2(x)).squeeze(dim=2)
42 | x = self.fc_pool(x)
43 | x = self.drop2(x).view(x.size(0), -1)
44 | x = F.relu(self.dense(x))
45 |
46 | x = self.drop3(x)
47 | x = self.classfier(x)
48 | return x
49 |
50 |
51 |
52 |
53 | # def cnn2d(**kwargs):
54 | # return CNN2D(**kwargs)
55 | # data = torch.randn(10,2,512)
56 | # model = cnn2d()
57 | # out = model(data)
58 | # print(out.shape)
59 | # from torchsummary import summary
60 | # model = cnn2d().cuda()
61 | # summary(model, (2, 128))
62 |
63 | # from torchinfo import summary
64 | # model = CNN2D(dataset='512').cuda()
65 | # summary(model, input_size=(128, 2, 512))
66 | #
67 |
--------------------------------------------------------------------------------
/raw_model_training/LeNet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | class LeNet(nn.Module):
4 | def __init__(self,num_class=None):
5 | super(LeNet, self).__init__()
6 | self.conv1=nn.Conv2d(1,64,3,padding=2)
7 | self.bath1 = nn.BatchNorm2d(64)
8 | self.pool1=nn.MaxPool2d(2,2)
9 | self.conv2=nn.Conv2d(64,128,3,padding=2)
10 | self.bath2=nn.BatchNorm2d(128)
11 | self.pool2=nn.MaxPool2d(2,2)
12 | self.drop=nn.Dropout(p=0.5)
13 | # self.conv3 = nn.Conv2d(16, 32, 3)
14 | # self.bath3 = nn.BatchNorm2d(32)
15 | # self.pool3 = nn.MaxPool2d(2, 2)
16 | self.fc3=nn.Linear(512,120)
17 | self.fc4=nn.Linear(120,84)
18 | self.fc5=nn.Linear(84,num_class)
19 |
20 | def forward(self,x):
21 | # x = x.unsqueeze(dim=1)
22 | x=self.pool1(self.bath1(torch.relu(self.conv1(x))))
23 | x=self.pool2(self.bath2(torch.relu(self.conv2(x))))
24 | # x =self.pool3(self.bath3(torch.relu(self.conv3(x))))
25 | # print(x.size)
26 | x=x.view(-1,self.num_flat_feature(x))
27 |
28 | x=torch.relu(self.fc3(x))
29 | x = self.drop(x)
30 | x=torch.relu(self.fc4(x))
31 | x=self.fc5(x)
32 | return x
33 | def num_flat_feature(self,x):
34 | size=x.size()[1:]
35 | num_feature=1
36 |
37 | for s in size:
38 | num_feature*=s
39 | print("num_feature",num_feature)
40 | return num_feature
41 |
42 | class LeNet_or(nn.Module):
43 | def __init__(self, dataset='128'):
44 | super(LeNet_or, self).__init__()
45 | self.conv1=nn.Conv2d(1,6,kernel_size=(2,3),padding=2)
46 | self.pool1=nn.MaxPool2d(2,2)
47 | self.drop=nn.Dropout(0.5)
48 | self.conv2=nn.Conv2d(6,16,kernel_size=(2,3),padding=2)
49 | self.pool2=nn.MaxPool2d(2,2)
50 | if dataset == '128':
51 | num_classes = 11
52 |             self.fc_pool = nn.Linear(33, 33)  # added layer; without it, training degrades on datasets longer than 128
53 | self.fc3 = nn.Linear(1056, 500)
54 | elif dataset == '512':
55 | num_classes = 12
56 |             self.fc_pool = nn.Linear(129, 128)  # added layer; without it, training degrades on datasets longer than 128
57 | self.fc3 = nn.Linear(4096, 500)
58 | elif dataset == '1024':
59 | num_classes = 24
60 |             self.fc_pool = nn.Linear(257, 128)  # added layer; without it, training degrades on datasets longer than 128
61 | self.fc3 = nn.Linear(4096, 500)
62 | elif dataset == '3040':
63 | num_classes = 106
64 |             self.fc_pool = nn.Linear(761, 128)  # added layer; without it, training degrades on datasets longer than 128
65 | self.fc3 = nn.Linear(4096, 500)
66 | self.fc4=nn.Linear(500,84)
67 | self.fc5=nn.Linear(84, num_classes)
68 |
69 | def forward(self,x):
70 | # x = x.unsqueeze(dim=1)
71 | x=self.pool1(torch.relu(self.conv1(x)))
72 | x=self.pool2(torch.relu(self.conv2(x)))
73 | # print(x.size)
74 | x=self.fc_pool(x)
75 | x=x.view(-1,self.num_flat_feature(x))
76 | # print(x.shape)
77 | x=torch.relu(self.fc3(x))
78 | x=self.drop(x)
79 | x=torch.relu(self.fc4(x))
80 | x=self.fc5(x)
81 | return x
82 | def num_flat_feature(self,x):
83 | size=x.size()[1:]
84 | num_feature=1
85 | for s in size:
86 | num_feature*=s
87 | # print("num_feature",num_feature)
88 | return num_feature
89 |
90 | # from torchinfo import summary
91 | # model = LeNet_or(dataset='1024').cuda()
92 | # summary(model, input_size=(128, 1, 2, 1024))
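93 | # Editor's addition, not in the original file: a minimal smoke test for
94 | # LeNet_or. For the '128' dataset the expected input is (batch, 1, 2, 128)
95 | # and the output is (batch, 11) logits; after fc_pool the flattened size is
96 | # 16 * 2 * 33 = 1056, matching fc3.
97 | if __name__ == '__main__':
98 |     model = LeNet_or(dataset='128')
99 |     out = model(torch.randn(4, 1, 2, 128))
100 |     print(out.shape)  # torch.Size([4, 11])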
--------------------------------------------------------------------------------
/raw_model_training/RRR.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import sys
4 |
5 | import math
6 |
7 | import torch
8 | import torch.nn as nn
9 | import torch.nn.functional as F
10 | from functools import partial
11 | from torch.autograd import Variable
12 | import os
13 | # sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
14 | # from args import args
15 | # os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
16 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
17 |
18 | __all__ = ['resnet']
19 |
20 | def conv3x3(in_planes, out_planes, stride=1):
21 | "3x3 convolution with padding"
22 | return nn.Conv1d(in_planes, out_planes, kernel_size=3, stride=stride,
23 | padding=1, bias=False)
24 |
25 |
26 | class BasicBlock(nn.Module):
27 | expansion = 1
28 |
29 | def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):
30 | # cfg should be a number in this case
31 | super(BasicBlock, self).__init__()
32 | self.conv1 = conv3x3(inplanes, cfg, stride)
33 | self.bn1 = nn.BatchNorm1d(cfg)
34 | self.relu = nn.ReLU(inplace=True)
35 | self.conv2 = conv3x3(cfg, planes)
36 | self.bn2 = nn.BatchNorm1d(planes)
37 | self.downsample = downsample
38 | self.stride = stride
39 |
40 | def forward(self, x):
41 | residual = x
42 |
43 | out = self.conv1(x)
44 | out = self.bn1(out)
45 | out = self.relu(out)
46 |
47 | out = self.conv2(out)
48 | out = self.bn2(out)
49 |
50 | if self.downsample is not None:
51 | residual = self.downsample(x)
52 |
53 | out += residual
54 | out = self.relu(out)
55 |
56 | return out
57 |
58 | def downsample_basic_block(x, planes):
59 | x = F.adaptive_avg_pool1d(x, 1)
60 | #x = nn.AvgPool2d(2,2)(x)
61 | zero_pads = torch.Tensor(
62 | x.size(0), planes - x.size(1), x.size(2)).zero_()
63 | if isinstance(x.data, torch.cuda.FloatTensor):
64 | # zero_pads = zero_pads.cuda()
65 | zero_pads = zero_pads.to(device)
66 | # zero_pads = zero_pads.cuda()
67 |
68 | out = Variable(torch.cat([x.data, zero_pads], dim=1))
69 |
70 | return out
71 |
72 | class ResNet(nn.Module):
73 |
74 | def __init__(self, depth, dataset='cifar10', cfg=None):
75 | super(ResNet, self).__init__()
76 | # Model type specifies number of layers for CIFAR-10 model
77 | assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
78 | n = (depth - 2) // 6
79 |
80 | block = BasicBlock
81 |         if cfg is None:
82 | cfg = [[16]*n, [32]*n, [64]*n]
83 | cfg = [item for sub_list in cfg for item in sub_list]
84 |
85 | self.cfg = cfg
86 |
87 | self.inplanes = 16
88 | #self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,bias=False)
89 | self.conv1 = nn.Conv1d(2, 16, kernel_size=3, padding=1,bias=False)
90 | self.bn1 = nn.BatchNorm1d(16)
91 | self.relu = nn.ReLU(inplace=True)
92 | self.layer1 = self._make_layer(block, 16, n, cfg=cfg[0:n])
93 | self.layer2 = self._make_layer(block, 32, n, cfg=cfg[n:2*n], stride=2)
94 | self.layer3 = self._make_layer(block, 64, n, cfg=cfg[2*n:3*n], stride=2)
95 | self.avgpool = nn.AvgPool1d(8)
96 | if dataset == 'cifar10':
97 |             num_classes = 24  # 24 classes for the 1024-length dataset (the 128-length set has 11)
98 | elif dataset == 'cifar100':
99 | num_classes = 100
100 | self.fc = nn.Linear(64 * block.expansion, num_classes)
101 |
102 | for m in self.modules():
103 | if isinstance(m, nn.Conv1d):
104 | # print(m.kernel_size)
105 |                 n = m.kernel_size[0] * m.out_channels  # 1-D conv: no kernel_size[1] factor
106 | m.weight.data.normal_(0, math.sqrt(2. / n))
107 | elif isinstance(m, nn.BatchNorm1d):
108 | m.weight.data.fill_(1)
109 | m.bias.data.zero_()
110 |
111 | def _make_layer(self, block, planes, blocks, cfg, stride=1):
112 | downsample = None
113 | if stride != 1 or self.inplanes != planes * block.expansion:
114 | downsample = partial(downsample_basic_block, planes=planes*block.expansion)
115 |
116 | layers = []
117 | layers.append(block(self.inplanes, planes, cfg[0], stride, downsample))
118 | self.inplanes = planes * block.expansion
119 | for i in range(1, blocks):
120 | layers.append(block(self.inplanes, planes, cfg[i]))
121 |
122 | return nn.Sequential(*layers)
123 |
124 | def forward(self, x):
125 | x = self.conv1(x)
126 | x = self.bn1(x)
127 | x = self.relu(x) # 32x32
128 |
129 | x = self.layer1(x) # 32x32
130 | x = self.layer2(x) # 16x16
131 | x = self.layer3(x) # 8x8
132 | x = F.adaptive_avg_pool1d(x, 1)
133 | #x = self.avgpool(x)
134 | x = x.view(x.size(0), -1)
135 | x = self.fc(x)
136 |
137 | return x
138 |
139 | def resnet(**kwargs):
140 | """
141 | Constructs a ResNet model.
142 | """
143 | return ResNet(**kwargs)
144 |
145 | # if __name__ == '__main__':
146 | # net = resnet(depth=56)
147 | # x=Variable(torch.FloatTensor(16, 3, 32, 32))
148 | # y = net(x)
149 | # print(y.data.shape)
150 |
151 | # data = torch.randn(10,2,128)
152 | # model = resnet(depth=8)
153 | # out = model(data)
154 | # print(out.shape)
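155 | # Editor's addition, not in the original file: a minimal smoke test.
156 | # depth must satisfy depth = 6n + 2 (the assert above), so depth=8 gives
157 | # n=1 block per stage. Input is (batch, 2, length); the default 'cifar10'
158 | # flag is reused here to mean 24 classes.
159 | if __name__ == '__main__':
160 |     model = resnet(depth=8)
161 |     out = model(torch.randn(4, 2, 128))
162 |     print(out.shape)  # torch.Size([4, 24])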
--------------------------------------------------------------------------------
/raw_model_training/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wangzhangwei19/Adversarial-Multi-Distillation/7e4b1b3f7709fe071a00457c364308aa1258fbcc/raw_model_training/__init__.py
--------------------------------------------------------------------------------
/raw_model_training/gru.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 | import torch
4 | import torch.nn.functional as F
5 |
6 | # num_classes = 11
7 | class gru2(nn.Module):
8 | def __init__(self, dataset='128'):
9 | super(gru2, self).__init__()
10 |
11 | self.gru1 = nn.GRU(
12 | input_size=2,
13 | hidden_size=128,
14 | num_layers=1,
15 | bias=False,
16 | batch_first=True
17 | )
18 | self.gru2 = nn.GRU(
19 | input_size=128,
20 | hidden_size=64,
21 | num_layers=1,
22 | bias=False,
23 | batch_first=True
24 | )
25 |
26 | if dataset == '128':
27 | num_classes = 11
28 | self.fc1 = nn.Linear(128*64, 64)
29 | self.fc2 = nn.Linear(64, num_classes)
30 | elif dataset == '512':
31 | num_classes = 12
32 | self.fc1 = nn.Linear(512*64, 64)
33 | self.fc2 = nn.Linear(64, num_classes)
34 | elif dataset == '1024':
35 | num_classes = 24
36 | self.fc1 = nn.Linear(1024*64, 64)
37 | self.fc2 = nn.Linear(64, num_classes)
38 | elif dataset == '3040':
39 | num_classes = 106
40 | self.fc1 = nn.Linear(3040*64, 64)
41 | self.fc2 = nn.Linear(64, num_classes)
42 |
43 |
44 | def forward(self, x):
45 | x, _ = self.gru1(x.transpose(2,1))
46 | x = F.relu(x)
47 | x, _ = self.gru2(x)
48 | x = torch.reshape(x, [x.shape[0],-1])
49 | x = self.fc1(x)
50 | x = self.fc2(x)
51 |
52 | return x
53 |
54 | # from torchinfo import summary
55 | # model = gru2(dataset='128').cuda()
56 | # summary(model, input_size=(128, 2, 128))
57 |
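58 | # Editor's addition, not in the original file: a minimal smoke test. The
59 | # input is (batch, 2, length) and is transposed to (batch, length, 2)
60 | # before the first GRU, so fc1 expects length * 64 flattened features.
61 | if __name__ == '__main__':
62 |     model = gru2(dataset='128')
63 |     out = model(torch.randn(4, 2, 128))
64 |     print(out.shape)  # torch.Size([4, 11])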
--------------------------------------------------------------------------------
/raw_model_training/lstm.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 | import torch
4 |
5 | # num_classes = 11
6 | class lstm2(nn.Module):
7 | def __init__(self, dataset='128'):
8 | super(lstm2, self).__init__()
9 |
10 | self.lstm1 = nn.LSTM(
11 | input_size=2,
12 | hidden_size=128,
13 | num_layers=1,
14 | bias=False,
15 | batch_first=True
16 | )
17 | self.lstm2 = nn.LSTM(
18 | input_size=128,
19 | hidden_size=64,
20 | num_layers=1,
21 | bias=False,
22 | batch_first=True
23 | )
24 |
25 | if dataset == '128':
26 | num_classes = 11
27 | self.fc = nn.Linear(128*64, num_classes)
28 | elif dataset == '512':
29 | num_classes = 12
30 | self.fc = nn.Linear(512*64, num_classes)
31 | elif dataset == '1024':
32 | num_classes = 24
33 | self.fc = nn.Linear(1024*64, num_classes)
34 | elif dataset == '3040':
35 | num_classes = 106
36 | self.fc = nn.Linear(3040*64, num_classes)
37 |
38 | # if num_classes == 10:
39 | # self.fc = nn.Linear(128*64, num_classes)
40 | # if num_classes == 11:
41 | # self.fc = nn.Linear(128*64, num_classes)
42 | # if num_classes == 12:
43 | # self.fc = nn.Linear(512*64, num_classes)
44 |
45 | def forward(self, x):
46 |
47 | x, _ = self.lstm1(x.transpose(2,1))
48 | x, _ = self.lstm2(x)
49 | x = torch.reshape(x, [x.shape[0],-1])
50 | # print(x.shape)
51 | x = self.fc(x)
52 |
53 | return x
54 |
55 | # data = torch.randn(20,2,512)
56 | # model = lstm2()
57 | # print(model(data).shape)
58 |
59 | # from torchinfo import summary
60 | # model = lstm2(dataset='3040').cuda()
61 | # summary(model, input_size=(128, 2, 3040))
62 |
63 |
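64 | # Editor's addition, not in the original file: a minimal smoke test,
65 | # mirroring the commented examples above. fc expects length * 64 inputs.
66 | if __name__ == '__main__':
67 |     model = lstm2(dataset='128')
68 |     out = model(torch.randn(4, 2, 128))
69 |     print(out.shape)  # torch.Size([4, 11])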
--------------------------------------------------------------------------------
/raw_model_training/mcldnn.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 | from torch.nn import Sequential
5 |
6 | # num_classes = 11
7 |
8 | class MCLDNN(nn.Module):
9 |
10 | def __init__(self, dataset='128'):
11 | super(MCLDNN, self).__init__()
12 | if dataset == '128':
13 | num_classes = 11
14 | elif dataset == '512':
15 | num_classes = 12
16 | elif dataset == '1024':
17 | num_classes = 24
18 | elif dataset == '3040':
19 | num_classes = 106
20 | self.conv1 = nn.Conv1d(
21 | in_channels=2,
22 | out_channels=50,
23 | kernel_size=7,
24 | bias=False,
25 | padding=3,
26 | )
27 | self.conv2 = Sequential(
28 | nn.Conv1d(
29 | in_channels=2,
30 | out_channels=100,
31 | kernel_size=7,
32 | bias=False,
33 | padding=3,
34 | groups=2
35 | ),
36 | nn.ReLU(True),
37 | nn.Conv1d(
38 | in_channels=100,
39 | out_channels=50,
40 | kernel_size=7,
41 | bias=False,
42 | padding=3,
43 | ))
44 | self.conv3 = nn.Conv1d(
45 | in_channels=100,
46 | out_channels=100,
47 | kernel_size=5,
48 | bias=False
49 | )
50 | self.lstm1 = nn.LSTM(
51 | input_size=100,
52 | hidden_size=128,
53 | num_layers=1,
54 |             bias=False, batch_first=True,  # batch_first added so lstm1 sees (batch, seq, features), matching lstm2
55 | )
56 | self.lstm2 = nn.LSTM(
57 | input_size=128,
58 | hidden_size=128,
59 | num_layers=1,
60 | bias=False,
61 | batch_first=True
62 | )
63 | self.fc = Sequential(
64 | nn.Linear(128, 128),
65 | nn.SELU(True),
66 | nn.Dropout(0.5),
67 | nn.Linear(128, 128),
68 | nn.SELU(True),
69 | nn.Dropout(0.5),
70 | nn.Linear(128, num_classes)
71 | )
72 | def forward(self, x):
73 | assert len(x.shape)==3 and x.shape[1]==2
74 | x1 = self.conv1(x)
75 | x2 = self.conv2(x)
76 | x3 = F.relu(torch.cat([x1,x2],dim=1))
77 | x3 = F.relu(self.conv3(x3))
78 | x3, _ = self.lstm1(x3.transpose(2,1))
79 | _, (x3, __) = self.lstm2(x3)
80 |         x3 = self.fc(x3.squeeze(0))  # squeeze only the layer dim; a bare squeeze() would also drop a batch of size 1
81 |
82 | return x3
83 |
84 |
85 | # model = MCLDNN(dataset='512')
86 | # data = torch.randn(10,2,512)
87 | # out = model(data)
88 | # print(out.shape)
89 |
90 | # from torchinfo import summary
91 | # model = MCLDNN(dataset='3040').cuda()
92 | # summary(model, input_size=(128, 2, 3040))
93 |
94 |
95 |
96 |
97 |
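98 | # Editor's addition, not in the original file: a minimal smoke test. The
99 | # forward asserts a (batch, 2, length) input; the second LSTM's final
100 | # hidden state feeds the classifier, so the output is (batch, num_classes).
101 | if __name__ == '__main__':
102 |     model = MCLDNN(dataset='128')
103 |     out = model(torch.randn(4, 2, 128))
104 |     print(out.shape)  # torch.Size([4, 11])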
--------------------------------------------------------------------------------
/raw_model_training/mobilenet_2d.py:
--------------------------------------------------------------------------------
1 | """mobilenet in pytorch
2 |
3 |
4 |
5 | [1] Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam
6 |
7 | MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications
8 | https://arxiv.org/abs/1704.04861
9 | """
10 |
11 | import torch
12 | import torch.nn as nn
13 |
14 |
15 | class DepthSeperabelConv2d(nn.Module):
16 |
17 | def __init__(self, input_channels, output_channels, kernel_size, **kwargs):
18 | super().__init__()
19 | self.depthwise = nn.Sequential(
20 | nn.Conv2d(
21 | input_channels,
22 | input_channels,
23 | kernel_size,
24 | groups=input_channels,
25 | **kwargs),
26 | nn.BatchNorm2d(input_channels),
27 | nn.ReLU(inplace=True)
28 | )
29 |
30 | self.pointwise = nn.Sequential(
31 | nn.Conv2d(input_channels, output_channels, 1),
32 | nn.BatchNorm2d(output_channels),
33 | nn.ReLU(inplace=True)
34 | )
35 |
36 | def forward(self, x):
37 | x = self.depthwise(x)
38 | x = self.pointwise(x)
39 |
40 | return x
41 |
42 |
43 | class BasicConv2d(nn.Module):
44 |
45 | def __init__(self, input_channels, output_channels, kernel_size, **kwargs):
46 |
47 | super().__init__()
48 | self.conv = nn.Conv2d(
49 | input_channels, output_channels, kernel_size, **kwargs)
50 | self.bn = nn.BatchNorm2d(output_channels)
51 | self.relu = nn.ReLU(inplace=True)
52 |
53 | def forward(self, x):
54 | x = self.conv(x)
55 | x = self.bn(x)
56 | x = self.relu(x)
57 |
58 | return x
59 |
60 |
61 | class MobileNet(nn.Module):
62 |
63 | """
64 | Args:
65 |        width multiplier: The role of the width multiplier α is to thin
66 | a network uniformly at each layer. For a given
67 | layer and width multiplier α, the number of
68 | input channels M becomes αM and the number of
69 | output channels N becomes αN.
70 | """
71 |
72 | def __init__(self, width_multiplier=1, class_num=11):
73 | super().__init__()
74 |
75 | alpha = width_multiplier
76 | self.stem = nn.Sequential(
77 | BasicConv2d(1, int(32 * alpha), 3, padding=1, bias=False),
78 | DepthSeperabelConv2d(
79 | int(32 * alpha),
80 | int(64 * alpha),
81 | 3,
82 | padding=1,
83 | bias=False
84 | )
85 | )
86 |
87 | #downsample
88 | self.conv1 = nn.Sequential(
89 | DepthSeperabelConv2d(
90 | int(64 * alpha),
91 | int(128 * alpha),
92 | 3,
93 | stride=2,
94 | padding=1,
95 | bias=False
96 | ),
97 | DepthSeperabelConv2d(
98 | int(128 * alpha),
99 | int(128 * alpha),
100 | 3,
101 | padding=1,
102 | bias=False
103 | )
104 | )
105 |
106 | #downsample
107 | self.conv2 = nn.Sequential(
108 | DepthSeperabelConv2d(
109 | int(128 * alpha),
110 | int(256 * alpha),
111 | 3,
112 | stride=2,
113 | padding=1,
114 | bias=False
115 | ),
116 | DepthSeperabelConv2d(
117 | int(256 * alpha),
118 | int(256 * alpha),
119 | 3,
120 | padding=1,
121 | bias=False
122 | )
123 | )
124 |
125 | #downsample
126 | self.conv3 = nn.Sequential(
127 | DepthSeperabelConv2d(
128 | int(256 * alpha),
129 | int(512 * alpha),
130 | 3,
131 | stride=2,
132 | padding=1,
133 | bias=False
134 | ),
135 |
136 | DepthSeperabelConv2d(
137 | int(512 * alpha),
138 | int(512 * alpha),
139 | 3,
140 | padding=1,
141 | bias=False
142 | ),
143 | DepthSeperabelConv2d(
144 | int(512 * alpha),
145 | int(512 * alpha),
146 | 3,
147 | padding=1,
148 | bias=False
149 | ),
150 | DepthSeperabelConv2d(
151 | int(512 * alpha),
152 | int(512 * alpha),
153 | 3,
154 | padding=1,
155 | bias=False
156 | ),
157 | DepthSeperabelConv2d(
158 | int(512 * alpha),
159 | int(512 * alpha),
160 | 3,
161 | padding=1,
162 | bias=False
163 | ),
164 | DepthSeperabelConv2d(
165 | int(512 * alpha),
166 | int(512 * alpha),
167 | 3,
168 | padding=1,
169 | bias=False
170 | )
171 | )
172 |
173 | #downsample
174 | self.conv4 = nn.Sequential(
175 | DepthSeperabelConv2d(
176 | int(512 * alpha),
177 | int(1024 * alpha),
178 | 3,
179 | stride=2,
180 | padding=1,
181 | bias=False
182 | ),
183 | DepthSeperabelConv2d(
184 | int(1024 * alpha),
185 | int(1024 * alpha),
186 | 3,
187 | padding=1,
188 | bias=False
189 | )
190 | )
191 |
192 | self.fc = nn.Linear(int(1024 * alpha), class_num)
193 | self.avg = nn.AdaptiveAvgPool2d(1)
194 |
195 | def forward(self, x):
196 | x = self.stem(x)
197 |
198 | x = self.conv1(x)
199 | x = self.conv2(x)
200 | x = self.conv3(x)
201 | x = self.conv4(x)
202 |
203 | x = self.avg(x)
204 | x = x.view(x.size(0), -1)
205 | x = self.fc(x)
206 | return x
207 |
208 |
209 | def mobilenet(alpha=1, class_num=11):
210 | return MobileNet(alpha, class_num)
211 |
212 | # data = torch.randn(10,1,2,128)
213 | # model = mobilenet()
214 | # out = model(data)
215 | # print(out.shape)
216 |
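217 | # Editor's addition, not in the original file: a minimal smoke test. The
218 | # width multiplier thins every layer (e.g. alpha=0.5 halves all channel
219 | # counts); input is (batch, 1, 2, length) as in the commented example.
220 | if __name__ == '__main__':
221 |     model = mobilenet(alpha=1, class_num=11)
222 |     out = model(torch.randn(4, 1, 2, 128))
223 |     print(out.shape)  # torch.Size([4, 11])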
--------------------------------------------------------------------------------
/raw_model_training/resnet.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import math
3 |
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 | from functools import partial
8 | from torch.autograd import Variable
9 |
10 |
11 | __all__ = ['resnet']
12 |
13 | def conv3x3(in_planes, out_planes, stride=1):
14 | "3x3 convolution with padding"
15 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
16 | padding=1, bias=False)
17 |
18 |
19 | class BasicBlock(nn.Module):
20 | expansion = 1
21 |
22 | def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):
23 | # cfg should be a number in this case
24 | super(BasicBlock, self).__init__()
25 | self.conv1 = conv3x3(inplanes, cfg, stride)
26 | self.bn1 = nn.BatchNorm2d(cfg)
27 | self.relu = nn.ReLU(inplace=True)
28 | self.conv2 = conv3x3(cfg, planes)
29 | self.bn2 = nn.BatchNorm2d(planes)
30 | self.downsample = downsample
31 | self.stride = stride
32 |
33 | def forward(self, x):
34 | residual = x
35 |
36 | out = self.conv1(x)
37 | out = self.bn1(out)
38 | out = self.relu(out)
39 |
40 | out = self.conv2(out)
41 | out = self.bn2(out)
42 |
43 | if self.downsample is not None:
44 | residual = self.downsample(x)
45 |
46 | out += residual
47 | out = self.relu(out)
48 |
49 | return out
50 |
51 | def downsample_basic_block(x, planes):
52 | x = F.adaptive_avg_pool2d(x, (1, 1))
53 | #x = nn.AvgPool2d(2,2)(x)
54 | zero_pads = torch.Tensor(
55 | x.size(0), planes - x.size(1), x.size(2), x.size(3)).zero_()
56 | if isinstance(x.data, torch.cuda.FloatTensor):
57 | zero_pads = zero_pads.cuda()
58 |
59 | out = Variable(torch.cat([x.data, zero_pads], dim=1))
60 |
61 | return out
62 |
63 | class ResNet(nn.Module):
64 |
65 | def __init__(self, depth, dataset='cifar10', cfg=None):
66 | super(ResNet, self).__init__()
67 | # Model type specifies number of layers for CIFAR-10 model
68 | assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
69 | n = (depth - 2) // 6
70 |
71 | block = BasicBlock
72 |         if cfg is None:
73 | cfg = [[16]*n, [32]*n, [64]*n]
74 | cfg = [item for sub_list in cfg for item in sub_list]
75 |
76 | self.cfg = cfg
77 |
78 | self.inplanes = 16
79 | #self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,bias=False)
80 | self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1,bias=False)
81 | self.bn1 = nn.BatchNorm2d(16)
82 | self.relu = nn.ReLU(inplace=True)
83 | self.layer1 = self._make_layer(block, 16, n, cfg=cfg[0:n])
84 | self.layer2 = self._make_layer(block, 32, n, cfg=cfg[n:2*n], stride=2)
85 | self.layer3 = self._make_layer(block, 64, n, cfg=cfg[2*n:3*n], stride=2)
86 | self.avgpool = nn.AvgPool2d(8)
87 | if dataset == 'cifar10':
88 | num_classes = 11
89 | elif dataset == 'cifar100':
90 | num_classes = 100
91 | self.fc = nn.Linear(64 * block.expansion, num_classes)
92 |
93 | for m in self.modules():
94 | if isinstance(m, nn.Conv2d):
95 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
96 | m.weight.data.normal_(0, math.sqrt(2. / n))
97 | elif isinstance(m, nn.BatchNorm2d):
98 | m.weight.data.fill_(1)
99 | m.bias.data.zero_()
100 |
101 | def _make_layer(self, block, planes, blocks, cfg, stride=1):
102 | downsample = None
103 | if stride != 1 or self.inplanes != planes * block.expansion:
104 | downsample = partial(downsample_basic_block, planes=planes*block.expansion)
105 |
106 | layers = []
107 | layers.append(block(self.inplanes, planes, cfg[0], stride, downsample))
108 | self.inplanes = planes * block.expansion
109 | for i in range(1, blocks):
110 | layers.append(block(self.inplanes, planes, cfg[i]))
111 |
112 | return nn.Sequential(*layers)
113 |
114 | def forward(self, x):
115 | x = self.conv1(x)
116 | x = self.bn1(x)
117 | x = self.relu(x) # 32x32
118 |
119 | x = self.layer1(x) # 32x32
120 | x = self.layer2(x) # 16x16
121 | x = self.layer3(x) # 8x8
122 | x = F.adaptive_avg_pool2d(x, (1, 1))
123 | #x = self.avgpool(x)
124 | x = x.view(x.size(0), -1)
125 | x = self.fc(x)
126 |
127 | return x
128 |
129 | def resnet(**kwargs):
130 | """
131 | Constructs a ResNet model.
132 | """
133 | return ResNet(**kwargs)
134 |
135 | # if __name__ == '__main__':
136 | # net = resnet(depth=56)
137 | # x=Variable(torch.FloatTensor(16, 3, 32, 32))
138 | # y = net(x)
139 | # print(y.data.shape)
140 |
141 | # data = torch.randn(10,1,2,128)
142 | # model = resnet(depth=8)
143 | # out = model(data)
144 | # print(out.shape)
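145 | # Editor's addition, not in the original file: a minimal smoke test.
146 | # depth = 6n + 2, so depth=20 gives n=3 blocks per stage; the 'cifar10'
147 | # flag is reused here to mean 11 classes.
148 | if __name__ == '__main__':
149 |     model = resnet(depth=20)
150 |     out = model(torch.randn(4, 1, 2, 128))
151 |     print(out.shape)  # torch.Size([4, 11])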
--------------------------------------------------------------------------------
/raw_model_training/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for model in CNN1D CNN2D LSTM GRU MCLDNN Lenet Vgg16 Alexnet;
3 | do
4 | CUDA_VISIBLE_DEVICES=2 python test.py --model $model --dataset 128 --num_workers 5 &
5 | CUDA_VISIBLE_DEVICES=3 python test.py --model $model --dataset 512 --num_workers 7 &
6 | CUDA_VISIBLE_DEVICES=4 python test.py --model $model --dataset 1024 --num_workers 8 &
7 | CUDA_VISIBLE_DEVICES=6 python test.py --model $model --dataset 3040 --num_workers 8;
8 | done
9 |
10 |
11 |
--------------------------------------------------------------------------------
/raw_model_training/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #for model in CNN1D CNN2D LSTM GRU MCLDNN Lenet Vgg16 Alexnet;
3 | #for model in CNN1D MCLDNN r8conv1;
4 | #do
5 | # for dataset in 1024;
6 | # do
7 | # CUDA_VISIBLE_DEVICES=0 python train.py --model $model --dataset $dataset --num_workers 8 --epochs 50
8 | # CUDA_VISIBLE_DEVICES=1 python train.py --model $model --dataset $dataset --num_workers 8 --epochs 50
9 | # CUDA_VISIBLE_DEVICES=2 python train.py --model $model --dataset $dataset --num_workers 8 --epochs 50
10 | # CUDA_VISIBLE_DEVICES=3 python train.py --model $model --dataset $dataset --num_workers 8 --epochs 50
11 | # done
12 | #done
13 |
14 |
15 | for dataset in 1024;
16 | do
17 | CUDA_VISIBLE_DEVICES=0 python train.py --model vgg11_bn --dataset $dataset --num_workers 8 --epochs 50 &
18 | CUDA_VISIBLE_DEVICES=0 python train.py --model r8conv1 --dataset $dataset --num_workers 8 --epochs 50
19 |
20 | done
21 |
--------------------------------------------------------------------------------
/raw_model_training/train_par.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for dataset in 128;
3 | do
4 | # CUDA_VISIBLE_DEVICES=6 python train.py --model GRU --dataset $dataset --num_workers 8 --epochs 50
5 | # CUDA_VISIBLE_DEVICES=1 python train.py --model CNN1D --dataset $dataset --num_workers 8 --epochs 50 &
6 | # CUDA_VISIBLE_DEVICES=3 python train.py --model CNN2D --dataset $dataset --num_workers 8 --epochs 50 &
7 | # CUDA_VISIBLE_DEVICES=6 python train.py --model LSTM --dataset $dataset --num_workers 8 --epochs 50 &
8 | # CUDA_VISIBLE_DEVICES=5 python train.py --model GRU --dataset $dataset --num_workers 8 --epochs 50 ;
9 | # CUDA_VISIBLE_DEVICES=2 python train.py --model MCLDNN --dataset $dataset --num_workers 8 --epochs 50 &
10 | # CUDA_VISIBLE_DEVICES=3 python train.py --model Lenet --dataset $dataset --num_workers 8 --epochs 50 &
11 | # CUDA_VISIBLE_DEVICES=7 python train.py --model Vgg16 --dataset $dataset --num_workers 8 --epochs 50 &
12 | # CUDA_VISIBLE_DEVICES=5 python train.py --model Alexnet --dataset $dataset --num_workers 8 --epochs 50 ;
13 | : ; done  # no-op ':' keeps the loop body non-empty (every command above is commented out)
14 | #
15 | #for dataset in 3040;
16 | #do
17 | # CUDA_VISIBLE_DEVICES=2 python train.py --model CNN2D --dataset $dataset --num_workers 8 --epochs 50 &
18 | # CUDA_VISIBLE_DEVICES=4 python train.py --model Lenet --dataset $dataset --num_workers 8 --epochs 50 ;
19 | #done
20 | #
21 | #for dataset in 512;
22 | #do
23 | # CUDA_VISIBLE_DEVICES=2 python train.py --model CNN2D --dataset $dataset --num_workers 8 --epochs 50;
24 | #done
25 | #
26 | #
27 | #
28 |
29 |
--------------------------------------------------------------------------------
/raw_model_training/vgg.py:
--------------------------------------------------------------------------------
1 | """vgg in pytorch
2 |
3 |
4 | [1] Karen Simonyan, Andrew Zisserman
5 |
6 | Very Deep Convolutional Networks for Large-Scale Image Recognition.
7 | https://arxiv.org/abs/1409.1556v6
8 | """
9 | '''VGG11/13/16/19 in Pytorch.'''
10 |
11 | import torch
12 | import torch.nn as nn
13 |
14 | cfg = {
15 | 'A' : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
16 | 'B' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
17 | 'D' : [16, 16, 'M', 32, 32, 'M', 64, 64, 64, 'M', 128, 128, 128, 'M', 128, 128, 128, 'M'],
18 | 'E' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
19 | }
20 |
21 | class VGG(nn.Module):
22 |
23 |     def __init__(self, features, num_class=24, in_features=512):  # in_features: channel count after pooling (512 for cfgs 'A'/'B'/'E', 128 for 'D')
24 | super().__init__()
25 | self.features = features
26 |
27 | self.classifier = nn.Sequential(
28 |             nn.Linear(in_features, 2048),
29 | nn.ReLU(inplace=True),
30 | nn.Dropout(),
31 | nn.Linear(2048, 2048),
32 | nn.ReLU(inplace=True),
33 | nn.Dropout(),
34 | nn.Linear(2048, num_class)
35 | )
36 |
37 | self.pooling = nn.Sequential(
38 | nn.AdaptiveAvgPool1d(1),
39 | nn.Flatten()
40 | )
41 |
42 | def forward(self, x):
43 | output = self.features(x)
44 | # output = output.view(output.size()[0], -1)
45 | output = self.pooling(output)
46 | output = self.classifier(output)
47 |
48 | return output
49 |
50 | def make_layers(cfg, batch_norm=False):
51 | layers = []
52 |
53 | input_channel = 2
54 | for l in cfg:
55 | if l == 'M':
56 | layers += [nn.MaxPool1d(kernel_size=2, stride=2)]
57 | continue
58 | # print(l)
59 | layers += [nn.Conv1d(input_channel, l, kernel_size=3, padding=1)]
60 |
61 | if batch_norm:
62 | layers += [nn.BatchNorm1d(l)]
63 |
64 | layers += [nn.ReLU(inplace=True)]
65 | input_channel = l
66 |
67 | return nn.Sequential(*layers)
68 |
69 | def vgg11_bn():
70 | return VGG(make_layers(cfg['A'], batch_norm=True))
71 |
72 | def vgg13_bn():
73 | return VGG(make_layers(cfg['B'], batch_norm=True))
74 |
75 | def vgg16_bn():
76 |     return VGG(make_layers(cfg['D'], batch_norm=True), in_features=128)  # cfg 'D' ends at 128 channels, not 512
77 |
78 | def vgg19_bn():
79 | return VGG(make_layers(cfg['E'], batch_norm=True))
80 |
81 | # from torchsummary import summary
82 | # summary(vgg16_bn().cuda(), (2, 1024))
83 |
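84 | # Editor's addition, not in the original file: a minimal smoke test on
85 | # 1024-length I/Q data. cfg 'D' is a slimmed VGG16 whose final width is
86 | # 128 rather than 512, hence the in_features=128 override in vgg16_bn.
87 | if __name__ == '__main__':
88 |     for net in (vgg11_bn(), vgg16_bn()):
89 |         print(net(torch.randn(4, 2, 1024)).shape)  # torch.Size([4, 24])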
--------------------------------------------------------------------------------
/raw_model_training/vgg16.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | import numpy as np
4 | from torch.autograd import Variable
5 |
6 | def conv_layer(chann_in, chann_out, k_size, p_size):
7 | layer = nn.Sequential(
8 | nn.Conv2d(chann_in, chann_out, kernel_size=k_size, padding=p_size),
9 | nn.BatchNorm2d(chann_out),
10 | nn.ReLU()
11 | )
12 | return layer
13 |
14 | def vgg_conv_block(in_list, out_list, k_list, p_list, pooling_k, pooling_s):
15 | layers = [ conv_layer(in_list[i], out_list[i], k_list[i], p_list[i]) for i in range(len(in_list)) ]
16 | layers += [ nn.MaxPool2d(kernel_size = pooling_k, stride = pooling_s)]
17 | return nn.Sequential(*layers)
18 |
19 | def vgg_fc_layer(size_in, size_out):
20 | layer = nn.Sequential(
21 | nn.Linear(size_in, size_out),
22 | nn.BatchNorm1d(size_out),
23 | nn.ReLU()
24 | )
25 | return layer
26 |
27 |
28 | class VGG16_or(nn.Module):
29 | def __init__(self, dataset='128'):
30 | super(VGG16_or, self).__init__()
31 | # Conv blocks (BatchNorm + ReLU activation added in each block)
32 | self.layer1 = vgg_conv_block([1,64], [64,64], [3,3], [3,3], 2, 2)
33 | self.layer2 = vgg_conv_block([64,128], [128,128], [3,3], [3,3], 2, 2)
34 | self.layer3 = vgg_conv_block([128,256,256], [256,256,256], [3,3,3], [1,1,1], 2, 2)
35 | self.layer4 = vgg_conv_block([256,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
36 | # self.layer5 = vgg_conv_block([512,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
37 |
38 | # FC layers
39 | if dataset == '128':
40 | num_classes = 11
41 | self.layer5 = vgg_fc_layer(4608, 256)
42 | elif dataset == '512':
43 | num_classes = 12
44 | self.layer5 = vgg_fc_layer(16896, 256)
45 | elif dataset == '1024':
46 | num_classes = 24
47 | self.layer5 = vgg_fc_layer(33280, 256)
48 | elif dataset == '3040':
49 | num_classes = 106
50 | self.layer5 = vgg_fc_layer(97792, 256)
51 | self.layer6 = vgg_fc_layer(256, 128)
52 |
53 | # Final layer
54 | self.layer7 = nn.Linear(128, num_classes)
55 |
56 | def forward(self, x):
57 |
58 | out = self.layer1(x)
59 | out = self.layer2(out)
60 | out = self.layer3(out)
61 | out = self.layer4(out)
62 |
63 | out = out.view(-1, self.num_flat_feature(out))
64 | # print(out.shape)
65 | out = self.layer5(out)
66 | out = self.layer6(out)
67 | out = self.layer7(out)
68 |
69 | return out
70 |
71 | def num_flat_feature(self,x):
72 | size=x.size()[1:]
73 | num_feature=1
74 | for s in size:
75 | num_feature*=s
76 | # print("num_feature",num_feature)
77 | return num_feature
78 |
79 |
80 |
81 |
82 |
83 | class VGG16(nn.Module):
84 | def __init__(self, n_classes=24):
85 | super(VGG16, self).__init__()
86 |
87 | # Conv blocks (BatchNorm + ReLU activation added in each block)
88 | self.layer1 = vgg_conv_block([1,64], [64,64], [3,3], [3,3], 2, 2)
89 | self.layer2 = vgg_conv_block([64,128], [128,128], [3,3], [3,3], 2, 2)
90 | self.layer3 = vgg_conv_block([128,256,256], [256,256,256], [3,3,3], [1,1,1], 2, 2)
91 | self.layer4 = vgg_conv_block([256,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
92 | self.layer5 = vgg_conv_block([512,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
93 |
94 | # FC layers
95 | self.layer6 = vgg_fc_layer(512, 256)
96 | self.layer7 = vgg_fc_layer(256, 128)
97 |
98 | # Final layer
99 | self.layer8 = nn.Linear(128, n_classes)
100 |
101 | def forward(self, x):
102 |
103 | out = self.layer1(x)
104 | out = self.layer2(out)
105 | out = self.layer3(out)
106 | out = self.layer4(out)
107 | vgg16_features = self.layer5(out)
108 |         out = vgg16_features.view(vgg16_features.size(0), -1)  # VGG16 defines no num_flat_feature helper; flatten directly
109 | out = self.layer6(out)
110 | out = self.layer7(out)
111 | out = self.layer8(out)
112 |
113 | return out
114 |
115 |
116 | # from torchinfo import summary
117 | # model = VGG16_or(dataset='3040').cuda()
118 | # summary(model, input_size=(128, 1, 2, 3040))
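119 | # Editor's addition, not in the original file: a minimal smoke test for
120 | # VGG16_or. For the '128' dataset the flattened feature size is
121 | # 512 * 1 * 9 = 4608, matching layer5; input is (batch, 1, 2, 128).
122 | if __name__ == '__main__':
123 |     model = VGG16_or(dataset='128')
124 |     out = model(torch.randn(4, 1, 2, 128))
125 |     print(out.shape)  # torch.Size([4, 11])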
--------------------------------------------------------------------------------
/requirement.txt:
--------------------------------------------------------------------------------
1 | Package Version
2 | ------------------------------ -----------------------
3 | absl-py 1.1.0
4 | adversarial-robustness-toolbox 1.8.1
5 | decorator 5.1.1
6 | eagerpy 0.30.0
7 | flatbuffers 1.12
8 | fonttools 4.33.3
9 | foolbox 2.3.0
10 | gast 0.2.2
11 | gitdb 4.0.9
12 | GitPython 3.1.27
13 | google-auth 2.8.0
14 | google-auth-oauthlib 0.4.6
15 | google-pasta 0.2.0
16 | grpcio 1.46.3
17 | h5py 2.10.0
18 | idna 3.3
19 | imageio 2.19.5
20 | importlib-metadata 4.11.4
21 | Markdown 3.3.7
22 | MarkupSafe 2.1.1
23 | matplotlib 3.5.2
24 | matplotlib-inline 0.1.3
25 | mysql-connector-python 8.0.33
26 | numba 0.56.0
27 | numpy 1.21.6
28 | nvidia-pyindex 1.0.9
29 | oauthlib 3.2.0
30 | opencv-python 4.6.0.66
31 | opt-einsum 3.3.0
32 | packaging 21.3
33 | pandas 1.3.5
34 | parso 0.8.3
35 | pexpect 4.8.0
36 | pickleshare 0.7.5
37 | Pillow 9.1.0
38 | pip 22.0.4
39 | prompt-toolkit 3.0.29
40 | protobuf 3.19.4
41 | psutil 5.9.3
42 | ptyprocess 0.7.0
43 | pyasn1 0.4.8
44 | pyasn1-modules 0.2.8
45 | pyDeprecate 0.3.2
46 | Pygments 2.12.0
47 | pyparsing 3.0.8
48 | python-dateutil 2.8.2
49 | pytz 2022.1
50 | PyWavelets 1.1.1
51 | PyYAML 6.0
52 | requests 2.27.1
53 | requests-oauthlib 1.3.1
54 | rsa 4.8
55 | scikit-learn 1.0.2
56 | scipy 1.7.3
57 | setuptools 62.1.0
58 | six 1.16.0
59 | smmap 5.0.0
60 | torch 1.8.0+cu111
61 | torch-cluster 1.5.9
62 | torch-geometric 2.0.4
63 | torch-scatter 2.0.8
64 | torch-sparse 0.6.9
65 | torch-spline-conv 1.2.1
66 | torchaudio 0.8.0
67 | torchmetrics 0.8.2
68 | torchsummary 1.5.1
69 | torchvision 0.9.0+cu111
70 | tqdm 4.64.0
71 | traitlets 5.1.1
72 | typing_extensions 4.2.0
73 | urllib3 1.26.9
74 | wcwidth 0.2.5
75 | Werkzeug 2.1.2
76 | wheel 0.37.1
77 | wrapt 1.14.1
78 | zipp 3.8.0
79 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 | import os
5 | import shutil
6 | import numpy as np
7 | import torch
8 |
9 |
10 | class AverageMeter(object):
11 | def __init__(self):
12 | self.reset()
13 |
14 | def reset(self):
15 | self.val = 0
16 | self.avg = 0
17 | self.sum = 0
18 | self.count = 0
19 |
20 | def update(self, val, n=1):
21 | self.val = val
22 | self.sum += val * n
23 | self.count += n
24 | self.avg = self.sum / self.count
25 |
26 |
27 | def count_parameters_in_MB(model):
28 | # return sum(np.prod(v.size()) for name, v in model.named_parameters())/1e6
29 | return sum(p.numel() for p in model.parameters())/1e6
30 |
31 | def create_exp_dir(path):
32 | if not os.path.exists(path):
33 | os.makedirs(path)
34 | print('Experiment dir : {}'.format(path))
35 |
36 |
37 | def load_pretrained_model(model, pretrained_dict):
38 | model_dict = model.state_dict()
39 | # 1. filter out unnecessary keys
40 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
41 | # 2. overwrite entries in the existing state dict
42 | model_dict.update(pretrained_dict)
43 | # 3. load the new state dict
44 | model.load_state_dict(model_dict)
45 |
46 |
47 | def transform_time(s):
48 | m, s = divmod(int(s), 60)
49 | h, m = divmod(m, 60)
50 | return h,m,s
51 |
52 |
53 | def save_checkpoint(state, is_best, save_root, epoch):
54 | save_path = os.path.join(save_root, 'checkpoint.pth_{}.tar'.format(epoch))
55 | torch.save(state, save_path)
56 | if is_best:
57 | best_save_path = os.path.join(save_root, 'model_best.pth_{}.tar'.format(epoch))
58 | # shutil.copyfile(save_path, best_save_path)
59 | torch.save(state, best_save_path)
60 |
61 | def accuracy(output, target, topk=(1,)):
62 | """Computes the precision@k for the specified values of k"""
63 | maxk = max(topk)
64 | batch_size = target.size(0)
65 |
66 | _, pred = output.topk(maxk, 1, True, True)
67 | pred = pred.t()
68 | correct = pred.eq(target.view(1, -1).expand_as(pred))
69 |
70 | res = []
71 | for k in topk:
72 |         correct_k = correct[:k].contiguous().view(-1).float().sum(0)  # .contiguous() added so view() works on the sliced tensor
73 | res.append(correct_k.mul_(100.0 / batch_size))
74 | return res
75 |
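76 | # Editor's addition, not in the original file: a minimal usage sketch for
77 | # accuracy() and AverageMeter. accuracy() returns one percentage tensor per
78 | # entry in topk; AverageMeter keeps a running (weighted) average of it.
79 | if __name__ == '__main__':
80 |     meter = AverageMeter()
81 |     logits = torch.randn(8, 11)
82 |     target = torch.randint(0, 11, (8,))
83 |     top1, = accuracy(logits, target, topk=(1,))
84 |     meter.update(top1.item(), n=target.size(0))
85 |     print('top-1: {:.2f}%  running avg: {:.2f}%'.format(top1.item(), meter.avg))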
--------------------------------------------------------------------------------