├── README.md
└── rap_attack.py

/README.md:
--------------------------------------------------------------------------------

# Boosting the Transferability of Adversarial Attacks with Reverse Adversarial Perturbation (NeurIPS 2022)


**PyTorch implementation of the state-of-the-art transfer attack: Reverse Adversarial Perturbation (RAP).**

*Boosting the Transferability of Adversarial Attacks with Reverse Adversarial Perturbation*

Zeyu Qin*, Yanbo Fan*, Yi Liu, Li Shen, Yong Zhang, Jue Wang, Baoyuan Wu

In NeurIPS 2022.

----

### Code:
- rap_attack.py: full version

### Examples:

- Baseline targeted attack with DI and the logit loss, with ResNet-50 as the source model:

```
python /targeted_attack/rap_attack.py --targeted --transpoint 400 --seed 9018 --source_model resnet50 --loss_function MaxLogit --DI --max_iterations 300
```

- RAP targeted attack with DI and the logit loss, with ResNet-50 as the source model:

```
python /targeted_attack/rap_attack.py --targeted --adv_perturbation --transpoint 0 --seed 9018 --source_model resnet50 --loss_function MaxLogit --DI --max_iterations 300
```

- RAP-LS targeted attack with DI and the logit loss, with ResNet-50 as the source model:

```
python /targeted_attack/rap_attack.py --targeted --adv_perturbation --transpoint 100 --seed 9018 --source_model resnet50 --loss_function MaxLogit --DI --max_iterations 300
```

### Configuration parameters:

- targeted attack: --targeted (omit it for an untargeted attack)
- source model: --source_model (resnet50, inception-v3, densenet121, vgg16bn)
- random seed: --seed 1234
- number of iterations of the outer minimization: --max_iterations
- MI: --MI (omit it to disable)
- DI: --DI (omit it to disable)
- TI: --TI (omit it to disable)
- SI: --SI together with --m2 5 (omit them to disable)
- Admix: --m1 3 and --m2 5 together with --strength 0.2 (omit them to disable)
- transpoint:
  - --transpoint 400: baseline method
  - --transpoint 0: baseline + RAP
  - --transpoint 100: baseline + RAP-LS
- loss function of the outer minimization: --loss_function (CE or MaxLogit)
- RAP inner maximization: --adv_perturbation enables it, --adv_epsilon 16/255 sets the perturbation budget, and --adv_steps 8 sets the number of steps


#### This code is based on the [source code of the NeurIPS 2021 paper](https://github.com/ZhengyuZhao/Targeted-Tansfer) *"On Success and Simplicity: A Second Look at Transferable Targeted Attacks"*. The dataset used here is also contained in their repository. Please consider leaving a :star: on their repository.
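
#### Checking the saved results

A minimal sketch of how the saved outputs could be inspected (assuming a run with --save and the default --max_iterations 400; the file names follow the np.save calls in rap_attack.py, and the files are written to an experiment folder under /targeted_attack/adv_example/):

```
import numpy as np

# success counts on the four target models (rows: Inception-v3, ResNet50,
# DenseNet121, VGG16bn), recorded every 10 iterations
pos = np.load('results.npy')  # shape: (4, max_iterations // 10)
print('success counts after 400 iterations:', pos[:, 39])

# adversarial examples after 400 iterations: shape (N, 3, 299, 299), values in [0, 1]
x_adv = np.load('iter_400.npy')
print('adversarial examples:', x_adv.shape)
```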
--------------------------------------------------------------------------------
/rap_attack.py:
--------------------------------------------------------------------------------
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
import torchvision
import torchvision.datasets as td
import torch.distributions as tdist
import argparse
from torchvision import models, transforms
from PIL import Image
import csv
import numpy as np
import os
import scipy.stats as st

## hyperparameters
parser = argparse.ArgumentParser()

parser.add_argument('--source_model', type=str, default='resnet50', choices=['resnet50', 'inception-v3', 'densenet121', 'vgg16bn'])

parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--max_iterations', type=int, default=400)

parser.add_argument('--loss_function', type=str, default='CE', choices=['CE', 'MaxLogit'])

parser.add_argument('--targeted', action='store_true')

parser.add_argument('--m1', type=int, default=1, help='number of randomly sampled images (Admix)')
parser.add_argument('--m2', type=int, default=1, help='number of scaled copies (SI/Admix)')
parser.add_argument('--strength', type=float, default=0, help='Admix mixing strength')

parser.add_argument('--adv_perturbation', action='store_true', help='enable the RAP inner maximization')

parser.add_argument('--adv_loss_function', type=str, default='CE', choices=['CE', 'MaxLogit'])

parser.add_argument('--adv_epsilon', type=eval, default=16 / 255)
parser.add_argument('--adv_steps', type=int, default=8)

parser.add_argument('--transpoint', type=int, default=0, help='iteration at which RAP is switched on (late start)')

parser.add_argument('--seed', type=int, default=0)

parser.add_argument('--MI', action='store_true')
parser.add_argument('--DI', action='store_true')
parser.add_argument('--TI', action='store_true')
parser.add_argument('--SI', action='store_true')
parser.add_argument('--random_start', action='store_true')

parser.add_argument('--save', action='store_true')

parser.add_argument('--device', type=int, default=0)


arg = parser.parse_args()


os.environ["CUDA_VISIBLE_DEVICES"] = str(arg.device)

# step size of the inner maximization
arg.adv_alpha = arg.adv_epsilon / arg.adv_steps


def makedir(path):
    if not os.path.exists(path):
        os.makedirs(path)
        print('----------- created new folder -----------')
    else:
        print('----------- folder already exists -----------')


exp_name = arg.source_model + '_' + arg.loss_function + '_'

if arg.targeted:
    exp_name += 'T_'
if arg.MI:
    exp_name += 'MI_'
if arg.DI:
    exp_name += 'DI_'
if arg.TI:
    exp_name += 'TI_'
if arg.SI:
    exp_name += 'SI_'
if arg.m1 != 1:
    exp_name += f'm1_{arg.m1}_'
if arg.m2 != 1:
    exp_name += f'm2_{arg.m2}_'
if arg.strength != 0:
    exp_name += 'Admix_'


exp_name += str(arg.transpoint)


if arg.targeted:
    exp_name += '_target'


# For a targeted attack, we conduct an untargeted attack during the inner loop;
# for an untargeted attack, we conduct a targeted attack (towards the true label) during the inner loop.
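# Either way, the reverse perturbation is chosen to undo the outer attack, so the outer
# minimization is pushed towards solutions whose loss stays low even under this
# worst-case perturbation, i.e. towards flat local minima, which transfer better.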
if not arg.targeted:
    arg.adv_targeted = 1
else:
    arg.adv_targeted = 0


arg.file_path = "/targeted_attack/adv_example/" + exp_name
makedir(arg.file_path)


def logging(s, print_=True, log_=True):

    if print_:
        print(s)

    if log_:
        with open(os.path.join(arg.file_path, 'log.txt'), 'a+') as f_log:
            f_log.write(s + '\n')


logging(exp_name)

logging('Hyper-parameters: {}\n'.format(arg.__dict__))


## load image metadata (ImageId, true label, and target label)
def load_ground_truth(csv_filename):
    image_id_list = []
    label_ori_list = []
    label_tar_list = []

    with open(csv_filename) as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',')
        for row in reader:
            image_id_list.append(row['ImageId'])
            label_ori_list.append(int(row['TrueLabel']) - 1)
            label_tar_list.append(int(row['TargetClass']) - 1)

    return image_id_list, label_ori_list, label_tar_list

## simple Module to normalize an image
class Normalize(nn.Module):
    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.mean = torch.Tensor(mean)
        self.std = torch.Tensor(std)

    def forward(self, x):
        return (x - self.mean.type_as(x)[None, :, None, None]) / self.std.type_as(x)[None, :, None, None]

## define TI: a Gaussian kernel for smoothing the gradient
def gkern(kernlen=15, nsig=3):
    x = np.linspace(-nsig, nsig, kernlen)
    kern1d = st.norm.pdf(x)
    kernel_raw = np.outer(kern1d, kern1d)
    kernel = kernel_raw / kernel_raw.sum()
    return kernel

channels = 3
kernel_size = 5
kernel = gkern(kernel_size, 3).astype(np.float32)
gaussian_kernel = np.stack([kernel, kernel, kernel])
gaussian_kernel = np.expand_dims(gaussian_kernel, 1)
gaussian_kernel = torch.from_numpy(gaussian_kernel).cuda()

## define DI: random resizing and padding, applied with probability 0.7
def DI(X_in):
    rnd = np.random.randint(299, 330, size=1)[0]
    h_rem = 330 - rnd
    w_rem = 330 - rnd
    pad_top = np.random.randint(0, h_rem, size=1)[0]
    pad_bottom = h_rem - pad_top
    pad_left = np.random.randint(0, w_rem, size=1)[0]
    pad_right = w_rem - pad_left

    c = np.random.rand(1)
    if c <= 0.7:
        X_out = F.pad(F.interpolate(X_in, size=(rnd, rnd)), (pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0)
        return X_out
    else:
        return X_in


def pgd(model, data, labels, targeted, epsilon, k, a, random_start=True):

    data_max = data + epsilon
    data_min = data - epsilon
    data_max.clamp_(0, 1)
    data_min.clamp_(0, 1)

    data = data.clone().detach().to(device)
    labels = labels.clone().detach().to(device)

    perturbed_data = data.clone().detach()

    if random_start:
        # starting at a uniformly random point
        perturbed_data = perturbed_data + torch.empty_like(perturbed_data).uniform_(-epsilon, epsilon)
        perturbed_data = torch.clamp(perturbed_data, min=0, max=1).detach()

    for _ in range(k):
        perturbed_data.requires_grad = True
        outputs = model(norm(perturbed_data))
        if arg.adv_loss_function == 'CE':
            loss = nn.CrossEntropyLoss(reduction='sum')
            if targeted:
                cost = loss(outputs, labels)
            else:
                cost = -1 * loss(outputs, labels)

        elif arg.adv_loss_function == 'MaxLogit':
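            # operate directly on the logit of the attacked class instead of the
            # softmax cross-entropy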
            if targeted:
                real = outputs.gather(1, labels.unsqueeze(1)).squeeze(1)
                logit_dists = -1 * real
                cost = logit_dists.sum()
            else:
                real = outputs.gather(1, labels.unsqueeze(1)).squeeze(1)
                cost = real.sum()

        # update the adversarial images
        cost.backward()

        gradient = perturbed_data.grad.clone().to(device)
        perturbed_data.grad.zero_()

        with torch.no_grad():
            perturbed_data.data -= a * torch.sign(gradient)
            perturbed_data.data = torch.max(torch.min(perturbed_data.data, data_max), data_min)
    return perturbed_data.detach()
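
# pgd() implements the inner maximization of RAP: starting from the current adversarial
# example, it computes a reverse perturbation that weakens the attack; the outer loop
# below then minimizes the attack loss at this perturbed point, seeking a flat loss region.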

model_1 = models.inception_v3(pretrained=True, transform_input=True).eval()
model_2 = models.resnet50(pretrained=True).eval()
model_3 = models.densenet121(pretrained=True).eval()
model_4 = models.vgg16_bn(pretrained=True).eval()


for param in model_1.parameters():
    param.requires_grad = False
for param in model_2.parameters():
    param.requires_grad = False
for param in model_3.parameters():
    param.requires_grad = False
for param in model_4.parameters():
    param.requires_grad = False


device = 'cuda' if torch.cuda.is_available() else 'cpu'
logging(f'device: {device}')

model_1.to(device)
model_2.to(device)
model_3.to(device)
model_4.to(device)

if arg.source_model == 'inception-v3':
    model_source = model_1
elif arg.source_model == 'resnet50':
    model_source = model_2
elif arg.source_model == 'densenet121':
    model_source = model_3
elif arg.source_model == 'vgg16bn':
    model_source = model_4

logging("setting up the source and target models")

torch.manual_seed(arg.seed)
torch.backends.cudnn.deterministic = True
np.random.seed(arg.seed)


# values are the standard normalization for ImageNet images,
# from https://github.com/pytorch/examples/blob/master/imagenet/main.py
norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
trn = transforms.Compose([transforms.ToTensor(), ])
image_id_list, label_ori_list, label_tar_list = load_ground_truth('/targeted_attack/dataset/images.csv')

img_size = 299
input_path = '/targeted_attack/dataset/images/'
lr = 2 / 255  # step size of the outer minimization
epsilon = 16  # L_inf norm bound (in pixel values out of 255)
num_batches = int(np.ceil(len(image_id_list) / arg.batch_size))

logging("loaded the images")
n = tdist.Normal(0.0, 15 / 255)

#-------------------------------------#
X_adv_10 = torch.zeros(len(image_id_list), 3, img_size, img_size)
X_adv_50 = torch.zeros(len(image_id_list), 3, img_size, img_size)
X_adv_100 = torch.zeros(len(image_id_list), 3, img_size, img_size)
X_adv_200 = torch.zeros(len(image_id_list), 3, img_size, img_size)
X_adv_300 = torch.zeros(len(image_id_list), 3, img_size, img_size)
X_adv_400 = torch.zeros(len(image_id_list), 3, img_size, img_size)

fixing_point = 0

adv_activate = 0

# success counts on the four target models, recorded every 10 iterations
pos = np.zeros((4, arg.max_iterations // 10))


for k in range(0, num_batches):
    batch_size_cur = min(arg.batch_size, len(image_id_list) - k * arg.batch_size)
    X_ori = torch.zeros(batch_size_cur, 3, img_size, img_size).to(device)
    delta = torch.zeros_like(X_ori, requires_grad=True).to(device)
    for i in range(batch_size_cur):
        X_ori[i] = trn(Image.open(input_path + image_id_list[k * arg.batch_size + i] + '.png'))
    labels = torch.tensor(label_ori_list[k * arg.batch_size:k * arg.batch_size + batch_size_cur]).to(device)
    target_labels = torch.tensor(label_tar_list[k * arg.batch_size:k * arg.batch_size + batch_size_cur]).to(device)
    grad_pre = 0
    prev = float('inf')

    if arg.random_start:
        # starting at a uniformly random point
        delta.requires_grad_(False)
        delta = delta + torch.empty_like(X_ori).uniform_(-epsilon / 255, epsilon / 255)
        delta = torch.clamp(X_ori + delta, min=0, max=1) - X_ori
        delta.requires_grad_(True)

    logging(50 * "#")
    logging("starting batch {}".format(k + 1))


    for t in range(arg.max_iterations):
        # late start (LS): keep RAP off for the first `transpoint` iterations
        if t < arg.transpoint:
            adv_activate = 0
        else:
            if arg.adv_perturbation:
                adv_activate = 1
            else:
                adv_activate = 0
        grad_list = []

        for q in range(arg.m1):
            delta.requires_grad_(False)

            if arg.strength == 0:
                X_addin = torch.zeros_like(X_ori).to(device)
            else:
                # Admix: mix in randomly sampled images whose labels differ from
                # every true label in the batch
                X_addin = torch.zeros_like(X_ori).to(device)
                random_labels = torch.zeros(batch_size_cur).to(device)
                stop = False
                while not stop:
                    random_indices = np.random.randint(0, 1000, batch_size_cur)
                    for i in range(batch_size_cur):
                        X_addin[i] = trn(Image.open(input_path + image_id_list[random_indices[i]] + '.png'))
                        random_labels[i] = label_ori_list[random_indices[i]]
                    if torch.sum(random_labels == labels).item() == 0:
                        stop = True
                X_addin = arg.strength * X_addin
                X_addin = torch.clamp(X_ori + delta + X_addin, min=0, max=1) - (X_ori + delta)

            if arg.SI:
                if adv_activate:
                    if arg.adv_targeted:
                        label_pred = labels
                    else:
                        label_pred = target_labels

                    X_advaug = pgd(model_source, X_ori + delta + X_addin, label_pred, arg.adv_targeted, arg.adv_epsilon, arg.adv_steps, arg.adv_alpha)
                    X_aug = X_advaug - (X_ori + delta + X_addin)
                else:
                    X_aug = torch.zeros_like(X_ori).to(device)

                delta.requires_grad_(True)

            for j in range(arg.m2):

                if not arg.SI:
                    delta.requires_grad_(False)

                    if adv_activate:
                        if arg.adv_targeted:
                            label_pred = labels
                        else:
                            label_pred = target_labels

                        X_advaug = pgd(model_source, X_ori + delta + X_addin, label_pred, arg.adv_targeted, arg.adv_epsilon, arg.adv_steps, arg.adv_alpha)
                        X_aug = X_advaug - (X_ori + delta + X_addin)
                    else:
                        X_aug = torch.zeros_like(X_ori).to(device)
                    delta.requires_grad_(True)

                if arg.DI:  # DI: random resize + pad before the forward pass
                    if arg.SI:
                        logits = model_source(norm(DI((X_ori + delta + X_addin + X_aug) / 2 ** j)))
                    else:
                        logits = model_source(norm(DI(X_ori + delta + X_addin + X_aug)))
                else:
                    if arg.SI:  # SI: scale the input by 1 / 2**j
                        logits = model_source(norm((X_ori + delta + X_addin + X_aug) / 2 ** j))
                    else:
                        logits = model_source(norm(X_ori + delta + X_addin + X_aug))

                if arg.loss_function == 'CE':
                    loss_func = nn.CrossEntropyLoss(reduction='sum')
                    if arg.targeted:
                        loss = loss_func(logits, target_labels)
                    else:
                        loss = -1 * loss_func(logits, labels)
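                # the MaxLogit (logit) loss from the NeurIPS 2021 paper cited in the
                # README: push the attacked class's logit directly, which avoids the
                # saturation of the softmax cross-entropy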
                elif arg.loss_function == 'MaxLogit':
                    if arg.targeted:
                        real = logits.gather(1, target_labels.unsqueeze(1)).squeeze(1)
                        loss = -1 * real.sum()
                    else:
                        real = logits.gather(1, labels.unsqueeze(1)).squeeze(1)
                        loss = real.sum()

                loss.backward()
                grad_cc = delta.grad.clone().to(device)

                if arg.TI:  # TI: smooth the gradient with the Gaussian kernel
                    grad_cc = F.conv2d(grad_cc, gaussian_kernel, bias=None, stride=1, padding=(2, 2), groups=3)
                grad_list.append(grad_cc)
                delta.grad.zero_()

        # average the gradients over all m1 * m2 augmented copies
        grad_c = 0
        for j in range(arg.m1 * arg.m2):
            grad_c += grad_list[j]
        grad_c = grad_c / (arg.m1 * arg.m2)

        if arg.MI:  # MI: momentum on the normalized gradient
            grad_c = grad_c / torch.mean(torch.abs(grad_c), (1, 2, 3), keepdim=True) + 1 * grad_pre

        grad_pre = grad_c
        delta.data = delta.data - lr * torch.sign(grad_c)
        delta.data = delta.data.clamp(-epsilon / 255, epsilon / 255)
        delta.data = ((X_ori + delta.data).clamp(0, 1)) - X_ori

        if t % 10 == 9:
            # evaluate the transfer success on all four models
            with torch.no_grad():
                if arg.targeted:
                    pos[0, t // 10] = pos[0, t // 10] + sum(torch.argmax(model_1(norm(X_ori + delta)), dim=1) == target_labels).cpu().numpy()
                    pos[1, t // 10] = pos[1, t // 10] + sum(torch.argmax(model_2(norm(X_ori + delta)), dim=1) == target_labels).cpu().numpy()
                    pos[2, t // 10] = pos[2, t // 10] + sum(torch.argmax(model_3(norm(X_ori + delta)), dim=1) == target_labels).cpu().numpy()
                    pos[3, t // 10] = pos[3, t // 10] + sum(torch.argmax(model_4(norm(X_ori + delta)), dim=1) == target_labels).cpu().numpy()
                else:
                    pos[0, t // 10] = pos[0, t // 10] + sum(torch.argmax(model_1(norm(X_ori + delta)), dim=1) != labels).cpu().numpy()
                    pos[1, t // 10] = pos[1, t // 10] + sum(torch.argmax(model_2(norm(X_ori + delta)), dim=1) != labels).cpu().numpy()
                    pos[2, t // 10] = pos[2, t // 10] + sum(torch.argmax(model_3(norm(X_ori + delta)), dim=1) != labels).cpu().numpy()
                    pos[3, t // 10] = pos[3, t // 10] + sum(torch.argmax(model_4(norm(X_ori + delta)), dim=1) != labels).cpu().numpy()

            logging(str(pos))
            logging(30 * "#")


        # snapshot the adversarial examples at selected iterations
        if t == (1 - 1):
            X_adv_10[fixing_point:fixing_point + batch_size_cur] = (X_ori + delta).clone().detach().cpu()
        if t == (50 - 1):
            X_adv_50[fixing_point:fixing_point + batch_size_cur] = (X_ori + delta).clone().detach().cpu()
        if t == (100 - 1):
            X_adv_100[fixing_point:fixing_point + batch_size_cur] = (X_ori + delta).clone().detach().cpu()
        if t == (200 - 1):
            X_adv_200[fixing_point:fixing_point + batch_size_cur] = (X_ori + delta).clone().detach().cpu()
        if t == (300 - 1):
            X_adv_300[fixing_point:fixing_point + batch_size_cur] = (X_ori + delta).clone().detach().cpu()
        if t == (400 - 1):
            X_adv_400[fixing_point:fixing_point + batch_size_cur] = (X_ori + delta).clone().detach().cpu()

    fixing_point += batch_size_cur
    logging(50 * "#")

    torch.cuda.empty_cache()


logging(arg.file_path)

logging('Hyper-parameters: {}\n'.format(arg.__dict__))


logging("final result")
logging('Source model: {} --> Target models: Inception-v3 | ResNet50 | DenseNet121 | VGG16bn'.format(arg.source_model))
logging(str(pos))

logging("results for 10 iters:")
logging(str(pos[:, 0]))

logging("results for 100 iters:")
logging(str(pos[:, 9]))

logging("results for 200 iters:")
logging(str(pos[:, 19]))

logging("results for 300 iters:")
logging(str(pos[:, 29]))

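# NOTE: the fixed column indices here assume the default --max_iterations 400;
# pos has max_iterations // 10 columns, one entry per 10 iterations.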
logging("results for 400 iters:") 507 | logging(str(pos[:, 39])) 508 | 509 | 510 | if arg.save: 511 | np.save(arg.file_path+'/'+'results'+'.npy', pos) 512 | 513 | 514 | # X_adv_10 = X_adv_10.detach().cpu() 515 | # X_adv_50 = X_adv_50.detach().cpu() 516 | # X_adv_100 = X_adv_100.detach().cpu() 517 | # X_adv_200 = X_adv_200.detach().cpu() 518 | # X_adv_300 = X_adv_300.detach().cpu() 519 | X_adv_400 = X_adv_400.detach().cpu() 520 | 521 | logging("saving the adversarial examples") 522 | 523 | # torch.save(X_adv_10, file_path+'/'+'iter_10'+'.pt') 524 | # np.save(file_path+'/'+'iter_10'+'.npy', X_adv_10.numpy()) 525 | 526 | # # torch.save(X_adv_50, file_path+'/'+'iter_50'+'.pt') 527 | # np.save(file_path+'/'+'iter_50'+'.npy', X_adv_50.numpy()) 528 | 529 | # torch.save(X_adv_100, file_path+'/'+'iter_100'+'.pt') 530 | # np.save(file_path+'/'+'iter_100'+'.npy', X_adv_100.numpy()) 531 | 532 | # # torch.save(X_adv_200, file_path+'/'+'iter_200'+'.pt') 533 | # np.save(file_path+'/'+'iter_200'+'.npy', X_adv_200.numpy()) 534 | 535 | # torch.save(X_adv_300, file_path+'/'+'iter_300'+'.pt') 536 | # np.save(file_path+'/'+'iter_300'+'.npy', X_adv_300.numpy()) 537 | 538 | # torch.save(X_adv_final, file_path+'/'+'iter_final'+'.pt') 539 | np.save(arg.file_path+'/'+'iter_400'+'.npy', X_adv_400.numpy()) 540 | 541 | logging("finishing saving the adversarial examples") 542 | 543 | logging("finishing the attack experiment") 544 | logging(50*"#") 545 | logging(50*"#") 546 | logging(50*"#") 547 | 548 | --------------------------------------------------------------------------------