├── README.md
├── cifar10_curve.png
├── experiments
│   ├── cifar100_l2
│   │   └── preactresnet18
│   │       └── output.log
│   ├── cifar100_standard
│   │   └── preactresnet18
│   │       └── output.log
│   ├── cifar10_cutout
│   │   ├── preactresnet18_10
│   │   │   └── output.log
│   │   ├── preactresnet18_12
│   │   │   └── output.log
│   │   ├── preactresnet18_14
│   │   │   └── output.log
│   │   ├── preactresnet18_16
│   │   │   └── output.log
│   │   ├── preactresnet18_18
│   │   │   └── output.log
│   │   ├── preactresnet18_2
│   │   │   └── output.log
│   │   ├── preactresnet18_20
│   │   │   └── output.log
│   │   ├── preactresnet18_4
│   │   │   └── output.log
│   │   ├── preactresnet18_6
│   │   │   └── output.log
│   │   └── preactresnet18_8
│   │       └── output.log
│   ├── cifar10_dropepoch
│   │   ├── preactresnet_60
│   │   │   └── output.log
│   │   ├── preactresnet_70
│   │   │   └── output.log
│   │   ├── preactresnet_80
│   │   │   └── output.log
│   │   └── preactresnet_90
│   │       └── output.log
│   ├── cifar10_droplr
│   │   ├── preactresnet_0.006
│   │   │   └── output.log
│   │   ├── preactresnet_0.008
│   │   │   └── output.log
│   │   ├── preactresnet_0.03
│   │   │   └── output.log
│   │   └── preactresnet_0.05
│   │       └── output.log
│   ├── cifar10_fgsm
│   │   ├── preactresnet18_0.75
│   │   │   └── output.log
│   │   └── preactresnet18_0.875
│   │       └── output.log
│   ├── cifar10_fgsm_l2
│   │   └── preactresnet18
│   │       └── output.log
│   ├── cifar10_l1
│   │   ├── preactresnet18_5e-4
│   │   │   └── output.log
│   │   ├── preactresnet18_5e-5
│   │   │   └── output.log
│   │   ├── preactresnet18_5e-6
│   │   │   └── output.log
│   │   └── preactresnet18_5e-7
│   │       └── output.log
│   ├── cifar10_l2
│   │   ├── preactresnet18_0.01
│   │   │   └── output.log
│   │   ├── preactresnet18_5
│   │   │   └── output.log
│   │   ├── preactresnet18_50
│   │   │   └── output.log
│   │   ├── preactresnet18_5e-1
│   │   │   └── output.log
│   │   ├── preactresnet18_5e-2
│   │   │   └── output.log
│   │   ├── preactresnet18_5e-3
│   │   │   └── output.log
│   │   └── preactresnet18_5e-4
│   │       └── output.log
│   ├── cifar10_lr
│   │   ├── preactresnet18_cosine
│   │   │   └── output.log
│   │   ├── preactresnet18_cyclic
│   │   │   └── output.log
│   │   ├── preactresnet18_manystep
│   │   │   └── output.log
│   │   ├── preactresnet18_piecewise_0_2
│   │   │   └── output.log
│   │   ├── preactresnet18_piecewisezoom
│   │   │   └── output.log
│   │   ├── preactresnet18_piecewisezoom_long
│   │   │   └── output.log
│   │   └── preactresnet18_smartdrop
│   │       └── output.log
│   ├── cifar10_mixup
│   │   ├── preactresnet18_0.2
│   │   │   └── output.log
│   │   ├── preactresnet18_0.4
│   │   │   └── output.log
│   │   ├── preactresnet18_0.6
│   │   │   └── output.log
│   │   ├── preactresnet18_0.8
│   │   │   └── output.log
│   │   ├── preactresnet18_1.0
│   │   │   └── output.log
│   │   ├── preactresnet18_1.2
│   │   │   └── output.log
│   │   ├── preactresnet18_1.4
│   │   │   └── output.log
│   │   ├── preactresnet18_1.6
│   │   │   └── output.log
│   │   ├── preactresnet18_1.8
│   │   │   └── output.log
│   │   └── preactresnet18_2.0
│   │       └── output.log
│   ├── cifar10_semisupervised
│   │   └── preactresnet18_500000
│   │       └── output.log
│   ├── cifar10_standard
│   │   ├── preactresnet18
│   │   │   └── output.log
│   │   └── wideresnet_10
│   │       └── output.log
│   ├── cifar10_standard_l2
│   │   └── preactresnet18_0
│   │       └── output.log
│   ├── cifar10_startlr
│   │   ├── preactresnet_0.06
│   │   │   └── output.log
│   │   ├── preactresnet_0.08
│   │   │   └── output.log
│   │   ├── preactresnet_0.3
│   │   │   └── output.log
│   │   └── preactresnet_0.5
│   │       └── output.log
│   ├── cifar10_trades
│   │   ├── eval.log
│   │   ├── trades.out
│   │   ├── trades_madry.out
│   │   ├── trades_madry_resnet18.out
│   │   └── trades_resnet18.out
│   ├── cifar10_trades_l2
│   │   ├── eval.log
│   │   └── trades_madry_l2.out
│   ├── cifar10_validation
│   │   └── preactresnet18
│   │       └── output.log
│   ├── cifar10_wide
│   │   ├── wideresnet_1
│   │   │   └── output.log
│   │   ├── wideresnet_10
│   │   │   └── output.log
│   │   ├── wideresnet_15
│   │   │   └── output.log
│   │   ├── wideresnet_2
│   │   │   └── output.log
│   │   ├── wideresnet_20
│   │   │   └── output.log
│   │   ├── wideresnet_3
│   │   │   └── output.log
│   │   ├── wideresnet_4
│   │   │   └── output.log
│   │   ├── wideresnet_5
│   │   │   └── output.log
│   │   ├── wideresnet_6
│   │   │   └── output.log
│   │   ├── wideresnet_7
│   │   │   └── output.log
│   │   ├── wideresnet_8
│   │   │   └── output.log
│   │   └── wideresnet_9
│   │       └── output.log
│   ├── imagenet
│   │   ├── l2_eps_3
│   │   │   └── data.pth
│   │   └── linf_eps_4
│   │       └── data.pth
│   ├── svhn_standard
│   │   └── preactresnet18
│   │       └── output.log
│   └── svhn_standard_l2
│       └── preactresnet18
│           └── output.log
├── generate_validation.py
├── imagenet_scripts
│   ├── resume_l2.sh
│   └── resume_linf.sh
├── preactresnet.py
├── train_cifar.py
├── train_cifar100.py
├── train_cifar_semisupervised_half.py
├── train_svhn.py
├── utils.py
└── wideresnet.py

/README.md:
--------------------------------------------------------------------------------
# Overfitting in adversarially robust deep learning
A repository which implements the experiments for exploring the phenomenon of robust overfitting, where robust performance on the test set degrades significantly over training. Created by [Leslie Rice][leslie link], [Eric Wong][eric link], and [Zico Kolter][zico link]. See our paper on arXiv [here][arxiv].

## News
+ 04/10/2020 - The AutoAttack framework of [Croce & Hein (2020)][autoattack arxiv] evaluated our released models using this repository [here][autoattack]. On CIFAR10, our models trained with standard PGD and early stopping rank at \#5 overall, and \#1 among defenses that do not rely on additional data.
+ 02/26/2020 - arXiv posted and repository release

## Robust overfitting hurts - early stopping is essential!
A large amount of research over the past couple of years has looked into defending deep networks against adversarial examples, with significant improvements over the well-known PGD-based adversarial training defense. However, adversarial training doesn't always behave similarly to standard training. The main observation we find is that, unlike in standard training, training to convergence can significantly harm robust generalization: the robust test error actually begins to increase well before training has converged, as seen in the following learning curve:

![overfitting](https://github.com/locuslab/robust_overfitting/blob/master/cifar10_curve.png)

After the initial learning rate decay, the robust test error actually increases! As a result, training to convergence is bad for adversarial training, and oftentimes, simply training for one epoch after decaying the learning rate achieves the best robust error on the test set. This behavior is reflected across multiple datasets, different approaches to adversarial training, and both L-infinity and L-2 threat models.

## No algorithmic improvements over PGD-based adversarial training
We can apply this knowledge to PGD-based adversarial training (e.g. as done by the original paper [here](https://arxiv.org/abs/1706.06083)), and find that early stopping can substantially improve the robust test error, by 8%! As a result, we find that PGD-based adversarial training is as good as existing SOTA methods for adversarial robustness (e.g. on par with or slightly better than [TRADES](https://github.com/yaodongyu/TRADES)). On the flip side, we note that the results reported by TRADES also rely on early stopping, as training the TRADES approach to convergence results in a significant increase in robust test error. Unfortunately, this means that all of the algorithmic gains over PGD in adversarially robust training can be equivalently obtained with early stopping.
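To make the recipe concrete, below is a minimal PyTorch-style sketch of what this amounts to: PGD adversarial training with a one-drop learning rate schedule and a checkpoint kept at the best robust accuracy on held-out data. It is an illustration, not the exact code in `train_cifar.py`; the helper names (`pgd_attack`, `robust_accuracy`, `train_pgd`) and the loaders are assumptions, while the hyperparameters mirror the `Namespace` settings visible in the logs further down (epsilon 8/255, PGD step 2/255, 10 attack iterations, lr 0.1 dropped to 0.01).

```python
import copy

import torch
import torch.nn.functional as F


def pgd_attack(model, X, y, eps=8 / 255, alpha=2 / 255, iters=10):
    """L-infinity PGD with a random start."""
    delta = torch.empty_like(X).uniform_(-eps, eps).requires_grad_(True)
    for _ in range(iters):
        loss = F.cross_entropy(model(X + delta), y)
        loss.backward()
        # Ascend the loss, then project back into the eps-ball and image range.
        delta.data = (delta + alpha * delta.grad.sign()).clamp(-eps, eps)
        delta.data = (X + delta.data).clamp(0, 1) - X
        delta.grad.zero_()
    return delta.detach()


def robust_accuracy(model, loader, device):
    """Accuracy under the PGD attack on a held-out loader."""
    model.eval()
    correct = total = 0
    for X, y in loader:
        X, y = X.to(device), y.to(device)
        delta = pgd_attack(model, X, y)
        with torch.no_grad():
            correct += (model(X + delta).argmax(1) == y).sum().item()
        total += y.size(0)
    return correct / total


def train_pgd(model, train_loader, val_loader, device, epochs=110, drop_epoch=60):
    opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    best_acc, best_state = -1.0, None
    for epoch in range(epochs):
        # The 'onedrop' schedule from the logs: lr_max until drop_epoch, then lr_one_drop.
        for g in opt.param_groups:
            g["lr"] = 0.1 if epoch < drop_epoch else 0.01
        model.train()
        for X, y in train_loader:
            X, y = X.to(device), y.to(device)
            delta = pgd_attack(model, X, y)
            loss = F.cross_entropy(model(X + delta), y)
            opt.zero_grad()  # also clears gradients accumulated by the attack
            loss.backward()
            opt.step()
        # Early stopping: keep the checkpoint with the best robust accuracy,
        # since robust test error rises again after the learning rate drop.
        acc = robust_accuracy(model, val_loader, device)
        if acc > best_acc:
            best_acc, best_state = acc, copy.deepcopy(model.state_dict())
    model.load_state_dict(best_state)
    return model
```

Selecting the checkpoint by robust held-out accuracy, rather than taking the final model, is the entire early-stopping intervention discussed above.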
## What is in this repository?
+ The experiments for CIFAR-10, CIFAR-100, and SVHN are in `train_cifar.py`, `train_cifar100.py`, and `train_svhn.py` respectively.
+ CIFAR-10 training with semisupervised data is done in `train_cifar_semisupervised_half.py`, and uses the 500K pseudo-labeled TinyImages data from https://github.com/yaircarmon/semisup-adv.
+ TRADES training is done with the repository located at https://github.com/yaodongyu/TRADES, with the only modification being a change to the learning rate schedule to train to convergence (decaying at epochs 100 and 150 out of 200 total epochs).
+ For ImageNet training, we used the repository located at https://github.com/MadryLab/robustness with no modifications. The resulting logged data is stored in `.pth` files which can be loaded with `torch.load()` and are simply dictionaries of logged data. The scripts containing the parameters for resuming the ImageNet experiments can be found in `imagenet_scripts/`.
+ Training logs are all located in the `experiments` folder, and each subfolder corresponds to a set of experiments carried out in the paper.

Model weights for the following models can be found in this [drive folder][model weights]:
+ The best checkpoints for the CIFAR-10 WideResNets defined in `wideresnet.py`, for width factors 10 and 20 (from the double descent curve, trained against L-infinity).
+ The best checkpoints for the SVHN / CIFAR-10 (L2) / CIFAR-100 / ImageNet models reported in Table 1 (the ImageNet checkpoints are in the format directly used by https://github.com/MadryLab/robustness). The remaining models are for the Preactivation ResNet18 defined in `preactresnet.py`.

[leslie link]: https://leslierice1.github.io/
[eric link]: https://riceric22.github.io/
[zico link]: http://zicokolter.com/

[arxiv]: https://arxiv.org/abs/2002.11569
[model weights]: https://drive.google.com/drive/folders/110JHo_yH9zwIf1b12jKoG6dRonrow9eA?usp=sharing
[autoattack]: https://github.com/fra31/auto-attack
[autoattack arxiv]: https://arxiv.org/abs/2003.01690

--------------------------------------------------------------------------------
/cifar10_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/locuslab/robust_overfitting/c47a25c5e00c8b2bb35488d962c04dd771b7e9af/cifar10_curve.png

--------------------------------------------------------------------------------
/experiments/cifar10_dropepoch/preactresnet_60/output.log:
--------------------------------------------------------------------------------
1 | [2020/01/29 22:27:21] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=110, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_dropepoch/preactresnet_60', half=False, l1=0, l2=0, lr_drop_epoch=60, lr_max=0.1, lr_one_drop=0.01, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, restarts=1, resume=0, seed=0, val=False, width_factor=10)
2 | [2020/01/29 22:27:26] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc
3 | [2020/01/29 22:32:55] - 0 277.3 51.8 0.1000 1.9727 0.2767 2.1582 0.1978 1.7570 0.3768 1.9728 0.2636
4 | [2020/01/29 22:38:24] - 1 277.1 51.6 0.1000 1.7037 0.3865 1.9732 0.2590 1.5849 0.4426 1.8584 0.3108
5 | [2020/01/29 22:43:54] - 2 277.4 51.7 0.1000 1.5774 0.4450 1.8959 0.2882 1.5381 0.4591 1.8434 0.3111
6 | [2020/01/29 22:49:23] - 3 277.6 51.8 0.1000 1.4858 0.4825 1.8375 0.3050 1.4065 0.5218 1.7480
0.3435 7 | [2020/01/29 22:54:53] - 4 277.5 51.6 0.1000 1.4012 0.5249 1.7875 0.3235 1.3846 0.5291 1.7343 0.3527 8 | [2020/01/29 23:00:22] - 5 276.8 51.4 0.1000 1.3307 0.5579 1.7429 0.3428 1.2890 0.5591 1.6675 0.3756 9 | [2020/01/29 23:05:50] - 6 276.5 51.6 0.1000 1.2754 0.5851 1.7066 0.3565 1.1932 0.6017 1.6146 0.4011 10 | [2020/01/29 23:11:19] - 7 277.2 51.6 0.1000 1.2210 0.6086 1.6720 0.3677 1.1653 0.6237 1.6078 0.3925 11 | [2020/01/29 23:16:47] - 8 275.9 51.6 0.1000 1.1816 0.6272 1.6460 0.3755 1.1290 0.6205 1.5815 0.3996 12 | [2020/01/29 23:22:15] - 9 276.3 51.5 0.1000 1.1518 0.6410 1.6272 0.3851 1.0835 0.6543 1.5793 0.4067 13 | [2020/01/29 23:27:43] - 10 276.0 51.3 0.1000 1.1241 0.6535 1.6100 0.3926 1.1086 0.6238 1.5710 0.4115 14 | [2020/01/29 23:33:12] - 11 276.6 51.6 0.1000 1.1004 0.6631 1.5922 0.3981 1.1301 0.6511 1.5585 0.4185 15 | [2020/01/29 23:38:41] - 12 277.3 51.7 0.1000 1.0813 0.6730 1.5819 0.4008 1.0549 0.6704 1.5266 0.4246 16 | [2020/01/29 23:44:11] - 13 277.4 51.7 0.1000 1.0640 0.6800 1.5695 0.4075 1.0441 0.6612 1.5613 0.4130 17 | [2020/01/29 23:49:40] - 14 277.8 51.7 0.1000 1.0448 0.6876 1.5543 0.4104 1.0242 0.6822 1.5292 0.4219 18 | [2020/01/29 23:55:10] - 15 278.2 51.7 0.1000 1.0299 0.6959 1.5442 0.4139 1.0112 0.6922 1.5066 0.4371 19 | [2020/01/30 00:00:41] - 16 278.4 52.0 0.1000 1.0177 0.6995 1.5349 0.4183 1.0217 0.6478 1.5084 0.4374 20 | [2020/01/30 00:06:13] - 17 279.2 52.2 0.1000 1.0042 0.7048 1.5268 0.4214 1.0185 0.6914 1.4920 0.4489 21 | [2020/01/30 00:11:44] - 18 278.6 51.9 0.1000 0.9971 0.7056 1.5209 0.4224 0.9630 0.7043 1.5159 0.4239 22 | [2020/01/30 00:17:13] - 19 277.0 51.7 0.1000 0.9822 0.7149 1.5094 0.4287 0.9897 0.6936 1.5060 0.4336 23 | [2020/01/30 00:22:43] - 20 277.6 51.9 0.1000 0.9735 0.7178 1.5043 0.4310 0.9784 0.6919 1.4927 0.4419 24 | [2020/01/30 00:28:13] - 21 278.4 51.9 0.1000 0.9678 0.7163 1.4980 0.4325 1.0054 0.6809 1.4895 0.4390 25 | [2020/01/30 00:33:43] - 22 277.9 51.6 0.1000 0.9600 0.7221 1.4940 0.4334 0.9646 0.7089 1.4675 0.4487 26 | [2020/01/30 00:39:12] - 23 277.9 51.6 0.1000 0.9528 0.7256 1.4916 0.4322 0.9616 0.7098 1.4638 0.4398 27 | [2020/01/30 00:44:43] - 24 278.4 52.0 0.1000 0.9462 0.7301 1.4838 0.4371 0.9954 0.6769 1.4601 0.4524 28 | [2020/01/30 00:50:12] - 25 277.8 51.5 0.1000 0.9348 0.7349 1.4759 0.4385 0.9437 0.7010 1.4612 0.4528 29 | [2020/01/30 00:55:42] - 26 277.6 51.8 0.1000 0.9329 0.7334 1.4746 0.4380 0.9627 0.7136 1.4848 0.4479 30 | [2020/01/30 01:01:12] - 27 278.4 51.8 0.1000 0.9281 0.7375 1.4707 0.4405 0.9614 0.7135 1.4762 0.4451 31 | [2020/01/30 01:06:43] - 28 278.5 52.0 0.1000 0.9248 0.7354 1.4677 0.4430 0.9608 0.7045 1.4729 0.4482 32 | [2020/01/30 01:12:14] - 29 279.4 52.1 0.1000 0.9217 0.7378 1.4640 0.4456 0.9308 0.7233 1.4390 0.4642 33 | [2020/01/30 01:17:47] - 30 279.9 51.8 0.1000 0.9143 0.7418 1.4602 0.4421 0.9565 0.7072 1.4702 0.4500 34 | [2020/01/30 01:23:18] - 31 279.3 51.9 0.1000 0.9088 0.7438 1.4559 0.4476 0.9427 0.7269 1.4645 0.4543 35 | [2020/01/30 01:28:48] - 32 278.2 51.7 0.1000 0.9032 0.7453 1.4520 0.4498 0.9196 0.7240 1.4708 0.4485 36 | [2020/01/30 01:34:17] - 33 277.2 51.7 0.1000 0.9065 0.7431 1.4536 0.4481 0.9079 0.7302 1.4555 0.4561 37 | [2020/01/30 01:39:47] - 34 278.0 51.8 0.1000 0.8999 0.7467 1.4499 0.4496 0.8734 0.7336 1.4137 0.4707 38 | [2020/01/30 01:45:18] - 35 278.8 51.9 0.1000 0.8945 0.7512 1.4466 0.4488 0.9312 0.7271 1.4662 0.4578 39 | [2020/01/30 01:50:49] - 36 279.1 51.9 0.1000 0.8945 0.7499 1.4444 0.4510 0.9389 0.7264 1.4683 0.4533 40 | [2020/01/30 01:56:20] - 37 278.7 51.9 0.1000 0.8932 
0.7490 1.4463 0.4514 0.9125 0.7391 1.4409 0.4647 41 | [2020/01/30 02:01:51] - 38 278.3 52.3 0.1000 0.8897 0.7517 1.4420 0.4506 0.9318 0.7200 1.4607 0.4534 42 | [2020/01/30 02:07:19] - 39 277.3 51.2 0.1000 0.8865 0.7549 1.4411 0.4520 0.8972 0.7167 1.4518 0.4520 43 | [2020/01/30 02:12:49] - 40 277.1 51.8 0.1000 0.8837 0.7533 1.4376 0.4533 0.9234 0.7285 1.4401 0.4690 44 | [2020/01/30 02:18:20] - 41 279.3 52.1 0.1000 0.8807 0.7562 1.4362 0.4528 0.9033 0.7291 1.4320 0.4627 45 | [2020/01/30 02:23:52] - 42 279.7 52.0 0.1000 0.8824 0.7551 1.4390 0.4526 0.9209 0.7158 1.4408 0.4646 46 | [2020/01/30 02:29:24] - 43 279.6 52.1 0.1000 0.8762 0.7572 1.4330 0.4538 0.8804 0.7322 1.4446 0.4468 47 | [2020/01/30 02:34:56] - 44 280.0 52.1 0.1000 0.8769 0.7564 1.4321 0.4545 0.9301 0.7277 1.4393 0.4691 48 | [2020/01/30 02:40:25] - 45 278.1 51.4 0.1000 0.8732 0.7580 1.4295 0.4577 0.8571 0.7257 1.4523 0.4515 49 | [2020/01/30 02:45:54] - 46 277.3 51.8 0.1000 0.8741 0.7573 1.4305 0.4542 0.8616 0.7372 1.4146 0.4703 50 | [2020/01/30 02:51:25] - 47 279.2 51.8 0.1000 0.8688 0.7603 1.4255 0.4576 0.8708 0.7340 1.4102 0.4681 51 | [2020/01/30 02:56:55] - 48 277.3 51.8 0.1000 0.8703 0.7603 1.4307 0.4553 0.9103 0.7134 1.4652 0.4517 52 | [2020/01/30 03:02:25] - 49 278.6 52.0 0.1000 0.8645 0.7631 1.4223 0.4596 0.9190 0.7178 1.4506 0.4538 53 | [2020/01/30 03:07:57] - 50 278.9 51.9 0.1000 0.8631 0.7637 1.4214 0.4589 0.8550 0.7198 1.4201 0.4615 54 | [2020/01/30 03:13:27] - 51 278.5 51.9 0.1000 0.8578 0.7660 1.4203 0.4582 0.8756 0.7249 1.4283 0.4632 55 | [2020/01/30 03:18:58] - 52 279.0 52.1 0.1000 0.8583 0.7675 1.4198 0.4606 0.8434 0.7505 1.4136 0.4625 56 | [2020/01/30 03:24:30] - 53 279.9 52.0 0.1000 0.8635 0.7635 1.4237 0.4600 0.9016 0.7222 1.4458 0.4591 57 | [2020/01/30 03:30:01] - 54 279.4 51.9 0.1000 0.8560 0.7650 1.4172 0.4604 0.8301 0.7482 1.4361 0.4567 58 | [2020/01/30 03:35:32] - 55 279.1 51.9 0.1000 0.8548 0.7679 1.4184 0.4599 0.8983 0.7401 1.4646 0.4615 59 | [2020/01/30 03:41:03] - 56 278.3 51.8 0.1000 0.8562 0.7661 1.4167 0.4603 0.8500 0.7330 1.4494 0.4506 60 | [2020/01/30 03:46:34] - 57 279.3 52.0 0.1000 0.8467 0.7707 1.4085 0.4625 0.8527 0.7321 1.4255 0.4667 61 | [2020/01/30 03:52:06] - 58 280.1 52.1 0.1000 0.8582 0.7638 1.4190 0.4598 0.8697 0.7483 1.4286 0.4686 62 | [2020/01/30 03:57:37] - 59 279.3 51.9 0.0100 0.8481 0.7705 1.4118 0.4602 0.8886 0.7344 1.4068 0.4724 63 | [2020/01/30 04:03:10] - 60 279.1 51.9 0.0100 0.7413 0.8025 1.2329 0.5230 0.6985 0.7980 1.2683 0.5172 64 | [2020/01/30 04:08:41] - 61 278.8 51.9 0.0100 0.6632 0.8262 1.1703 0.5441 0.6796 0.8010 1.2904 0.5161 65 | [2020/01/30 04:14:12] - 62 279.6 52.0 0.0100 0.6358 0.8352 1.1435 0.5520 0.6361 0.8089 1.2707 0.5179 66 | [2020/01/30 04:19:44] - 63 279.3 51.9 0.0100 0.6138 0.8431 1.1261 0.5602 0.6601 0.7999 1.2552 0.5262 67 | [2020/01/30 04:25:14] - 64 278.1 51.7 0.0100 0.6000 0.8482 1.1115 0.5632 0.6399 0.8111 1.2492 0.5263 68 | [2020/01/30 04:30:45] - 65 278.2 51.9 0.0100 0.5838 0.8521 1.0982 0.5682 0.6233 0.8170 1.2554 0.5211 69 | [2020/01/30 04:36:16] - 66 279.1 51.9 0.0100 0.5695 0.8588 1.0848 0.5720 0.6120 0.8209 1.2642 0.5241 70 | [2020/01/30 04:41:47] - 67 279.2 52.0 0.0100 0.5572 0.8613 1.0725 0.5778 0.6124 0.8204 1.2641 0.5261 71 | [2020/01/30 04:47:18] - 68 279.0 51.8 0.0100 0.5470 0.8646 1.0631 0.5791 0.6258 0.8111 1.2894 0.5166 72 | [2020/01/30 04:52:49] - 69 278.8 51.9 0.0100 0.5380 0.8681 1.0547 0.5804 0.6056 0.8218 1.2854 0.5165 73 | [2020/01/30 04:58:17] - 70 276.0 51.4 0.0100 0.5242 0.8719 1.0433 0.5853 0.6292 0.8034 1.2831 0.5179 74 | 
[2020/01/30 05:03:47] - 71 278.2 51.8 0.0100 0.5195 0.8747 1.0404 0.5879 0.6032 0.8180 1.2763 0.5230 75 | [2020/01/30 05:09:18] - 72 278.9 51.9 0.0100 0.5118 0.8791 1.0325 0.5890 0.5947 0.8193 1.3244 0.5074 76 | [2020/01/30 05:14:48] - 73 279.1 51.8 0.0100 0.5039 0.8815 1.0251 0.5905 0.5976 0.8168 1.3231 0.5084 77 | [2020/01/30 05:20:20] - 74 279.3 52.0 0.0100 0.4981 0.8840 1.0224 0.5911 0.6172 0.8151 1.3230 0.5110 78 | [2020/01/30 05:25:52] - 75 280.2 52.2 0.0100 0.4888 0.8865 1.0114 0.5941 0.6003 0.8284 1.3122 0.5073 79 | [2020/01/30 05:31:23] - 76 279.3 51.9 0.0100 0.4829 0.8896 1.0082 0.5945 0.5770 0.8274 1.3157 0.5110 80 | [2020/01/30 05:36:54] - 77 279.0 52.0 0.0100 0.4779 0.8909 1.0074 0.5959 0.5972 0.8220 1.3199 0.5103 81 | [2020/01/30 05:42:24] - 78 278.4 51.7 0.0100 0.4723 0.8919 0.9969 0.5986 0.5989 0.8241 1.3533 0.5014 82 | [2020/01/30 05:47:55] - 79 279.0 52.1 0.0100 0.4676 0.8958 0.9918 0.6013 0.5925 0.8199 1.3432 0.5065 83 | [2020/01/30 05:53:28] - 80 279.6 51.9 0.0100 0.4614 0.8974 0.9894 0.6008 0.5959 0.8263 1.3376 0.5053 84 | [2020/01/30 05:58:59] - 81 279.7 51.9 0.0100 0.4546 0.8979 0.9814 0.6051 0.5966 0.8204 1.3422 0.5074 85 | [2020/01/30 06:04:30] - 82 279.1 52.0 0.0100 0.4483 0.9030 0.9780 0.6063 0.5664 0.8284 1.3827 0.4948 86 | [2020/01/30 06:10:02] - 83 279.3 51.9 0.0100 0.4445 0.9030 0.9731 0.6057 0.5877 0.8225 1.3273 0.5087 87 | [2020/01/30 06:15:33] - 84 279.2 51.9 0.0100 0.4399 0.9047 0.9664 0.6073 0.6077 0.8208 1.3326 0.5072 88 | [2020/01/30 06:21:04] - 85 278.9 51.9 0.0100 0.4376 0.9048 0.9656 0.6074 0.6075 0.8170 1.3364 0.5182 89 | [2020/01/30 06:26:34] - 86 278.4 51.8 0.0100 0.4321 0.9066 0.9583 0.6113 0.5890 0.8257 1.3658 0.5029 90 | [2020/01/30 06:32:04] - 87 278.6 51.7 0.0100 0.4261 0.9104 0.9555 0.6121 0.5803 0.8252 1.3911 0.4948 91 | [2020/01/30 06:37:34] - 88 278.5 51.8 0.0100 0.4232 0.9120 0.9519 0.6126 0.6368 0.7986 1.3915 0.4988 92 | [2020/01/30 06:43:05] - 89 278.6 52.0 0.0100 0.4200 0.9107 0.9494 0.6134 0.6084 0.8238 1.3497 0.5051 93 | [2020/01/30 06:48:36] - 90 278.5 52.0 0.0100 0.4140 0.9140 0.9417 0.6166 0.5868 0.8266 1.3798 0.5005 94 | [2020/01/30 06:54:08] - 91 279.4 52.0 0.0100 0.4115 0.9129 0.9395 0.6163 0.5717 0.8288 1.3774 0.5045 95 | [2020/01/30 06:59:40] - 92 280.0 52.0 0.0100 0.4037 0.9171 0.9324 0.6179 0.5980 0.8197 1.3881 0.4999 96 | [2020/01/30 07:05:12] - 93 280.0 51.9 0.0100 0.4030 0.9173 0.9310 0.6191 0.5572 0.8308 1.4238 0.4938 97 | [2020/01/30 07:10:43] - 94 278.9 51.7 0.0100 0.4013 0.9185 0.9296 0.6197 0.5738 0.8217 1.4097 0.4970 98 | [2020/01/30 07:16:12] - 95 277.0 52.1 0.0100 0.3934 0.9205 0.9209 0.6236 0.5891 0.8155 1.4103 0.4934 99 | [2020/01/30 07:21:42] - 96 278.1 51.9 0.0100 0.3932 0.9198 0.9189 0.6219 0.5763 0.8266 1.3882 0.4961 100 | [2020/01/30 07:27:12] - 97 278.7 51.9 0.0100 0.3874 0.9228 0.9146 0.6217 0.6044 0.8204 1.3887 0.4978 101 | [2020/01/30 07:32:44] - 98 279.4 51.9 0.0100 0.3838 0.9229 0.9129 0.6233 0.5732 0.8246 1.4243 0.4883 102 | [2020/01/30 07:38:12] - 99 276.9 51.3 0.0100 0.3810 0.9245 0.9077 0.6262 0.5783 0.8202 1.4536 0.4869 103 | [2020/01/30 07:43:41] - 100 277.1 51.7 0.0100 0.3791 0.9251 0.9044 0.6254 0.5658 0.8237 1.4507 0.4915 104 | [2020/01/30 07:49:08] - 101 275.2 51.1 0.0100 0.3742 0.9257 0.9019 0.6268 0.5843 0.8312 1.4119 0.4966 105 | [2020/01/30 07:54:35] - 102 276.1 51.4 0.0100 0.3730 0.9271 0.8988 0.6296 0.5925 0.8181 1.4436 0.4890 106 | [2020/01/30 08:00:04] - 103 276.9 51.5 0.0100 0.3670 0.9284 0.8903 0.6328 0.5594 0.8321 1.4332 0.4921 107 | [2020/01/30 08:05:31] - 104 276.4 51.5 
0.0100 0.3621 0.9296 0.8913 0.6306 0.5992 0.8169 1.4889 0.4864 108 | [2020/01/30 08:10:58] - 105 275.4 51.1 0.0100 0.3598 0.9320 0.8871 0.6314 0.6046 0.8115 1.4087 0.5011 109 | [2020/01/30 08:16:25] - 106 275.8 51.4 0.0100 0.3561 0.9329 0.8801 0.6349 0.5776 0.8257 1.3924 0.5005 110 | [2020/01/30 08:21:53] - 107 276.3 51.5 0.0100 0.3519 0.9326 0.8754 0.6375 0.5774 0.8203 1.4518 0.4890 111 | [2020/01/30 08:27:21] - 108 276.6 51.4 0.0100 0.3489 0.9336 0.8697 0.6378 0.5649 0.8229 1.4610 0.4871 112 | [2020/01/30 08:32:48] - 109 275.8 51.6 0.0100 0.3454 0.9357 0.8684 0.6387 0.5536 0.8328 1.5049 0.4809 113 | -------------------------------------------------------------------------------- /experiments/cifar10_dropepoch/preactresnet_70/output.log: -------------------------------------------------------------------------------- 1 | [2020/01/29 22:45:23] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=110, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_dropepoch/preactresnet_70', half=False, l1=0, l2=0, lr_drop_epoch=70, lr_max=0.1, lr_one_drop=0.01, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, restarts=1, resume=0, seed=0, val=False, width_factor=10) 2 | [2020/01/29 22:45:27] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 3 | [2020/01/29 22:51:02] - 0 282.0 52.6 0.1000 1.9746 0.2755 2.1604 0.1969 1.7627 0.3600 1.9910 0.2521 4 | [2020/01/29 22:56:40] - 1 284.8 53.1 0.1000 1.7088 0.3833 1.9780 0.2575 1.5976 0.4395 1.8713 0.3093 5 | [2020/01/29 23:02:17] - 2 283.5 53.1 0.1000 1.5850 0.4420 1.9006 0.2862 1.5608 0.4513 1.8584 0.3086 6 | [2020/01/29 23:07:55] - 3 284.8 53.6 0.1000 1.4951 0.4761 1.8430 0.3020 1.4267 0.5174 1.7576 0.3418 7 | [2020/01/29 23:13:34] - 4 285.1 53.2 0.1000 1.4110 0.5199 1.7934 0.3233 1.4007 0.5259 1.7398 0.3559 8 | [2020/01/29 23:19:11] - 5 282.5 53.4 0.1000 1.3406 0.5526 1.7499 0.3412 1.2939 0.5546 1.6720 0.3715 9 | [2020/01/29 23:24:50] - 6 285.4 53.5 0.1000 1.2845 0.5817 1.7125 0.3533 1.1954 0.6034 1.6202 0.3997 10 | [2020/01/29 23:30:29] - 7 285.5 53.4 0.1000 1.2302 0.6050 1.6779 0.3636 1.1736 0.6192 1.6108 0.3900 11 | [2020/01/29 23:36:08] - 8 285.6 53.4 0.1000 1.1893 0.6246 1.6510 0.3729 1.1334 0.6236 1.5805 0.3966 12 | [2020/01/29 23:41:47] - 9 285.7 53.5 0.1000 1.1569 0.6385 1.6303 0.3826 1.0977 0.6513 1.5783 0.4075 13 | [2020/01/29 23:47:28] - 10 285.5 53.5 0.1000 1.1312 0.6497 1.6148 0.3920 1.1087 0.6239 1.5724 0.4116 14 | [2020/01/29 23:53:07] - 11 285.5 53.1 0.1000 1.1042 0.6603 1.5952 0.3970 1.1303 0.6476 1.5592 0.4196 15 | [2020/01/29 23:58:45] - 12 284.6 53.5 0.1000 1.0844 0.6699 1.5832 0.4002 1.0492 0.6747 1.5208 0.4277 16 | [2020/01/30 00:04:25] - 13 285.6 53.8 0.1000 1.0671 0.6796 1.5717 0.4066 1.0321 0.6649 1.5606 0.4148 17 | [2020/01/30 00:10:04] - 14 285.0 53.8 0.1000 1.0448 0.6853 1.5551 0.4117 1.0294 0.6830 1.5365 0.4167 18 | [2020/01/30 00:15:42] - 15 284.7 53.4 0.1000 1.0307 0.6944 1.5451 0.4120 1.0080 0.6931 1.5060 0.4337 19 | [2020/01/30 00:21:22] - 16 285.7 53.5 0.1000 1.0176 0.7004 1.5348 0.4192 1.0095 0.6551 1.5072 0.4359 20 | [2020/01/30 00:27:02] - 17 285.6 53.7 0.1000 1.0044 0.7040 1.5269 0.4211 1.0200 0.6884 1.4994 0.4420 21 | [2020/01/30 00:32:41] - 18 285.9 53.3 0.1000 0.9959 0.7064 1.5210 0.4217 0.9535 0.7042 1.5141 0.4250 22 | [2020/01/30 00:38:20] - 19 285.6 
53.3 0.1000 0.9819 0.7153 1.5092 0.4276 0.9908 0.6888 1.5008 0.4367 23 | [2020/01/30 00:44:00] - 20 285.6 53.4 0.1000 0.9721 0.7196 1.5027 0.4312 0.9774 0.6968 1.4924 0.4393 24 | [2020/01/30 00:49:41] - 21 287.6 53.7 0.1000 0.9674 0.7169 1.4971 0.4334 1.0048 0.6828 1.4902 0.4407 25 | [2020/01/30 00:55:22] - 22 287.3 54.0 0.1000 0.9603 0.7224 1.4931 0.4335 0.9640 0.7119 1.4601 0.4539 26 | [2020/01/30 01:01:02] - 23 285.7 53.8 0.1000 0.9515 0.7266 1.4904 0.4351 0.9491 0.7163 1.4595 0.4451 27 | [2020/01/30 01:06:44] - 24 287.7 53.5 0.1000 0.9438 0.7306 1.4813 0.4373 0.9957 0.6777 1.4608 0.4535 28 | [2020/01/30 01:12:24] - 25 286.8 53.7 0.1000 0.9324 0.7358 1.4743 0.4392 0.9401 0.7068 1.4543 0.4575 29 | [2020/01/30 01:18:04] - 26 285.9 53.8 0.1000 0.9320 0.7324 1.4721 0.4387 0.9663 0.7133 1.4851 0.4485 30 | [2020/01/30 01:23:45] - 27 286.7 53.7 0.1000 0.9267 0.7374 1.4698 0.4408 0.9688 0.7122 1.4706 0.4528 31 | [2020/01/30 01:29:26] - 28 287.5 53.8 0.1000 0.9228 0.7360 1.4660 0.4446 0.9591 0.7088 1.4805 0.4456 32 | [2020/01/30 01:35:06] - 29 287.1 53.5 0.1000 0.9191 0.7405 1.4630 0.4460 0.9157 0.7330 1.4370 0.4640 33 | [2020/01/30 01:40:49] - 30 287.6 53.7 0.1000 0.9128 0.7426 1.4585 0.4425 0.9526 0.7167 1.4667 0.4510 34 | [2020/01/30 01:46:29] - 31 286.6 53.4 0.1000 0.9076 0.7460 1.4553 0.4464 0.9378 0.7265 1.4578 0.4531 35 | [2020/01/30 01:52:10] - 32 286.7 53.7 0.1000 0.9006 0.7463 1.4492 0.4488 0.9237 0.7187 1.4769 0.4470 36 | [2020/01/30 01:57:51] - 33 287.8 54.0 0.1000 0.9064 0.7423 1.4529 0.4500 0.9030 0.7288 1.4645 0.4546 37 | [2020/01/30 02:03:31] - 34 285.9 53.5 0.1000 0.8978 0.7478 1.4483 0.4493 0.8805 0.7307 1.4207 0.4685 38 | [2020/01/30 02:09:13] - 35 287.6 53.9 0.1000 0.8925 0.7525 1.4442 0.4490 0.9299 0.7257 1.4643 0.4599 39 | [2020/01/30 02:14:54] - 36 287.0 53.9 0.1000 0.8964 0.7489 1.4453 0.4513 0.9391 0.7263 1.4473 0.4583 40 | [2020/01/30 02:20:36] - 37 288.0 54.0 0.1000 0.8890 0.7508 1.4430 0.4530 0.9131 0.7355 1.4426 0.4586 41 | [2020/01/30 02:26:17] - 38 287.9 53.9 0.1000 0.8872 0.7540 1.4401 0.4497 0.9318 0.7166 1.4585 0.4504 42 | [2020/01/30 02:31:59] - 39 287.4 53.9 0.1000 0.8843 0.7560 1.4391 0.4539 0.8997 0.7223 1.4613 0.4456 43 | [2020/01/30 02:37:41] - 40 287.4 53.7 0.1000 0.8837 0.7551 1.4368 0.4529 0.9352 0.7217 1.4435 0.4646 44 | [2020/01/30 02:43:21] - 41 286.9 53.8 0.1000 0.8783 0.7574 1.4347 0.4522 0.8951 0.7348 1.4279 0.4674 45 | [2020/01/30 02:49:02] - 42 286.4 53.9 0.1000 0.8815 0.7555 1.4384 0.4538 0.9175 0.7141 1.4409 0.4671 46 | [2020/01/30 02:54:42] - 43 286.7 53.9 0.1000 0.8760 0.7578 1.4331 0.4540 0.8747 0.7362 1.4425 0.4477 47 | [2020/01/30 03:00:23] - 44 287.2 53.6 0.1000 0.8729 0.7581 1.4308 0.4556 0.9157 0.7369 1.4286 0.4747 48 | [2020/01/30 03:06:04] - 45 287.3 53.6 0.1000 0.8710 0.7592 1.4271 0.4571 0.8509 0.7271 1.4417 0.4525 49 | [2020/01/30 03:11:45] - 46 286.8 53.7 0.1000 0.8725 0.7570 1.4293 0.4556 0.8647 0.7261 1.4267 0.4658 50 | [2020/01/30 03:17:26] - 47 287.1 53.5 0.1000 0.8670 0.7636 1.4248 0.4576 0.8781 0.7231 1.4244 0.4648 51 | [2020/01/30 03:23:05] - 48 286.7 53.1 0.1000 0.8699 0.7625 1.4317 0.4577 0.9063 0.7172 1.4789 0.4458 52 | [2020/01/30 03:28:46] - 49 287.1 53.9 0.1000 0.8654 0.7625 1.4233 0.4582 0.9062 0.7246 1.4390 0.4592 53 | [2020/01/30 03:34:28] - 50 286.6 53.8 0.1000 0.8619 0.7640 1.4206 0.4604 0.8748 0.7115 1.4290 0.4605 54 | [2020/01/30 03:40:06] - 51 285.6 53.2 0.1000 0.8572 0.7652 1.4188 0.4586 0.8733 0.7254 1.4231 0.4677 55 | [2020/01/30 03:45:47] - 52 286.8 54.1 0.1000 0.8561 0.7680 1.4185 0.4609 0.8557 0.7421 
1.4169 0.4580 56 | [2020/01/30 03:51:29] - 53 287.7 53.9 0.1000 0.8632 0.7651 1.4240 0.4608 0.9063 0.7220 1.4385 0.4641 57 | [2020/01/30 03:57:10] - 54 287.5 53.9 0.1000 0.8540 0.7660 1.4143 0.4605 0.8379 0.7489 1.4451 0.4545 58 | [2020/01/30 04:02:51] - 55 287.2 53.7 0.1000 0.8557 0.7671 1.4177 0.4596 0.8926 0.7389 1.4591 0.4615 59 | [2020/01/30 04:08:32] - 56 287.0 53.5 0.1000 0.8561 0.7642 1.4167 0.4613 0.8454 0.7397 1.4219 0.4613 60 | [2020/01/30 04:14:12] - 57 286.9 53.9 0.1000 0.8474 0.7693 1.4085 0.4621 0.8697 0.7254 1.4310 0.4679 61 | [2020/01/30 04:19:53] - 58 286.7 53.8 0.1000 0.8575 0.7643 1.4192 0.4603 0.8699 0.7492 1.4322 0.4700 62 | [2020/01/30 04:25:35] - 59 287.6 54.0 0.1000 0.8471 0.7720 1.4121 0.4611 0.8858 0.7328 1.4103 0.4682 63 | [2020/01/30 04:31:17] - 60 287.7 54.0 0.1000 0.8521 0.7666 1.4155 0.4608 0.8641 0.7533 1.4223 0.4649 64 | [2020/01/30 04:36:57] - 61 286.4 53.6 0.1000 0.8495 0.7717 1.4129 0.4627 0.8516 0.7436 1.4152 0.4711 65 | [2020/01/30 04:42:39] - 62 287.7 53.9 0.1000 0.8429 0.7716 1.4089 0.4637 0.8505 0.7345 1.4132 0.4706 66 | [2020/01/30 04:48:20] - 63 287.6 53.8 0.1000 0.8448 0.7710 1.4081 0.4636 0.8764 0.7388 1.4177 0.4719 67 | [2020/01/30 04:54:02] - 64 287.4 54.0 0.1000 0.8480 0.7714 1.4135 0.4624 0.8575 0.7442 1.4227 0.4689 68 | [2020/01/30 04:59:43] - 65 287.5 54.0 0.1000 0.8440 0.7716 1.4085 0.4605 0.8396 0.7410 1.4403 0.4609 69 | [2020/01/30 05:05:23] - 66 286.4 53.9 0.1000 0.8414 0.7732 1.4059 0.4628 0.8634 0.7363 1.4223 0.4613 70 | [2020/01/30 05:11:05] - 67 287.2 53.9 0.1000 0.8391 0.7746 1.4054 0.4633 0.8261 0.7637 1.4324 0.4594 71 | [2020/01/30 05:16:46] - 68 287.4 53.7 0.1000 0.8356 0.7757 1.4017 0.4654 0.8769 0.7383 1.4377 0.4636 72 | [2020/01/30 05:22:27] - 69 287.7 53.8 0.0100 0.8418 0.7703 1.4055 0.4641 0.8533 0.7459 1.4468 0.4525 73 | [2020/01/30 05:28:10] - 70 287.6 53.7 0.0100 0.7169 0.8121 1.2147 0.5327 0.7007 0.7907 1.2729 0.5189 74 | [2020/01/30 05:33:52] - 71 287.9 53.8 0.0100 0.6485 0.8311 1.1569 0.5513 0.6571 0.8017 1.2523 0.5209 75 | [2020/01/30 05:39:33] - 72 286.8 53.6 0.0100 0.6218 0.8398 1.1329 0.5588 0.6321 0.8157 1.2688 0.5188 76 | [2020/01/30 05:45:13] - 73 286.9 54.0 0.0100 0.6023 0.8460 1.1126 0.5637 0.6283 0.8111 1.2616 0.5211 77 | [2020/01/30 05:50:55] - 74 287.1 53.7 0.0100 0.5859 0.8499 1.0986 0.5694 0.6375 0.8090 1.2782 0.5159 78 | [2020/01/30 05:56:36] - 75 287.6 54.0 0.0100 0.5695 0.8575 1.0815 0.5751 0.6273 0.8193 1.2648 0.5201 79 | [2020/01/30 06:02:17] - 76 287.3 53.4 0.0100 0.5572 0.8633 1.0729 0.5772 0.5956 0.8316 1.2464 0.5272 80 | [2020/01/30 06:07:57] - 77 286.5 53.5 0.0100 0.5457 0.8665 1.0632 0.5794 0.6093 0.8165 1.2702 0.5214 81 | [2020/01/30 06:13:39] - 78 287.8 53.8 0.0100 0.5357 0.8690 1.0500 0.5834 0.6031 0.8246 1.2931 0.5129 82 | [2020/01/30 06:19:20] - 79 286.9 53.8 0.0100 0.5249 0.8734 1.0410 0.5859 0.6028 0.8258 1.2768 0.5241 83 | [2020/01/30 06:25:01] - 80 286.4 53.9 0.0100 0.5161 0.8773 1.0339 0.5889 0.6049 0.8247 1.2770 0.5215 84 | [2020/01/30 06:30:40] - 81 285.5 53.3 0.0100 0.5053 0.8796 1.0233 0.5924 0.6027 0.8207 1.2839 0.5196 85 | [2020/01/30 06:36:21] - 82 287.5 53.8 0.0100 0.4979 0.8838 1.0197 0.5922 0.5728 0.8306 1.3071 0.5134 86 | [2020/01/30 06:42:03] - 83 287.8 53.8 0.0100 0.4910 0.8858 1.0110 0.5950 0.5890 0.8239 1.2825 0.5204 87 | [2020/01/30 06:47:43] - 84 287.4 53.4 0.0100 0.4833 0.8890 1.0041 0.5979 0.6117 0.8251 1.2601 0.5257 88 | [2020/01/30 06:53:25] - 85 287.7 53.8 0.0100 0.4776 0.8908 0.9997 0.5984 0.6216 0.8178 1.3107 0.5165 89 | [2020/01/30 06:59:06] - 86 287.6 53.9 
0.0100 0.4718 0.8920 0.9929 0.6007 0.5984 0.8220 1.3150 0.5140 90 | [2020/01/30 07:04:47] - 87 286.4 53.8 0.0100 0.4648 0.8961 0.9878 0.6019 0.5882 0.8259 1.3270 0.5063 91 | [2020/01/30 07:10:28] - 88 287.4 53.7 0.0100 0.4601 0.8974 0.9831 0.6036 0.6256 0.8070 1.3489 0.5028 92 | [2020/01/30 07:16:09] - 89 287.7 53.9 0.0100 0.4546 0.8983 0.9800 0.6063 0.6056 0.8207 1.3014 0.5123 93 | [2020/01/30 07:21:50] - 90 286.8 53.4 0.0100 0.4490 0.9027 0.9748 0.6052 0.5945 0.8282 1.3212 0.5131 94 | [2020/01/30 07:27:32] - 91 287.4 53.7 0.0100 0.4448 0.9018 0.9699 0.6069 0.5658 0.8340 1.3321 0.5086 95 | [2020/01/30 07:33:12] - 92 287.3 53.4 0.0100 0.4378 0.9051 0.9662 0.6081 0.5879 0.8243 1.3330 0.5098 96 | [2020/01/30 07:38:53] - 93 286.7 53.8 0.0100 0.4356 0.9055 0.9607 0.6105 0.5532 0.8329 1.3559 0.5046 97 | [2020/01/30 07:44:34] - 94 287.5 53.7 0.0100 0.4319 0.9083 0.9575 0.6107 0.5729 0.8281 1.3538 0.5040 98 | [2020/01/30 07:50:13] - 95 285.7 53.6 0.0100 0.4251 0.9104 0.9522 0.6134 0.5860 0.8241 1.3514 0.5036 99 | [2020/01/30 07:55:50] - 96 284.0 53.1 0.0100 0.4232 0.9093 0.9469 0.6159 0.5836 0.8323 1.3415 0.5070 100 | [2020/01/30 08:01:31] - 97 287.2 53.5 0.0100 0.4157 0.9132 0.9415 0.6149 0.6042 0.8258 1.3429 0.5075 101 | [2020/01/30 08:07:10] - 98 286.0 53.4 0.0100 0.4122 0.9131 0.9409 0.6155 0.5853 0.8226 1.3753 0.5001 102 | [2020/01/30 08:12:52] - 99 287.2 53.8 0.0100 0.4102 0.9148 0.9369 0.6167 0.5687 0.8286 1.3976 0.4988 103 | [2020/01/30 08:18:32] - 100 286.4 53.5 0.0100 0.4075 0.9162 0.9338 0.6153 0.5712 0.8226 1.3979 0.4965 104 | [2020/01/30 08:24:13] - 101 287.2 53.3 0.0100 0.4005 0.9168 0.9283 0.6179 0.5902 0.8287 1.3795 0.5025 105 | [2020/01/30 08:29:51] - 102 285.4 53.1 0.0100 0.3994 0.9180 0.9256 0.6209 0.5934 0.8242 1.3759 0.4985 106 | [2020/01/30 08:35:32] - 103 287.3 53.6 0.0100 0.3933 0.9200 0.9160 0.6244 0.5599 0.8301 1.4046 0.4990 107 | [2020/01/30 08:41:13] - 104 287.2 53.4 0.0100 0.3891 0.9221 0.9189 0.6229 0.5918 0.8193 1.4232 0.4952 108 | [2020/01/30 08:46:50] - 105 284.0 52.9 0.0100 0.3845 0.9226 0.9124 0.6248 0.6047 0.8150 1.3761 0.5028 109 | [2020/01/30 08:52:29] - 106 285.6 53.4 0.0100 0.3826 0.9249 0.9090 0.6266 0.5775 0.8288 1.3590 0.5066 110 | [2020/01/30 08:58:09] - 107 286.7 53.8 0.0100 0.3764 0.9267 0.9011 0.6271 0.5809 0.8290 1.3748 0.5009 111 | [2020/01/30 09:03:50] - 108 287.1 53.9 0.0100 0.3743 0.9266 0.8973 0.6297 0.5697 0.8266 1.4010 0.4949 112 | [2020/01/30 09:09:31] - 109 287.3 53.8 0.0100 0.3700 0.9268 0.8917 0.6320 0.5682 0.8315 1.4555 0.4884 113 | -------------------------------------------------------------------------------- /experiments/cifar10_dropepoch/preactresnet_80/output.log: -------------------------------------------------------------------------------- 1 | [2020/01/29 22:02:52] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=110, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_dropepoch/preactresnet_80', half=False, l1=0, l2=0, lr_drop_epoch=80, lr_max=0.1, lr_one_drop=0.01, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, restarts=1, resume=0, seed=0, val=False, width_factor=10) 2 | [2020/01/29 22:02:57] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 3 | [2020/01/29 22:08:11] - 0 265.2 49.2 0.1000 1.9522 0.2863 2.1478 0.2024 1.7304 0.3793 1.9595 0.2613 4 | 
[2020/01/29 22:13:26] - 1 265.7 49.3 0.1000 1.6952 0.3896 1.9683 0.2603 1.5870 0.4434 1.8610 0.3119 5 | [2020/01/29 22:18:42] - 2 266.1 49.4 0.1000 1.5753 0.4466 1.8941 0.2903 1.5445 0.4595 1.8444 0.3140 6 | [2020/01/29 22:23:59] - 3 266.2 49.4 0.1000 1.4857 0.4825 1.8371 0.3049 1.4129 0.5194 1.7550 0.3410 7 | [2020/01/29 22:29:15] - 4 266.4 49.4 0.1000 1.4031 0.5244 1.7890 0.3250 1.3892 0.5306 1.7315 0.3586 8 | [2020/01/29 22:34:31] - 5 266.5 49.4 0.1000 1.3329 0.5553 1.7443 0.3417 1.2857 0.5585 1.6622 0.3764 9 | [2020/01/29 22:39:48] - 6 266.5 49.4 0.1000 1.2769 0.5850 1.7073 0.3565 1.1881 0.6095 1.6124 0.4023 10 | [2020/01/29 22:45:04] - 7 266.6 49.4 0.1000 1.2237 0.6073 1.6730 0.3671 1.1649 0.6227 1.6090 0.3907 11 | [2020/01/29 22:50:20] - 8 266.7 49.4 0.1000 1.1827 0.6275 1.6464 0.3751 1.1294 0.6227 1.5842 0.3984 12 | [2020/01/29 22:55:36] - 9 266.7 49.4 0.1000 1.1530 0.6408 1.6284 0.3849 1.0857 0.6560 1.5773 0.4092 13 | [2020/01/29 23:00:53] - 10 266.5 49.4 0.1000 1.1237 0.6530 1.6101 0.3922 1.1132 0.6224 1.5780 0.4056 14 | [2020/01/29 23:06:10] - 11 266.7 49.3 0.1000 1.0995 0.6639 1.5918 0.3985 1.1228 0.6505 1.5566 0.4183 15 | [2020/01/29 23:11:26] - 12 266.8 49.4 0.1000 1.0799 0.6725 1.5817 0.4007 1.0549 0.6737 1.5264 0.4249 16 | [2020/01/29 23:16:43] - 13 266.8 49.4 0.1000 1.0627 0.6810 1.5684 0.4074 1.0371 0.6606 1.5599 0.4144 17 | [2020/01/29 23:21:59] - 14 266.8 49.4 0.1000 1.0434 0.6868 1.5545 0.4104 1.0263 0.6844 1.5301 0.4170 18 | [2020/01/29 23:27:15] - 15 266.9 49.4 0.1000 1.0263 0.6969 1.5424 0.4140 1.0019 0.6922 1.4990 0.4414 19 | [2020/01/29 23:32:32] - 16 266.9 49.4 0.1000 1.0143 0.7010 1.5328 0.4195 1.0111 0.6541 1.5044 0.4361 20 | [2020/01/29 23:37:48] - 17 266.9 49.4 0.1000 1.0026 0.7044 1.5254 0.4211 1.0169 0.6950 1.4967 0.4421 21 | [2020/01/29 23:43:05] - 18 267.0 49.4 0.1000 0.9937 0.7076 1.5191 0.4203 0.9508 0.7041 1.5173 0.4236 22 | [2020/01/29 23:48:22] - 19 267.0 49.4 0.1000 0.9785 0.7156 1.5068 0.4269 0.9917 0.6933 1.5069 0.4354 23 | [2020/01/29 23:53:39] - 20 267.0 49.4 0.1000 0.9703 0.7202 1.5022 0.4301 0.9814 0.6918 1.4959 0.4356 24 | [2020/01/29 23:58:55] - 21 267.1 49.4 0.1000 0.9648 0.7178 1.4962 0.4335 1.0018 0.6866 1.4868 0.4443 25 | [2020/01/30 00:04:12] - 22 267.0 49.4 0.1000 0.9568 0.7237 1.4913 0.4337 0.9663 0.7115 1.4664 0.4481 26 | [2020/01/30 00:09:29] - 23 267.0 49.4 0.1000 0.9488 0.7288 1.4885 0.4354 0.9581 0.7097 1.4656 0.4398 27 | [2020/01/30 00:14:46] - 24 267.1 49.4 0.1000 0.9411 0.7313 1.4803 0.4379 0.9902 0.6801 1.4604 0.4535 28 | [2020/01/30 00:20:02] - 25 267.1 49.4 0.1000 0.9307 0.7363 1.4734 0.4399 0.9408 0.7086 1.4511 0.4567 29 | [2020/01/30 00:25:19] - 26 267.1 49.5 0.1000 0.9281 0.7345 1.4700 0.4389 0.9452 0.7252 1.4732 0.4543 30 | [2020/01/30 00:30:36] - 27 267.2 49.5 0.1000 0.9235 0.7375 1.4681 0.4413 0.9615 0.7105 1.4697 0.4459 31 | [2020/01/30 00:35:53] - 28 267.3 49.4 0.1000 0.9204 0.7373 1.4648 0.4433 0.9652 0.7001 1.4822 0.4426 32 | [2020/01/30 00:41:09] - 29 267.2 49.4 0.1000 0.9181 0.7416 1.4621 0.4443 0.9208 0.7274 1.4379 0.4655 33 | [2020/01/30 00:46:27] - 30 267.3 49.5 0.1000 0.9111 0.7427 1.4582 0.4440 0.9434 0.7171 1.4630 0.4493 34 | [2020/01/30 00:51:44] - 31 267.5 49.5 0.1000 0.9059 0.7478 1.4548 0.4458 0.9353 0.7282 1.4562 0.4546 35 | [2020/01/30 00:57:01] - 32 267.4 49.5 0.1000 0.9007 0.7463 1.4496 0.4510 0.9137 0.7254 1.4716 0.4510 36 | [2020/01/30 01:02:18] - 33 267.4 49.4 0.1000 0.9053 0.7442 1.4524 0.4477 0.9062 0.7309 1.4514 0.4580 37 | [2020/01/30 01:07:35] - 34 267.4 49.5 0.1000 0.8955 0.7494 1.4464 
0.4501 0.8583 0.7407 1.4093 0.4679 38 | [2020/01/30 01:12:52] - 35 267.4 49.5 0.1000 0.8921 0.7515 1.4442 0.4507 0.9369 0.7240 1.4616 0.4605 39 | [2020/01/30 01:18:09] - 36 267.4 49.5 0.1000 0.8929 0.7504 1.4433 0.4511 0.9409 0.7194 1.4680 0.4485 40 | [2020/01/30 01:23:26] - 37 267.1 49.4 0.1000 0.8873 0.7524 1.4420 0.4539 0.9183 0.7292 1.4476 0.4573 41 | [2020/01/30 01:28:42] - 38 267.1 49.5 0.1000 0.8853 0.7544 1.4395 0.4511 0.9242 0.7310 1.4552 0.4505 42 | [2020/01/30 01:33:59] - 39 267.1 49.4 0.1000 0.8827 0.7554 1.4384 0.4528 0.9016 0.7169 1.4500 0.4526 43 | [2020/01/30 01:39:16] - 40 267.0 49.4 0.1000 0.8818 0.7554 1.4361 0.4542 0.9319 0.7191 1.4342 0.4714 44 | [2020/01/30 01:44:33] - 41 267.0 49.4 0.1000 0.8792 0.7564 1.4349 0.4531 0.8812 0.7404 1.4187 0.4663 45 | [2020/01/30 01:49:49] - 42 267.1 49.4 0.1000 0.8784 0.7560 1.4361 0.4525 0.9020 0.7216 1.4347 0.4676 46 | [2020/01/30 01:55:06] - 43 267.3 49.5 0.1000 0.8753 0.7583 1.4326 0.4535 0.8806 0.7334 1.4502 0.4467 47 | [2020/01/30 02:00:23] - 44 267.2 49.4 0.1000 0.8714 0.7597 1.4298 0.4538 0.9257 0.7312 1.4319 0.4727 48 | [2020/01/30 02:05:40] - 45 267.4 49.5 0.1000 0.8712 0.7588 1.4282 0.4571 0.8544 0.7326 1.4447 0.4579 49 | [2020/01/30 02:10:57] - 46 267.4 49.5 0.1000 0.8723 0.7584 1.4291 0.4554 0.8623 0.7306 1.4194 0.4727 50 | [2020/01/30 02:16:14] - 47 267.5 49.5 0.1000 0.8655 0.7626 1.4230 0.4585 0.8683 0.7267 1.4257 0.4626 51 | [2020/01/30 02:21:31] - 48 267.5 49.5 0.1000 0.8681 0.7631 1.4284 0.4569 0.9122 0.7148 1.4620 0.4575 52 | [2020/01/30 02:26:48] - 49 267.6 49.5 0.1000 0.8635 0.7636 1.4221 0.4585 0.9171 0.7183 1.4552 0.4540 53 | [2020/01/30 02:32:06] - 50 267.7 49.5 0.1000 0.8583 0.7664 1.4181 0.4602 0.8666 0.7173 1.4275 0.4619 54 | [2020/01/30 02:37:23] - 51 267.8 49.5 0.1000 0.8561 0.7666 1.4195 0.4571 0.8816 0.7175 1.4263 0.4648 55 | [2020/01/30 02:42:41] - 52 267.7 49.5 0.1000 0.8562 0.7668 1.4184 0.4618 0.8535 0.7416 1.4147 0.4616 56 | [2020/01/30 02:47:58] - 53 267.6 49.5 0.1000 0.8625 0.7634 1.4230 0.4588 0.8930 0.7260 1.4410 0.4598 57 | [2020/01/30 02:53:15] - 54 267.6 49.5 0.1000 0.8569 0.7652 1.4173 0.4592 0.8268 0.7534 1.4367 0.4580 58 | [2020/01/30 02:58:32] - 55 267.7 49.5 0.1000 0.8527 0.7678 1.4153 0.4595 0.8857 0.7429 1.4500 0.4645 59 | [2020/01/30 03:03:49] - 56 267.7 49.5 0.1000 0.8561 0.7646 1.4160 0.4595 0.8362 0.7443 1.4305 0.4585 60 | [2020/01/30 03:09:07] - 57 267.8 49.5 0.1000 0.8444 0.7722 1.4067 0.4623 0.8621 0.7265 1.4220 0.4685 61 | [2020/01/30 03:14:24] - 58 267.7 49.6 0.1000 0.8565 0.7653 1.4185 0.4605 0.8678 0.7453 1.4364 0.4654 62 | [2020/01/30 03:19:41] - 59 267.7 49.5 0.1000 0.8474 0.7716 1.4114 0.4603 0.8864 0.7334 1.4141 0.4673 63 | [2020/01/30 03:24:59] - 60 267.6 49.5 0.1000 0.8513 0.7676 1.4144 0.4619 0.8621 0.7508 1.4015 0.4746 64 | [2020/01/30 03:30:17] - 61 267.6 49.5 0.1000 0.8463 0.7731 1.4113 0.4615 0.8530 0.7431 1.4258 0.4706 65 | [2020/01/30 03:35:34] - 62 267.7 49.5 0.1000 0.8411 0.7724 1.4074 0.4640 0.8510 0.7361 1.4148 0.4654 66 | [2020/01/30 03:40:51] - 63 267.6 49.4 0.1000 0.8442 0.7708 1.4084 0.4619 0.8759 0.7399 1.4223 0.4702 67 | [2020/01/30 03:46:08] - 64 267.6 49.5 0.1000 0.8475 0.7710 1.4129 0.4615 0.8619 0.7465 1.4146 0.4713 68 | [2020/01/30 03:51:25] - 65 267.6 49.5 0.1000 0.8445 0.7707 1.4094 0.4620 0.8462 0.7354 1.4430 0.4637 69 | [2020/01/30 03:56:42] - 66 267.8 49.5 0.1000 0.8419 0.7731 1.4060 0.4648 0.8722 0.7315 1.4289 0.4612 70 | [2020/01/30 04:02:00] - 67 267.8 49.5 0.1000 0.8386 0.7753 1.4048 0.4649 0.8203 0.7598 1.4243 0.4675 71 | [2020/01/30 
04:07:17] - 68 267.7 49.5 0.1000 0.8347 0.7740 1.4004 0.4652 0.9013 0.7312 1.4491 0.4602 72 | [2020/01/30 04:12:34] - 69 267.8 49.5 0.1000 0.8395 0.7710 1.4049 0.4639 0.8672 0.7415 1.4431 0.4518 73 | [2020/01/30 04:17:52] - 70 267.5 49.5 0.1000 0.8337 0.7755 1.4020 0.4656 0.8749 0.7230 1.4336 0.4646 74 | [2020/01/30 04:23:09] - 71 267.5 49.5 0.1000 0.8344 0.7761 1.4035 0.4661 0.8555 0.7486 1.4022 0.4789 75 | [2020/01/30 04:28:26] - 72 267.5 49.5 0.1000 0.8354 0.7767 1.4039 0.4661 0.8635 0.7370 1.4321 0.4559 76 | [2020/01/30 04:33:44] - 73 267.7 49.5 0.1000 0.8364 0.7748 1.4035 0.4679 0.8763 0.7327 1.4466 0.4607 77 | [2020/01/30 04:39:01] - 74 267.6 49.5 0.1000 0.8404 0.7731 1.4054 0.4632 0.8507 0.7281 1.4238 0.4693 78 | [2020/01/30 04:44:18] - 75 267.7 49.5 0.1000 0.8314 0.7782 1.3987 0.4676 0.8414 0.7512 1.4093 0.4687 79 | [2020/01/30 04:49:35] - 76 267.6 49.5 0.1000 0.8337 0.7772 1.4023 0.4651 0.8465 0.7497 1.4150 0.4724 80 | [2020/01/30 04:54:52] - 77 267.6 49.5 0.1000 0.8369 0.7755 1.4036 0.4634 0.8569 0.7231 1.4240 0.4686 81 | [2020/01/30 05:00:09] - 78 267.6 49.5 0.1000 0.8301 0.7763 1.3979 0.4662 0.8617 0.7619 1.4382 0.4622 82 | [2020/01/30 05:05:26] - 79 267.6 49.5 0.0100 0.8300 0.7772 1.3970 0.4675 0.8493 0.7507 1.4478 0.4656 83 | [2020/01/30 05:10:44] - 80 267.6 49.5 0.0100 0.7132 0.8151 1.2109 0.5310 0.6852 0.8046 1.2700 0.5212 84 | [2020/01/30 05:16:02] - 81 267.6 49.5 0.0100 0.6402 0.8343 1.1475 0.5517 0.6521 0.8117 1.2500 0.5247 85 | [2020/01/30 05:21:19] - 82 267.6 49.5 0.0100 0.6117 0.8442 1.1231 0.5594 0.6208 0.8180 1.2585 0.5210 86 | [2020/01/30 05:26:37] - 83 267.6 49.5 0.0100 0.5912 0.8512 1.1009 0.5683 0.6257 0.8197 1.2362 0.5309 87 | [2020/01/30 05:31:54] - 84 267.7 49.5 0.0100 0.5733 0.8584 1.0850 0.5729 0.6427 0.8186 1.2334 0.5308 88 | [2020/01/30 05:37:12] - 85 267.9 49.6 0.0100 0.5604 0.8612 1.0734 0.5777 0.6501 0.8080 1.2635 0.5252 89 | [2020/01/30 05:42:29] - 86 267.7 49.5 0.0100 0.5485 0.8642 1.0618 0.5808 0.6181 0.8202 1.2666 0.5243 90 | [2020/01/30 05:47:46] - 87 267.7 49.5 0.0100 0.5341 0.8706 1.0494 0.5853 0.5998 0.8292 1.2749 0.5172 91 | [2020/01/30 05:53:03] - 88 267.7 49.5 0.0100 0.5250 0.8736 1.0385 0.5868 0.6477 0.8026 1.2902 0.5154 92 | [2020/01/30 05:58:20] - 89 267.7 49.5 0.0100 0.5151 0.8754 1.0311 0.5910 0.6297 0.8173 1.2628 0.5254 93 | [2020/01/30 06:03:39] - 90 267.7 49.5 0.0100 0.5060 0.8810 1.0215 0.5933 0.6133 0.8267 1.2801 0.5181 94 | [2020/01/30 06:08:56] - 91 267.7 49.5 0.0100 0.4980 0.8823 1.0146 0.5953 0.5846 0.8328 1.2855 0.5208 95 | [2020/01/30 06:14:13] - 92 267.6 49.5 0.0100 0.4874 0.8864 1.0067 0.5962 0.5948 0.8241 1.2759 0.5199 96 | [2020/01/30 06:19:30] - 93 267.8 49.5 0.0100 0.4814 0.8895 0.9981 0.6000 0.5619 0.8355 1.3140 0.5121 97 | [2020/01/30 06:24:48] - 94 268.0 49.6 0.0100 0.4748 0.8923 0.9945 0.5993 0.5752 0.8300 1.3153 0.5160 98 | [2020/01/30 06:30:05] - 95 267.8 49.5 0.0100 0.4669 0.8954 0.9872 0.6035 0.5881 0.8222 1.3143 0.5117 99 | [2020/01/30 06:35:22] - 96 267.7 49.5 0.0100 0.4633 0.8941 0.9824 0.6062 0.5872 0.8307 1.3078 0.5143 100 | [2020/01/30 06:40:39] - 97 267.7 49.5 0.0100 0.4560 0.8987 0.9771 0.6051 0.6093 0.8261 1.3000 0.5185 101 | [2020/01/30 06:45:57] - 98 267.8 49.5 0.0100 0.4489 0.9002 0.9725 0.6085 0.5830 0.8246 1.3380 0.5069 102 | [2020/01/30 06:51:14] - 99 267.8 49.5 0.0100 0.4471 0.9018 0.9702 0.6070 0.5689 0.8297 1.3425 0.5086 103 | [2020/01/30 06:56:32] - 100 267.7 49.5 0.0100 0.4438 0.9025 0.9655 0.6077 0.5712 0.8227 1.3751 0.5028 104 | [2020/01/30 07:01:50] - 101 268.0 49.6 0.0100 0.4351 0.9056 
0.9614 0.6095 0.5892 0.8296 1.3303 0.5128 105 | [2020/01/30 07:07:07] - 102 268.0 49.5 0.0100 0.4331 0.9062 0.9562 0.6119 0.5835 0.8244 1.3491 0.5026 106 | [2020/01/30 07:12:25] - 103 268.0 49.6 0.0100 0.4267 0.9089 0.9494 0.6155 0.5623 0.8330 1.3541 0.5062 107 | [2020/01/30 07:17:42] - 104 268.0 49.6 0.0100 0.4220 0.9097 0.9509 0.6130 0.6007 0.8172 1.3860 0.4996 108 | [2020/01/30 07:23:00] - 105 268.0 49.5 0.0100 0.4169 0.9122 0.9430 0.6155 0.6074 0.8140 1.3250 0.5168 109 | [2020/01/30 07:28:17] - 106 267.9 49.6 0.0100 0.4126 0.9140 0.9387 0.6190 0.5887 0.8283 1.3059 0.5144 110 | [2020/01/30 07:33:35] - 107 267.9 49.6 0.0100 0.4066 0.9167 0.9305 0.6204 0.5754 0.8292 1.3519 0.5040 111 | [2020/01/30 07:38:53] - 108 268.0 49.6 0.0100 0.4037 0.9166 0.9264 0.6210 0.5753 0.8246 1.3646 0.5045 112 | [2020/01/30 07:44:10] - 109 267.9 49.6 0.0100 0.3991 0.9190 0.9235 0.6205 0.5715 0.8290 1.4021 0.4969 113 | -------------------------------------------------------------------------------- /experiments/cifar10_dropepoch/preactresnet_90/output.log: -------------------------------------------------------------------------------- 1 | [2020/01/29 22:49:59] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=110, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_dropepoch/preactresnet_90', half=False, l1=0, l2=0, lr_drop_epoch=90, lr_max=0.1, lr_one_drop=0.01, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, restarts=1, resume=0, seed=0, val=False, width_factor=10) 2 | [2020/01/29 22:50:04] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 3 | [2020/01/29 22:55:42] - 0 285.4 53.3 0.1000 1.9644 0.2821 2.1559 0.2004 1.7474 0.3708 1.9674 0.2584 4 | [2020/01/29 23:01:24] - 1 286.9 53.9 0.1000 1.7023 0.3863 1.9729 0.2586 1.5873 0.4395 1.8613 0.3137 5 | [2020/01/29 23:07:05] - 2 287.7 53.6 0.1000 1.5764 0.4465 1.8956 0.2889 1.5422 0.4590 1.8488 0.3120 6 | [2020/01/29 23:12:46] - 3 287.5 53.6 0.1000 1.4862 0.4820 1.8375 0.3047 1.4069 0.5244 1.7476 0.3447 7 | [2020/01/29 23:18:27] - 4 286.6 53.7 0.1000 1.4014 0.5252 1.7874 0.3266 1.3809 0.5333 1.7290 0.3572 8 | [2020/01/29 23:24:09] - 5 288.0 53.5 0.1000 1.3327 0.5561 1.7441 0.3435 1.2907 0.5589 1.6668 0.3771 9 | [2020/01/29 23:29:51] - 6 287.9 53.9 0.1000 1.2792 0.5836 1.7083 0.3557 1.1872 0.6072 1.6135 0.4007 10 | [2020/01/29 23:35:33] - 7 287.8 53.7 0.1000 1.2260 0.6063 1.6748 0.3654 1.1656 0.6212 1.6019 0.3927 11 | [2020/01/29 23:41:15] - 8 288.3 53.7 0.1000 1.1846 0.6258 1.6474 0.3742 1.1340 0.6153 1.5813 0.3970 12 | [2020/01/29 23:46:57] - 9 287.9 53.9 0.1000 1.1534 0.6402 1.6286 0.3823 1.0916 0.6545 1.5835 0.4048 13 | [2020/01/29 23:52:40] - 10 288.1 53.7 0.1000 1.1258 0.6509 1.6111 0.3914 1.1129 0.6221 1.5781 0.4097 14 | [2020/01/29 23:58:23] - 11 288.5 53.9 0.1000 1.1011 0.6634 1.5926 0.3991 1.1277 0.6528 1.5588 0.4207 15 | [2020/01/30 00:04:06] - 12 288.5 53.8 0.1000 1.0836 0.6710 1.5842 0.3999 1.0566 0.6722 1.5330 0.4239 16 | [2020/01/30 00:09:48] - 13 288.2 53.8 0.1000 1.0654 0.6790 1.5706 0.4071 1.0412 0.6643 1.5652 0.4100 17 | [2020/01/30 00:15:30] - 14 288.4 53.9 0.1000 1.0459 0.6868 1.5563 0.4108 1.0258 0.6790 1.5288 0.4219 18 | [2020/01/30 00:21:11] - 15 286.9 53.4 0.1000 1.0288 0.6967 1.5437 0.4141 1.0030 0.6927 1.4995 0.4343 19 | [2020/01/30 00:26:54] - 16 288.4 54.0 0.1000 1.0178 
0.7015 1.5360 0.4179 1.0173 0.6530 1.5096 0.4379 20 | [2020/01/30 00:32:36] - 17 288.1 53.7 0.1000 1.0024 0.7053 1.5263 0.4210 1.0200 0.6932 1.4976 0.4440 21 | [2020/01/30 00:38:19] - 18 289.0 53.9 0.1000 0.9932 0.7077 1.5189 0.4208 0.9565 0.7078 1.5051 0.4270 22 | [2020/01/30 00:44:01] - 19 288.5 53.6 0.1000 0.9797 0.7157 1.5080 0.4284 0.9836 0.6961 1.4990 0.4376 23 | [2020/01/30 00:49:43] - 20 287.7 53.3 0.1000 0.9713 0.7183 1.5034 0.4303 0.9812 0.6968 1.4943 0.4374 24 | [2020/01/30 00:55:25] - 21 288.1 53.9 0.1000 0.9668 0.7164 1.4972 0.4332 0.9959 0.6840 1.4819 0.4456 25 | [2020/01/30 01:01:09] - 22 289.3 53.9 0.1000 0.9592 0.7219 1.4928 0.4359 0.9605 0.7121 1.4646 0.4514 26 | [2020/01/30 01:06:51] - 23 287.9 53.8 0.1000 0.9511 0.7260 1.4906 0.4348 0.9528 0.7174 1.4556 0.4454 27 | [2020/01/30 01:12:33] - 24 288.5 53.6 0.1000 0.9427 0.7303 1.4816 0.4368 0.9889 0.6812 1.4548 0.4556 28 | [2020/01/30 01:18:15] - 25 288.2 53.8 0.1000 0.9306 0.7357 1.4739 0.4417 0.9396 0.7003 1.4532 0.4577 29 | [2020/01/30 01:23:59] - 26 288.9 54.0 0.1000 0.9282 0.7337 1.4710 0.4410 0.9372 0.7214 1.4692 0.4525 30 | [2020/01/30 01:29:41] - 27 288.8 53.6 0.1000 0.9240 0.7384 1.4687 0.4430 0.9560 0.7143 1.4647 0.4552 31 | [2020/01/30 01:35:23] - 28 288.5 53.7 0.1000 0.9208 0.7377 1.4652 0.4436 0.9613 0.6991 1.4857 0.4444 32 | [2020/01/30 01:41:07] - 29 289.3 53.9 0.1000 0.9182 0.7402 1.4632 0.4460 0.9070 0.7322 1.4283 0.4659 33 | [2020/01/30 01:46:50] - 30 288.1 53.8 0.1000 0.9109 0.7447 1.4580 0.4443 0.9582 0.7056 1.4757 0.4448 34 | [2020/01/30 01:52:34] - 31 289.6 54.1 0.1000 0.9059 0.7459 1.4543 0.4490 0.9413 0.7246 1.4635 0.4521 35 | [2020/01/30 01:58:17] - 32 289.1 53.9 0.1000 0.8999 0.7455 1.4492 0.4505 0.9173 0.7223 1.4730 0.4465 36 | [2020/01/30 02:04:00] - 33 289.6 54.0 0.1000 0.9044 0.7441 1.4523 0.4482 0.9025 0.7288 1.4531 0.4581 37 | [2020/01/30 02:09:43] - 34 289.1 54.0 0.1000 0.8968 0.7484 1.4476 0.4508 0.8607 0.7462 1.4080 0.4687 38 | [2020/01/30 02:15:27] - 35 289.1 53.9 0.1000 0.8902 0.7525 1.4442 0.4516 0.9365 0.7245 1.4700 0.4549 39 | [2020/01/30 02:21:09] - 36 289.1 53.7 0.1000 0.8939 0.7483 1.4438 0.4520 0.9437 0.7279 1.4615 0.4536 40 | [2020/01/30 02:26:53] - 37 289.6 53.8 0.1000 0.8879 0.7529 1.4431 0.4527 0.9138 0.7357 1.4413 0.4613 41 | [2020/01/30 02:32:36] - 38 289.4 54.0 0.1000 0.8855 0.7537 1.4388 0.4526 0.9222 0.7264 1.4426 0.4563 42 | [2020/01/30 02:38:18] - 39 288.6 53.7 0.1000 0.8820 0.7570 1.4371 0.4537 0.8896 0.7272 1.4528 0.4506 43 | [2020/01/30 02:44:03] - 40 290.1 54.1 0.1000 0.8800 0.7550 1.4357 0.4530 0.9306 0.7233 1.4368 0.4669 44 | [2020/01/30 02:49:49] - 41 291.0 54.2 0.1000 0.8786 0.7560 1.4349 0.4531 0.8886 0.7373 1.4225 0.4687 45 | [2020/01/30 02:55:32] - 42 289.4 54.0 0.1000 0.8789 0.7556 1.4367 0.4556 0.9238 0.7113 1.4429 0.4692 46 | [2020/01/30 03:01:17] - 43 290.2 54.0 0.1000 0.8744 0.7574 1.4322 0.4536 0.8737 0.7352 1.4464 0.4433 47 | [2020/01/30 03:07:00] - 44 289.4 53.9 0.1000 0.8711 0.7586 1.4293 0.4556 0.9160 0.7285 1.4321 0.4735 48 | [2020/01/30 03:12:44] - 45 289.5 53.9 0.1000 0.8706 0.7581 1.4275 0.4592 0.8506 0.7315 1.4376 0.4591 49 | [2020/01/30 03:18:27] - 46 289.3 53.6 0.1000 0.8703 0.7600 1.4282 0.4571 0.8688 0.7264 1.4270 0.4667 50 | [2020/01/30 03:24:09] - 47 288.6 53.7 0.1000 0.8627 0.7633 1.4213 0.4591 0.8720 0.7301 1.4240 0.4630 51 | [2020/01/30 03:29:50] - 48 287.5 53.8 0.1000 0.8654 0.7630 1.4286 0.4571 0.9105 0.7179 1.4575 0.4573 52 | [2020/01/30 03:35:32] - 49 288.0 53.9 0.1000 0.8606 0.7642 1.4205 0.4584 0.9145 0.7183 1.4422 0.4617 53 | 
[2020/01/30 03:41:16] - 50 289.2 53.8 0.1000 0.8579 0.7660 1.4184 0.4594 0.8654 0.7168 1.4239 0.4625 54 | [2020/01/30 03:46:59] - 51 289.0 53.9 0.1000 0.8565 0.7660 1.4191 0.4581 0.8779 0.7254 1.4157 0.4713 55 | [2020/01/30 03:52:42] - 52 288.8 53.7 0.1000 0.8551 0.7681 1.4178 0.4613 0.8460 0.7419 1.4181 0.4551 56 | [2020/01/30 03:58:24] - 53 288.8 53.9 0.1000 0.8595 0.7643 1.4213 0.4597 0.9009 0.7235 1.4283 0.4716 57 | [2020/01/30 04:04:07] - 54 289.2 54.1 0.1000 0.8520 0.7661 1.4146 0.4622 0.8364 0.7422 1.4517 0.4526 58 | [2020/01/30 04:09:50] - 55 288.5 54.0 0.1000 0.8521 0.7676 1.4157 0.4614 0.8983 0.7379 1.4475 0.4709 59 | [2020/01/30 04:15:33] - 56 289.5 54.0 0.1000 0.8544 0.7660 1.4152 0.4599 0.8531 0.7419 1.4358 0.4591 60 | [2020/01/30 04:21:15] - 57 288.1 53.9 0.1000 0.8434 0.7702 1.4063 0.4651 0.8538 0.7350 1.4262 0.4670 61 | [2020/01/30 04:26:59] - 58 289.3 53.8 0.1000 0.8565 0.7646 1.4185 0.4613 0.8706 0.7491 1.4220 0.4675 62 | [2020/01/30 04:32:41] - 59 288.7 53.8 0.1000 0.8455 0.7716 1.4110 0.4613 0.8909 0.7331 1.4078 0.4709 63 | [2020/01/30 04:38:24] - 60 288.1 53.8 0.1000 0.8499 0.7676 1.4133 0.4630 0.8609 0.7487 1.4098 0.4713 64 | [2020/01/30 04:44:07] - 61 289.5 53.8 0.1000 0.8479 0.7724 1.4116 0.4641 0.8611 0.7436 1.4150 0.4736 65 | [2020/01/30 04:49:51] - 62 289.3 54.1 0.1000 0.8403 0.7731 1.4069 0.4651 0.8624 0.7300 1.4166 0.4685 66 | [2020/01/30 04:55:35] - 63 289.5 54.1 0.1000 0.8442 0.7709 1.4086 0.4617 0.8716 0.7427 1.4180 0.4698 67 | [2020/01/30 05:01:16] - 64 288.0 53.9 0.1000 0.8451 0.7724 1.4120 0.4618 0.8542 0.7426 1.4192 0.4658 68 | [2020/01/30 05:06:59] - 65 288.8 53.9 0.1000 0.8430 0.7703 1.4080 0.4634 0.8466 0.7369 1.4440 0.4621 69 | [2020/01/30 05:12:43] - 66 289.5 53.9 0.1000 0.8404 0.7741 1.4057 0.4643 0.8658 0.7333 1.4301 0.4585 70 | [2020/01/30 05:18:26] - 67 289.1 54.0 0.1000 0.8373 0.7758 1.4040 0.4657 0.8283 0.7544 1.4283 0.4635 71 | [2020/01/30 05:24:08] - 68 288.9 53.8 0.1000 0.8331 0.7757 1.4006 0.4646 0.9108 0.7287 1.4595 0.4592 72 | [2020/01/30 05:29:50] - 69 288.1 53.7 0.1000 0.8403 0.7724 1.4052 0.4632 0.8675 0.7415 1.4357 0.4591 73 | [2020/01/30 05:35:34] - 70 289.1 54.0 0.1000 0.8324 0.7751 1.4010 0.4681 0.8746 0.7232 1.4238 0.4691 74 | [2020/01/30 05:41:18] - 71 289.6 53.9 0.1000 0.8336 0.7764 1.4021 0.4663 0.8573 0.7454 1.4042 0.4760 75 | [2020/01/30 05:47:01] - 72 288.7 53.8 0.1000 0.8383 0.7749 1.4062 0.4654 0.8493 0.7440 1.4326 0.4546 76 | [2020/01/30 05:52:44] - 73 289.3 54.0 0.1000 0.8335 0.7754 1.4009 0.4657 0.8862 0.7310 1.4442 0.4607 77 | [2020/01/30 05:58:27] - 74 289.3 54.0 0.1000 0.8386 0.7723 1.4046 0.4655 0.8540 0.7223 1.4164 0.4699 78 | [2020/01/30 06:04:11] - 75 290.0 53.9 0.1000 0.8302 0.7758 1.3973 0.4678 0.8308 0.7508 1.4143 0.4697 79 | [2020/01/30 06:09:55] - 76 289.7 54.1 0.1000 0.8321 0.7767 1.4012 0.4662 0.8479 0.7455 1.4058 0.4785 80 | [2020/01/30 06:15:38] - 77 289.3 53.8 0.1000 0.8374 0.7753 1.4051 0.4635 0.8735 0.7207 1.4217 0.4676 81 | [2020/01/30 06:21:21] - 78 289.6 53.7 0.1000 0.8309 0.7746 1.3992 0.4670 0.8655 0.7540 1.4429 0.4590 82 | [2020/01/30 06:27:04] - 79 288.8 54.0 0.1000 0.8285 0.7790 1.3973 0.4674 0.8415 0.7499 1.4446 0.4666 83 | [2020/01/30 06:32:49] - 80 289.7 54.0 0.1000 0.8287 0.7758 1.3957 0.4705 0.8851 0.7312 1.4411 0.4654 84 | [2020/01/30 06:38:33] - 81 290.4 54.1 0.1000 0.8280 0.7761 1.3958 0.4672 0.8700 0.7423 1.4117 0.4731 85 | [2020/01/30 06:44:17] - 82 289.4 54.0 0.1000 0.8270 0.7781 1.3963 0.4660 0.8399 0.7555 1.4044 0.4745 86 | [2020/01/30 06:50:00] - 83 289.5 53.9 0.1000 0.8278 
0.7765 1.3966 0.4673 0.8517 0.7294 1.4378 0.4579 87 | [2020/01/30 06:55:43] - 84 289.4 53.8 0.1000 0.8279 0.7804 1.3973 0.4665 0.8824 0.7374 1.4200 0.4703 88 | [2020/01/30 07:01:26] - 85 288.6 53.8 0.1000 0.8287 0.7778 1.3980 0.4708 0.8545 0.7255 1.4153 0.4706 89 | [2020/01/30 07:07:09] - 86 289.0 54.1 0.1000 0.8237 0.7766 1.3952 0.4696 0.9028 0.7215 1.4658 0.4449 90 | [2020/01/30 07:12:52] - 87 289.4 54.0 0.1000 0.8231 0.7824 1.3940 0.4672 0.8298 0.7525 1.4293 0.4644 91 | [2020/01/30 07:18:36] - 88 289.8 53.8 0.1000 0.8239 0.7799 1.3938 0.4686 0.9029 0.7155 1.4466 0.4521 92 | [2020/01/30 07:24:19] - 89 289.3 53.9 0.0100 0.8224 0.7782 1.3927 0.4687 0.8898 0.7495 1.4099 0.4740 93 | [2020/01/30 07:30:03] - 90 289.3 54.0 0.0100 0.7092 0.8172 1.2021 0.5365 0.6938 0.7999 1.2601 0.5234 94 | [2020/01/30 07:35:46] - 91 289.0 53.7 0.0100 0.6352 0.8359 1.1425 0.5543 0.6420 0.8144 1.2521 0.5220 95 | [2020/01/30 07:41:30] - 92 289.9 54.1 0.0100 0.6024 0.8464 1.1139 0.5635 0.6414 0.8149 1.2358 0.5257 96 | [2020/01/30 07:47:10] - 93 287.6 52.0 0.0100 0.5845 0.8536 1.0939 0.5712 0.5993 0.8251 1.2489 0.5204 97 | [2020/01/30 07:52:39] - 94 277.4 51.4 0.0100 0.5684 0.8582 1.0793 0.5748 0.6017 0.8228 1.2566 0.5220 98 | [2020/01/30 07:58:07] - 95 276.5 51.4 0.0100 0.5512 0.8644 1.0647 0.5807 0.6138 0.8160 1.2478 0.5256 99 | [2020/01/30 08:03:35] - 96 277.0 51.4 0.0100 0.5410 0.8664 1.0505 0.5850 0.6123 0.8242 1.2615 0.5218 100 | [2020/01/30 08:09:03] - 97 276.7 51.4 0.0100 0.5268 0.8722 1.0396 0.5879 0.6353 0.8173 1.2623 0.5214 101 | [2020/01/30 08:14:31] - 98 276.5 51.3 0.0100 0.5136 0.8759 1.0294 0.5906 0.5946 0.8186 1.2825 0.5167 102 | [2020/01/30 08:19:59] - 99 276.5 51.3 0.0100 0.5075 0.8803 1.0233 0.5949 0.5811 0.8291 1.2789 0.5221 103 | [2020/01/30 08:25:28] - 100 276.4 51.3 0.0100 0.5005 0.8807 1.0127 0.5969 0.5773 0.8232 1.3050 0.5135 104 | [2020/01/30 08:30:55] - 101 276.4 51.3 0.0100 0.4882 0.8866 1.0055 0.5975 0.6019 0.8262 1.2704 0.5221 105 | [2020/01/30 08:36:23] - 102 276.3 51.2 0.0100 0.4850 0.8874 1.0003 0.5981 0.5952 0.8257 1.2918 0.5199 106 | [2020/01/30 08:41:49] - 103 275.5 51.2 0.0100 0.4760 0.8899 0.9916 0.6034 0.5775 0.8297 1.2918 0.5190 107 | [2020/01/30 08:47:16] - 104 275.5 51.2 0.0100 0.4656 0.8945 0.9867 0.6007 0.6096 0.8137 1.3204 0.5139 108 | [2020/01/30 08:52:42] - 105 275.3 51.1 0.0100 0.4601 0.8958 0.9788 0.6062 0.6202 0.8146 1.2767 0.5256 109 | [2020/01/30 08:58:10] - 106 275.8 51.6 0.0100 0.4551 0.8980 0.9728 0.6069 0.5960 0.8235 1.2816 0.5273 110 | [2020/01/30 09:03:44] - 107 280.3 53.6 0.0100 0.4477 0.9006 0.9663 0.6089 0.5773 0.8283 1.3050 0.5155 111 | [2020/01/30 09:09:27] - 108 288.5 53.9 0.0100 0.4436 0.9025 0.9601 0.6109 0.5810 0.8263 1.3161 0.5133 112 | [2020/01/30 09:15:09] - 109 288.4 53.7 0.0100 0.4382 0.9034 0.9593 0.6110 0.5703 0.8350 1.3467 0.5059 113 | -------------------------------------------------------------------------------- /experiments/cifar10_droplr/preactresnet_0.006/output.log: -------------------------------------------------------------------------------- 1 | [2020/01/31 09:12:06] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=150, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_droplr/preactresnet_0.006', half=False, l1=0, l2=0, lr_drop_epoch=100, lr_max=0.1, lr_one_drop=0.006, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, restarts=1, resume=0, seed=0, 
val=False, width_factor=10) 2 | [2020/01/31 09:12:11] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 3 | [2020/01/31 09:24:15] - 0 610.4 114.0 0.1000 1.9581 0.2856 2.1527 0.2020 1.7368 0.3760 1.9735 0.2625 4 | [2020/01/31 09:36:10] - 1 601.2 112.8 0.1000 1.7001 0.3880 1.9714 0.2586 1.5815 0.4462 1.8569 0.3126 5 | [2020/01/31 09:48:17] - 2 611.7 115.2 0.1000 1.5714 0.4481 1.8925 0.2899 1.5421 0.4575 1.8487 0.3103 6 | [2020/01/31 10:00:25] - 3 613.0 114.4 0.1000 1.4798 0.4845 1.8340 0.3060 1.4101 0.5248 1.7497 0.3432 7 | [2020/01/31 10:12:30] - 4 611.1 114.1 0.1000 1.3946 0.5283 1.7838 0.3250 1.3779 0.5340 1.7276 0.3589 8 | [2020/01/31 10:24:40] - 5 614.3 114.7 0.1000 1.3249 0.5597 1.7391 0.3445 1.2828 0.5666 1.6606 0.3802 9 | [2020/01/31 10:36:43] - 6 610.1 113.2 0.1000 1.2704 0.5877 1.7027 0.3573 1.1859 0.6073 1.6092 0.4013 10 | [2020/01/31 10:48:47] - 7 610.2 113.0 0.1000 1.2192 0.6096 1.6698 0.3679 1.1607 0.6275 1.5974 0.3993 11 | [2020/01/31 11:00:55] - 8 614.2 113.9 0.1000 1.1797 0.6278 1.6436 0.3761 1.1314 0.6179 1.5849 0.3965 12 | [2020/01/31 11:12:59] - 9 610.1 113.7 0.1000 1.1504 0.6412 1.6262 0.3841 1.0812 0.6551 1.5714 0.4070 13 | [2020/01/31 11:25:06] - 10 611.8 114.2 0.1000 1.1218 0.6540 1.6077 0.3927 1.1155 0.6223 1.5708 0.4117 14 | [2020/01/31 11:37:13] - 11 611.5 114.3 0.1000 1.0981 0.6648 1.5904 0.3999 1.1268 0.6488 1.5624 0.4179 15 | [2020/01/31 11:49:20] - 12 613.4 113.9 0.1000 1.0813 0.6709 1.5813 0.3996 1.0426 0.6792 1.5181 0.4284 16 | [2020/01/31 12:01:30] - 13 614.5 114.7 0.1000 1.0624 0.6801 1.5678 0.4083 1.0375 0.6662 1.5634 0.4106 17 | [2020/01/31 12:13:39] - 14 613.8 114.6 0.1000 1.0432 0.6876 1.5544 0.4111 1.0243 0.6802 1.5342 0.4215 18 | [2020/01/31 12:25:45] - 15 612.6 114.2 0.1000 1.0289 0.6954 1.5427 0.4137 1.0047 0.6912 1.5120 0.4335 19 | [2020/01/31 12:37:46] - 16 606.8 113.6 0.1000 1.0142 0.7014 1.5331 0.4188 1.0098 0.6571 1.5021 0.4402 20 | [2020/01/31 12:49:55] - 17 613.4 114.7 0.1000 1.0003 0.7063 1.5243 0.4215 1.0241 0.6878 1.4966 0.4459 21 | [2020/01/31 13:02:01] - 18 611.3 114.1 0.1000 0.9936 0.7080 1.5191 0.4244 0.9486 0.7078 1.5176 0.4224 22 | [2020/01/31 13:14:07] - 19 612.8 113.1 0.1000 0.9791 0.7159 1.5074 0.4287 0.9827 0.6955 1.5036 0.4358 23 | [2020/01/31 13:26:13] - 20 612.0 114.0 0.1000 0.9711 0.7188 1.5027 0.4304 0.9846 0.6926 1.5006 0.4386 24 | [2020/01/31 13:38:18] - 21 610.8 113.4 0.1000 0.9661 0.7189 1.4961 0.4322 0.9948 0.6825 1.4823 0.4503 25 | [2020/01/31 13:50:24] - 22 611.7 113.9 0.1000 0.9586 0.7231 1.4922 0.4361 0.9698 0.7071 1.4684 0.4519 26 | [2020/01/31 14:02:33] - 23 614.4 114.6 0.1000 0.9499 0.7268 1.4892 0.4334 0.9416 0.7202 1.4559 0.4436 27 | [2020/01/31 14:14:42] - 24 615.0 114.3 0.1000 0.9439 0.7297 1.4827 0.4379 0.9893 0.6805 1.4542 0.4538 28 | [2020/01/31 14:26:49] - 25 611.5 114.3 0.1000 0.9306 0.7380 1.4731 0.4416 0.9330 0.7100 1.4524 0.4553 29 | [2020/01/31 14:38:57] - 26 613.5 114.6 0.1000 0.9304 0.7338 1.4729 0.4362 0.9475 0.7240 1.4754 0.4527 30 | [2020/01/31 14:51:04] - 27 613.1 114.3 0.1000 0.9252 0.7372 1.4692 0.4411 0.9503 0.7156 1.4559 0.4563 31 | [2020/01/31 15:03:12] - 28 612.7 114.4 0.1000 0.9203 0.7389 1.4648 0.4447 0.9561 0.7078 1.4777 0.4487 32 | [2020/01/31 15:15:19] - 29 613.7 113.3 0.1000 0.9174 0.7396 1.4619 0.4463 0.9209 0.7285 1.4378 0.4635 33 | [2020/01/31 15:27:28] - 30 613.7 114.3 0.1000 0.9113 0.7426 1.4582 0.4421 0.9497 0.7174 1.4712 0.4486 34 | [2020/01/31 15:39:36] - 31 613.4 114.8 0.1000 0.9065 
0.7492 1.4553 0.4473 0.9429 0.7259 1.4638 0.4517 35 | [2020/01/31 15:51:41] - 32 610.8 113.9 0.1000 0.8995 0.7462 1.4486 0.4496 0.9170 0.7206 1.4700 0.4524 36 | [2020/01/31 16:03:47] - 33 611.8 114.2 0.1000 0.9051 0.7451 1.4532 0.4475 0.9142 0.7224 1.4695 0.4508 37 | [2020/01/31 16:15:51] - 34 609.3 114.4 0.1000 0.8956 0.7480 1.4463 0.4497 0.8738 0.7379 1.4161 0.4688 38 | [2020/01/31 16:27:55] - 35 609.5 114.4 0.1000 0.8918 0.7526 1.4443 0.4513 0.9351 0.7201 1.4684 0.4554 39 | [2020/01/31 16:40:04] - 36 614.5 114.1 0.1000 0.8947 0.7489 1.4446 0.4508 0.9424 0.7177 1.4641 0.4511 40 | [2020/01/31 16:52:14] - 37 615.4 114.9 0.1000 0.8897 0.7514 1.4438 0.4515 0.9266 0.7328 1.4522 0.4532 41 | [2020/01/31 17:04:22] - 38 614.1 114.2 0.1000 0.8866 0.7527 1.4398 0.4501 0.9280 0.7211 1.4518 0.4560 42 | [2020/01/31 17:15:31] - 39 611.0 58.2 0.1000 0.8842 0.7552 1.4384 0.4524 0.8983 0.7216 1.4503 0.4517 43 | [2020/01/31 17:21:09] - 40 283.6 53.4 0.1000 0.8793 0.7555 1.4342 0.4537 0.9326 0.7203 1.4451 0.4602 44 | [2020/01/31 17:26:47] - 41 284.2 53.4 0.1000 0.8793 0.7566 1.4352 0.4544 0.8954 0.7340 1.4251 0.4663 45 | [2020/01/31 17:32:25] - 42 285.0 53.6 0.1000 0.8793 0.7575 1.4376 0.4536 0.9249 0.7102 1.4477 0.4597 46 | [2020/01/31 17:38:04] - 43 285.2 53.3 0.1000 0.8741 0.7574 1.4312 0.4541 0.8828 0.7320 1.4485 0.4451 47 | [2020/01/31 17:43:43] - 44 285.5 53.4 0.1000 0.8722 0.7585 1.4299 0.4560 0.9223 0.7315 1.4317 0.4672 48 | [2020/01/31 17:49:22] - 45 285.4 53.5 0.1000 0.8686 0.7608 1.4253 0.4585 0.8500 0.7261 1.4422 0.4534 49 | [2020/01/31 17:55:01] - 46 285.5 53.7 0.1000 0.8719 0.7569 1.4289 0.4559 0.8623 0.7265 1.4175 0.4688 50 | [2020/01/31 18:00:41] - 47 286.7 53.7 0.1000 0.8673 0.7621 1.4255 0.4586 0.8740 0.7235 1.4255 0.4628 51 | [2020/01/31 18:06:21] - 48 286.4 53.8 0.1000 0.8691 0.7621 1.4296 0.4567 0.9019 0.7210 1.4618 0.4527 52 | [2020/01/31 18:12:02] - 49 286.8 53.6 0.1000 0.8619 0.7624 1.4215 0.4585 0.9242 0.7166 1.4492 0.4577 53 | [2020/01/31 18:17:43] - 50 286.6 53.7 0.1000 0.8591 0.7655 1.4187 0.4599 0.8636 0.7178 1.4191 0.4613 54 | [2020/01/31 18:23:24] - 51 287.0 53.7 0.1000 0.8576 0.7654 1.4195 0.4583 0.8770 0.7240 1.4223 0.4675 55 | [2020/01/31 18:29:04] - 52 287.0 53.6 0.1000 0.8551 0.7683 1.4169 0.4621 0.8555 0.7420 1.4129 0.4579 56 | [2020/01/31 18:34:45] - 53 286.8 53.8 0.1000 0.8597 0.7654 1.4216 0.4600 0.8911 0.7275 1.4335 0.4644 57 | [2020/01/31 18:40:25] - 54 287.2 53.3 0.1000 0.8554 0.7660 1.4165 0.4623 0.8370 0.7501 1.4463 0.4520 58 | [2020/01/31 18:46:06] - 55 286.8 53.5 0.1000 0.8530 0.7682 1.4148 0.4601 0.8949 0.7402 1.4478 0.4675 59 | [2020/01/31 18:51:46] - 56 286.8 53.8 0.1000 0.8537 0.7666 1.4152 0.4600 0.8444 0.7398 1.4318 0.4566 60 | [2020/01/31 18:57:27] - 57 287.0 53.7 0.1000 0.8428 0.7722 1.4054 0.4634 0.8701 0.7278 1.4349 0.4642 61 | [2020/01/31 19:03:07] - 58 286.2 53.9 0.1000 0.8578 0.7649 1.4195 0.4600 0.8651 0.7472 1.4241 0.4674 62 | [2020/01/31 19:08:47] - 59 286.6 53.7 0.1000 0.8455 0.7722 1.4109 0.4612 0.8798 0.7340 1.4128 0.4666 63 | [2020/01/31 19:14:29] - 60 286.7 53.7 0.1000 0.8509 0.7675 1.4144 0.4611 0.8644 0.7541 1.4072 0.4726 64 | [2020/01/31 19:20:09] - 61 286.7 53.3 0.1000 0.8476 0.7723 1.4115 0.4626 0.8507 0.7455 1.4114 0.4796 65 | [2020/01/31 19:25:50] - 62 286.9 53.9 0.1000 0.8391 0.7736 1.4061 0.4660 0.8593 0.7354 1.4125 0.4676 66 | [2020/01/31 19:31:30] - 63 286.6 53.5 0.1000 0.8446 0.7708 1.4088 0.4629 0.8683 0.7427 1.4156 0.4728 67 | [2020/01/31 19:37:11] - 64 287.2 53.9 0.1000 0.8494 0.7724 1.4144 0.4606 0.8585 0.7444 1.4121 0.4689 
68 | [2020/01/31 19:42:53] - 65 287.1 53.9 0.1000 0.8418 0.7723 1.4068 0.4621 0.8390 0.7380 1.4361 0.4621 69 | [2020/01/31 19:48:34] - 66 288.1 53.8 0.1000 0.8397 0.7738 1.4047 0.4628 0.8704 0.7311 1.4238 0.4661 70 | [2020/01/31 19:54:15] - 67 286.5 53.7 0.1000 0.8385 0.7753 1.4044 0.4642 0.8202 0.7628 1.4252 0.4629 71 | [2020/01/31 19:59:55] - 68 286.8 54.0 0.1000 0.8333 0.7762 1.3996 0.4659 0.9020 0.7362 1.4461 0.4648 72 | [2020/01/31 20:05:36] - 69 286.2 53.9 0.1000 0.8417 0.7723 1.4062 0.4637 0.8652 0.7400 1.4361 0.4584 73 | [2020/01/31 20:11:17] - 70 287.0 53.2 0.1000 0.8352 0.7747 1.4021 0.4645 0.8793 0.7170 1.4217 0.4714 74 | [2020/01/31 20:16:57] - 71 286.8 53.7 0.1000 0.8357 0.7763 1.4039 0.4648 0.8658 0.7392 1.4003 0.4762 75 | [2020/01/31 20:22:38] - 72 287.2 53.7 0.1000 0.8358 0.7762 1.4038 0.4652 0.8548 0.7419 1.4350 0.4532 76 | [2020/01/31 20:28:19] - 73 287.2 53.7 0.1000 0.8338 0.7759 1.4008 0.4656 0.8920 0.7276 1.4514 0.4593 77 | [2020/01/31 20:33:57] - 74 284.3 53.5 0.1000 0.8408 0.7732 1.4066 0.4638 0.8628 0.7242 1.4232 0.4738 78 | [2020/01/31 20:39:37] - 75 286.8 53.6 0.1000 0.8320 0.7775 1.3994 0.4659 0.8389 0.7496 1.4041 0.4728 79 | [2020/01/31 20:45:18] - 76 287.5 53.7 0.1000 0.8327 0.7759 1.4004 0.4647 0.8446 0.7515 1.4142 0.4769 80 | [2020/01/31 20:51:00] - 77 287.3 54.0 0.1000 0.8359 0.7765 1.4042 0.4639 0.8637 0.7235 1.4248 0.4639 81 | [2020/01/31 20:56:42] - 78 288.0 53.9 0.1000 0.8305 0.7765 1.3981 0.4668 0.8620 0.7560 1.4360 0.4611 82 | [2020/01/31 21:02:21] - 79 285.9 53.9 0.1000 0.8306 0.7779 1.3981 0.4662 0.8434 0.7491 1.4259 0.4664 83 | [2020/01/31 21:08:03] - 80 287.4 53.7 0.1000 0.8302 0.7765 1.3953 0.4694 0.8832 0.7281 1.4437 0.4656 84 | [2020/01/31 21:13:44] - 81 286.8 53.8 0.1000 0.8297 0.7772 1.3980 0.4663 0.8756 0.7380 1.4167 0.4707 85 | [2020/01/31 21:19:24] - 82 286.2 53.9 0.1000 0.8274 0.7795 1.3967 0.4644 0.8462 0.7540 1.4092 0.4766 86 | [2020/01/31 21:25:04] - 83 286.4 53.9 0.1000 0.8286 0.7776 1.3959 0.4665 0.8553 0.7313 1.4384 0.4595 87 | [2020/01/31 21:30:45] - 84 287.4 53.5 0.1000 0.8270 0.7799 1.3962 0.4683 0.8963 0.7250 1.4264 0.4646 88 | [2020/01/31 21:36:27] - 85 287.6 53.9 0.1000 0.8282 0.7794 1.3981 0.4674 0.8575 0.7307 1.4047 0.4725 89 | [2020/01/31 21:42:08] - 86 287.8 53.7 0.1000 0.8246 0.7762 1.3955 0.4698 0.8893 0.7235 1.4511 0.4477 90 | [2020/01/31 21:47:49] - 87 286.8 54.1 0.1000 0.8218 0.7819 1.3914 0.4693 0.8411 0.7529 1.4591 0.4543 91 | [2020/01/31 21:53:31] - 88 287.7 53.9 0.1000 0.8240 0.7803 1.3938 0.4684 0.8938 0.7154 1.4445 0.4557 92 | [2020/01/31 21:59:11] - 89 286.7 53.7 0.1000 0.8247 0.7775 1.3950 0.4675 0.8773 0.7541 1.4036 0.4795 93 | [2020/01/31 22:04:52] - 90 286.6 54.0 0.1000 0.8160 0.7822 1.3889 0.4723 0.8710 0.7525 1.4113 0.4719 94 | [2020/01/31 22:10:34] - 91 287.7 53.8 0.1000 0.8242 0.7793 1.3923 0.4696 0.8194 0.7734 1.4023 0.4708 95 | [2020/01/31 22:16:15] - 92 287.4 53.7 0.1000 0.8211 0.7806 1.3934 0.4681 0.8812 0.7307 1.4141 0.4700 96 | [2020/01/31 22:21:56] - 93 287.1 53.5 0.1000 0.8202 0.7816 1.3897 0.4695 0.7907 0.7693 1.4131 0.4663 97 | [2020/01/31 22:27:37] - 94 287.0 54.0 0.1000 0.8243 0.7792 1.3940 0.4663 0.8362 0.7445 1.4235 0.4697 98 | [2020/01/31 22:33:17] - 95 286.7 53.6 0.1000 0.8192 0.7810 1.3922 0.4681 0.8867 0.7233 1.4357 0.4567 99 | [2020/01/31 22:38:57] - 96 286.5 53.9 0.1000 0.8231 0.7803 1.3913 0.4706 0.8483 0.7538 1.4438 0.4609 100 | [2020/01/31 22:44:39] - 97 287.3 54.0 0.1000 0.8158 0.7835 1.3885 0.4687 0.8718 0.7374 1.4272 0.4694 101 | [2020/01/31 22:50:19] - 98 286.8 53.3 0.1000 
0.8186 0.7819 1.3913 0.4698 0.8569 0.7403 1.4120 0.4675 102 | [2020/01/31 22:56:00] - 99 287.3 54.0 0.0060 0.8196 0.7795 1.3882 0.4709 0.8468 0.7328 1.4280 0.4610 103 | [2020/01/31 23:01:42] - 100 287.4 54.0 0.0060 0.7278 0.8102 1.2118 0.5337 0.6722 0.8040 1.2727 0.5188 104 | [2020/01/31 23:07:25] - 101 288.0 54.0 0.0060 0.6423 0.8345 1.1435 0.5537 0.6599 0.8141 1.2406 0.5295 105 | [2020/01/31 23:13:07] - 102 287.8 53.7 0.0060 0.6179 0.8413 1.1186 0.5613 0.6513 0.8181 1.2455 0.5308 106 | [2020/01/31 23:18:48] - 103 287.1 53.7 0.0060 0.5955 0.8488 1.0970 0.5682 0.6225 0.8245 1.2402 0.5310 107 | [2020/01/31 23:24:29] - 104 287.1 53.9 0.0060 0.5775 0.8545 1.0820 0.5729 0.6329 0.8148 1.2599 0.5244 108 | [2020/01/31 23:30:11] - 105 287.9 54.0 0.0060 0.5601 0.8598 1.0636 0.5814 0.6454 0.8139 1.2290 0.5399 109 | [2020/01/31 23:35:53] - 106 287.4 54.1 0.0060 0.5501 0.8634 1.0531 0.5862 0.6202 0.8286 1.2236 0.5398 110 | [2020/01/31 23:41:34] - 107 286.8 53.8 0.0060 0.5386 0.8666 1.0408 0.5882 0.5911 0.8271 1.2428 0.5306 111 | [2020/01/31 23:47:06] - 108 280.6 51.6 0.0060 0.5274 0.8698 1.0275 0.5942 0.5793 0.8318 1.2463 0.5238 112 | [2020/01/31 23:52:34] - 109 276.9 51.5 0.0060 0.5155 0.8761 1.0181 0.5952 0.5760 0.8329 1.2520 0.5292 113 | [2020/01/31 23:58:04] - 110 277.2 51.5 0.0060 0.5076 0.8785 1.0105 0.5968 0.5867 0.8311 1.2354 0.5327 114 | [2020/02/01 00:03:32] - 111 276.7 51.8 0.0060 0.4978 0.8798 1.0007 0.6023 0.5844 0.8227 1.2600 0.5245 115 | [2020/02/01 00:09:00] - 112 276.4 51.3 0.0060 0.4853 0.8834 0.9870 0.6073 0.5558 0.8389 1.2767 0.5201 116 | [2020/02/01 00:14:27] - 113 275.6 51.3 0.0060 0.4790 0.8867 0.9799 0.6066 0.5955 0.8274 1.2439 0.5303 117 | [2020/02/01 00:19:53] - 114 275.4 50.8 0.0060 0.4671 0.8913 0.9688 0.6127 0.5538 0.8317 1.2927 0.5235 118 | [2020/02/01 00:25:20] - 115 275.5 51.2 0.0060 0.4618 0.8924 0.9621 0.6120 0.5441 0.8433 1.2983 0.5200 119 | [2020/02/01 00:30:46] - 116 275.0 51.6 0.0060 0.4525 0.8964 0.9537 0.6159 0.5955 0.8248 1.2571 0.5304 120 | [2020/02/01 00:36:13] - 117 275.4 51.3 0.0060 0.4464 0.8980 0.9450 0.6171 0.5387 0.8358 1.3179 0.5148 121 | [2020/02/01 00:41:42] - 118 276.9 51.5 0.0060 0.4373 0.8997 0.9368 0.6217 0.5663 0.8310 1.2896 0.5213 122 | [2020/02/01 00:47:10] - 119 277.0 51.4 0.0060 0.4299 0.9032 0.9302 0.6220 0.5720 0.8334 1.3339 0.5147 123 | [2020/02/01 00:52:40] - 120 277.7 51.5 0.0060 0.4229 0.9046 0.9205 0.6260 0.5590 0.8409 1.2850 0.5260 124 | [2020/02/01 00:58:08] - 121 276.5 51.2 0.0060 0.4145 0.9088 0.9133 0.6281 0.5363 0.8354 1.3408 0.5168 125 | [2020/02/01 01:03:33] - 122 273.9 51.2 0.0060 0.4098 0.9122 0.9070 0.6305 0.5378 0.8329 1.3380 0.5150 126 | [2020/02/01 01:09:00] - 123 275.7 51.4 0.0060 0.4021 0.9126 0.8993 0.6332 0.5564 0.8327 1.3324 0.5179 127 | [2020/02/01 01:14:27] - 124 276.2 51.3 0.0060 0.3957 0.9156 0.8947 0.6338 0.5483 0.8375 1.3144 0.5217 128 | [2020/02/01 01:19:56] - 125 276.6 51.6 0.0060 0.3903 0.9159 0.8884 0.6345 0.5536 0.8306 1.3261 0.5187 129 | [2020/02/01 01:25:23] - 126 276.6 51.2 0.0060 0.3850 0.9184 0.8819 0.6378 0.5713 0.8355 1.3141 0.5253 130 | [2020/02/01 01:30:48] - 127 274.1 50.7 0.0060 0.3804 0.9203 0.8767 0.6410 0.5355 0.8431 1.3814 0.5098 131 | [2020/02/01 01:36:14] - 128 274.3 51.2 0.0060 0.3716 0.9236 0.8681 0.6415 0.5343 0.8384 1.4096 0.5029 132 | [2020/02/01 01:41:40] - 129 275.7 51.1 0.0060 0.3687 0.9243 0.8650 0.6428 0.5397 0.8345 1.3788 0.5145 133 | [2020/02/01 01:47:08] - 130 275.2 51.3 0.0060 0.3627 0.9254 0.8562 0.6458 0.5465 0.8420 1.3572 0.5103 134 | [2020/02/01 01:52:35] - 131 275.8 
51.3 0.0060 0.3604 0.9262 0.8543 0.6476 0.5242 0.8372 1.4504 0.4923 135 | [2020/02/01 01:58:02] - 132 276.0 51.4 0.0060 0.3533 0.9290 0.8472 0.6491 0.5328 0.8415 1.3797 0.5111 136 | [2020/02/01 02:03:28] - 133 274.8 51.1 0.0060 0.3497 0.9310 0.8452 0.6485 0.5284 0.8368 1.4267 0.5034 137 | [2020/02/01 02:08:55] - 134 275.1 51.5 0.0060 0.3441 0.9314 0.8404 0.6510 0.5185 0.8361 1.4255 0.5046 138 | [2020/02/01 02:14:23] - 135 276.2 51.4 0.0060 0.3413 0.9330 0.8354 0.6507 0.5446 0.8371 1.4098 0.5060 139 | [2020/02/01 02:19:52] - 136 277.3 51.6 0.0060 0.3354 0.9338 0.8292 0.6542 0.5472 0.8342 1.3860 0.5109 140 | [2020/02/01 02:25:19] - 137 276.2 51.0 0.0060 0.3340 0.9356 0.8306 0.6559 0.5475 0.8313 1.3979 0.5092 141 | [2020/02/01 02:30:45] - 138 275.0 51.1 0.0060 0.3301 0.9355 0.8229 0.6541 0.5364 0.8331 1.4648 0.4981 142 | [2020/02/01 02:36:13] - 139 276.5 51.4 0.0060 0.3281 0.9381 0.8226 0.6559 0.5410 0.8413 1.4184 0.5002 143 | [2020/02/01 02:41:41] - 140 276.3 51.2 0.0060 0.3228 0.9384 0.8158 0.6610 0.5237 0.8353 1.4345 0.5013 144 | [2020/02/01 02:47:09] - 141 276.1 51.6 0.0060 0.3192 0.9396 0.8130 0.6597 0.5254 0.8272 1.4591 0.5022 145 | [2020/02/01 02:52:37] - 142 276.5 51.5 0.0060 0.3142 0.9416 0.8082 0.6628 0.5308 0.8288 1.4705 0.4936 146 | [2020/02/01 02:58:05] - 143 276.6 51.2 0.0060 0.3119 0.9421 0.8056 0.6616 0.5303 0.8367 1.5022 0.4954 147 | [2020/02/01 03:03:33] - 144 276.8 51.5 0.0060 0.3081 0.9431 0.8000 0.6655 0.5292 0.8363 1.4698 0.4945 148 | [2020/02/01 03:09:01] - 145 277.0 51.4 0.0060 0.3033 0.9443 0.7965 0.6660 0.5287 0.8328 1.4920 0.4946 149 | [2020/02/01 03:14:27] - 146 274.8 51.0 0.0060 0.3017 0.9445 0.7904 0.6658 0.5171 0.8379 1.5101 0.4886 150 | [2020/02/01 03:19:54] - 147 275.7 51.2 0.0060 0.2984 0.9458 0.7862 0.6710 0.5522 0.8291 1.4689 0.4991 151 | [2020/02/01 03:25:22] - 148 276.5 51.8 0.0060 0.2914 0.9484 0.7800 0.6709 0.5231 0.8384 1.5413 0.4849 152 | [2020/02/01 03:30:52] - 149 277.8 51.6 0.0060 0.2906 0.9485 0.7814 0.6703 0.5522 0.8304 1.4510 0.5007 153 | -------------------------------------------------------------------------------- /experiments/cifar10_droplr/preactresnet_0.008/output.log: -------------------------------------------------------------------------------- 1 | [2020/01/31 09:24:15] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=150, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_droplr/preactresnet_0.008', half=False, l1=0, l2=0, lr_drop_epoch=100, lr_max=0.1, lr_one_drop=0.008, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, restarts=1, resume=0, seed=0, val=False, width_factor=10) 2 | [2020/01/31 09:24:19] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 3 | [2020/01/31 09:29:59] - 0 285.8 53.4 0.1000 1.9722 0.2787 2.1588 0.1981 1.7563 0.3742 1.9690 0.2620 4 | [2020/01/31 09:35:40] - 1 287.2 53.6 0.1000 1.7009 0.3875 1.9720 0.2589 1.5822 0.4403 1.8638 0.3089 5 | [2020/01/31 09:41:20] - 2 286.1 53.5 0.1000 1.5779 0.4450 1.8961 0.2888 1.5483 0.4570 1.8499 0.3098 6 | [2020/01/31 09:47:02] - 3 287.8 53.9 0.1000 1.4861 0.4817 1.8371 0.3048 1.4113 0.5204 1.7466 0.3470 7 | [2020/01/31 09:52:44] - 4 287.4 53.6 0.1000 1.3999 0.5263 1.7865 0.3269 1.3811 0.5350 1.7308 0.3559 8 | [2020/01/31 09:58:24] - 5 286.6 53.8 0.1000 1.3302 0.5565 1.7425 0.3432 1.2867 0.5636 1.6638 0.3771 9 | 
[2020/01/31 10:04:06] - 6 287.3 54.0 0.1000 1.2765 0.5856 1.7064 0.3562 1.1868 0.6047 1.6139 0.3988 10 | [2020/01/31 10:09:48] - 7 287.6 53.3 0.1000 1.2233 0.6070 1.6731 0.3671 1.1616 0.6252 1.6055 0.3897 11 | [2020/01/31 10:15:27] - 8 286.2 53.6 0.1000 1.1837 0.6274 1.6467 0.3752 1.1276 0.6277 1.5773 0.3978 12 | [2020/01/31 10:21:10] - 9 288.5 53.8 0.1000 1.1530 0.6402 1.6285 0.3823 1.0864 0.6510 1.5704 0.4109 13 | [2020/01/31 10:26:53] - 10 288.4 53.9 0.1000 1.1242 0.6516 1.6094 0.3926 1.1075 0.6223 1.5671 0.4082 14 | [2020/01/31 10:32:35] - 11 288.3 53.9 0.1000 1.1008 0.6620 1.5926 0.3984 1.1344 0.6467 1.5610 0.4175 15 | [2020/01/31 10:38:18] - 12 288.3 53.7 0.1000 1.0826 0.6710 1.5824 0.3994 1.0537 0.6724 1.5320 0.4190 16 | [2020/01/31 10:44:01] - 13 288.5 53.8 0.1000 1.0664 0.6799 1.5705 0.4077 1.0372 0.6659 1.5507 0.4173 17 | [2020/01/31 10:49:42] - 14 287.8 53.6 0.1000 1.0439 0.6871 1.5549 0.4112 1.0184 0.6804 1.5303 0.4175 18 | [2020/01/31 10:55:24] - 15 288.2 53.7 0.1000 1.0300 0.6953 1.5437 0.4138 1.0033 0.6917 1.5045 0.4379 19 | [2020/01/31 11:01:07] - 16 288.9 53.8 0.1000 1.0166 0.6995 1.5343 0.4181 1.0131 0.6544 1.5036 0.4392 20 | [2020/01/31 11:06:50] - 17 288.3 53.7 0.1000 1.0029 0.7043 1.5255 0.4208 1.0244 0.6885 1.4926 0.4482 21 | [2020/01/31 11:12:32] - 18 288.6 53.8 0.1000 0.9953 0.7062 1.5206 0.4222 0.9568 0.7035 1.5178 0.4257 22 | [2020/01/31 11:18:14] - 19 288.2 53.8 0.1000 0.9805 0.7148 1.5081 0.4275 0.9820 0.7014 1.4986 0.4379 23 | [2020/01/31 11:23:57] - 20 288.1 53.7 0.1000 0.9720 0.7186 1.5027 0.4310 0.9883 0.6900 1.4976 0.4405 24 | [2020/01/31 11:29:40] - 21 289.0 53.8 0.1000 0.9681 0.7168 1.4978 0.4306 1.0049 0.6799 1.4912 0.4401 25 | [2020/01/31 11:35:22] - 22 288.6 53.7 0.1000 0.9586 0.7232 1.4927 0.4337 0.9674 0.7057 1.4789 0.4465 26 | [2020/01/31 11:41:05] - 23 288.7 53.9 0.1000 0.9512 0.7268 1.4905 0.4336 0.9488 0.7137 1.4573 0.4450 27 | [2020/01/31 11:46:47] - 24 288.5 53.8 0.1000 0.9432 0.7308 1.4816 0.4370 0.9880 0.6813 1.4555 0.4539 28 | [2020/01/31 11:52:31] - 25 289.0 54.2 0.1000 0.9321 0.7348 1.4753 0.4395 0.9443 0.7046 1.4633 0.4530 29 | [2020/01/31 11:58:14] - 26 289.5 54.1 0.1000 0.9299 0.7336 1.4719 0.4392 0.9525 0.7120 1.4745 0.4435 30 | [2020/01/31 12:03:57] - 27 289.1 53.8 0.1000 0.9256 0.7377 1.4695 0.4401 0.9703 0.7084 1.4668 0.4548 31 | [2020/01/31 12:09:40] - 28 288.4 53.8 0.1000 0.9224 0.7374 1.4668 0.4437 0.9591 0.7029 1.4731 0.4466 32 | [2020/01/31 12:15:23] - 29 288.9 54.1 0.1000 0.9192 0.7398 1.4625 0.4433 0.9235 0.7302 1.4422 0.4567 33 | [2020/01/31 12:21:07] - 30 288.8 54.0 0.1000 0.9114 0.7436 1.4582 0.4435 0.9398 0.7161 1.4617 0.4475 34 | [2020/01/31 12:26:48] - 31 287.6 53.7 0.1000 0.9070 0.7457 1.4557 0.4469 0.9468 0.7287 1.4633 0.4611 35 | [2020/01/31 12:32:31] - 32 288.3 53.7 0.1000 0.9016 0.7460 1.4504 0.4488 0.9197 0.7270 1.4752 0.4519 36 | [2020/01/31 12:38:12] - 33 287.9 53.7 0.1000 0.9030 0.7444 1.4516 0.4482 0.9064 0.7288 1.4497 0.4657 37 | [2020/01/31 12:43:56] - 34 289.3 54.0 0.1000 0.8953 0.7479 1.4472 0.4493 0.8785 0.7394 1.4138 0.4727 38 | [2020/01/31 12:49:38] - 35 287.7 53.8 0.1000 0.8928 0.7508 1.4444 0.4498 0.9361 0.7241 1.4719 0.4562 39 | [2020/01/31 12:55:20] - 36 288.3 53.7 0.1000 0.8935 0.7491 1.4446 0.4509 0.9420 0.7278 1.4560 0.4547 40 | [2020/01/31 13:01:03] - 37 289.2 53.7 0.1000 0.8889 0.7519 1.4439 0.4525 0.9190 0.7359 1.4471 0.4617 41 | [2020/01/31 13:06:45] - 38 288.1 53.6 0.1000 0.8850 0.7529 1.4394 0.4522 0.9263 0.7237 1.4536 0.4560 42 | [2020/01/31 13:12:27] - 39 288.6 53.9 0.1000 0.8817 0.7556 
1.4375 0.4533 0.8935 0.7230 1.4503 0.4461 43 | [2020/01/31 13:18:11] - 40 289.0 54.1 0.1000 0.8802 0.7561 1.4354 0.4544 0.9297 0.7225 1.4427 0.4623 44 | [2020/01/31 13:23:55] - 41 290.0 54.0 0.1000 0.8768 0.7586 1.4354 0.4549 0.8947 0.7359 1.4195 0.4683 45 | [2020/01/31 13:29:36] - 42 287.5 53.8 0.1000 0.8786 0.7573 1.4371 0.4543 0.9231 0.7110 1.4456 0.4671 46 | [2020/01/31 13:35:19] - 43 289.0 53.7 0.1000 0.8742 0.7590 1.4326 0.4530 0.8883 0.7278 1.4561 0.4450 47 | [2020/01/31 13:41:02] - 44 288.6 54.0 0.1000 0.8734 0.7584 1.4316 0.4561 0.9221 0.7311 1.4330 0.4748 48 | [2020/01/31 13:46:45] - 45 288.8 53.8 0.1000 0.8672 0.7603 1.4254 0.4589 0.8601 0.7228 1.4559 0.4483 49 | [2020/01/31 13:52:28] - 46 289.3 54.0 0.1000 0.8707 0.7597 1.4284 0.4570 0.8617 0.7258 1.4208 0.4716 50 | [2020/01/31 13:58:11] - 47 289.5 53.9 0.1000 0.8655 0.7632 1.4250 0.4586 0.8788 0.7254 1.4273 0.4615 51 | [2020/01/31 14:03:55] - 48 289.7 54.0 0.1000 0.8653 0.7627 1.4286 0.4562 0.9100 0.7204 1.4644 0.4555 52 | [2020/01/31 14:09:38] - 49 289.3 54.0 0.1000 0.8615 0.7627 1.4215 0.4592 0.9174 0.7192 1.4484 0.4586 53 | [2020/01/31 14:15:23] - 50 289.5 54.0 0.1000 0.8571 0.7659 1.4176 0.4594 0.8671 0.7174 1.4307 0.4612 54 | [2020/01/31 14:21:06] - 51 289.0 54.1 0.1000 0.8553 0.7673 1.4192 0.4592 0.8844 0.7213 1.4378 0.4623 55 | [2020/01/31 14:26:48] - 52 288.5 53.5 0.1000 0.8549 0.7670 1.4171 0.4606 0.8532 0.7368 1.4173 0.4606 56 | [2020/01/31 14:32:30] - 53 288.5 53.7 0.1000 0.8617 0.7632 1.4226 0.4608 0.8969 0.7270 1.4343 0.4654 57 | [2020/01/31 14:38:13] - 54 289.1 53.8 0.1000 0.8522 0.7664 1.4148 0.4605 0.8269 0.7477 1.4620 0.4494 58 | [2020/01/31 14:43:56] - 55 288.7 53.9 0.1000 0.8523 0.7693 1.4159 0.4603 0.8919 0.7397 1.4491 0.4671 59 | [2020/01/31 14:49:39] - 56 289.3 54.0 0.1000 0.8542 0.7660 1.4157 0.4585 0.8383 0.7419 1.4339 0.4566 60 | [2020/01/31 14:55:22] - 57 289.4 54.1 0.1000 0.8429 0.7708 1.4059 0.4634 0.8571 0.7314 1.4309 0.4682 61 | [2020/01/31 15:01:05] - 58 289.2 53.8 0.1000 0.8565 0.7655 1.4179 0.4600 0.8606 0.7527 1.4228 0.4683 62 | [2020/01/31 15:06:48] - 59 288.6 53.9 0.1000 0.8447 0.7719 1.4111 0.4603 0.8846 0.7365 1.4098 0.4715 63 | [2020/01/31 15:12:31] - 60 288.5 53.9 0.1000 0.8505 0.7679 1.4144 0.4616 0.8650 0.7534 1.4109 0.4674 64 | [2020/01/31 15:18:15] - 61 289.3 54.0 0.1000 0.8457 0.7736 1.4105 0.4630 0.8424 0.7450 1.4111 0.4764 65 | [2020/01/31 15:23:59] - 62 289.9 53.8 0.1000 0.8421 0.7729 1.4098 0.4635 0.8650 0.7325 1.4157 0.4674 66 | [2020/01/31 15:29:41] - 63 288.6 53.5 0.1000 0.8426 0.7725 1.4075 0.4625 0.8678 0.7434 1.4161 0.4737 67 | [2020/01/31 15:35:23] - 64 288.6 53.6 0.1000 0.8450 0.7736 1.4115 0.4621 0.8577 0.7441 1.4237 0.4668 68 | [2020/01/31 15:41:06] - 65 289.1 54.2 0.1000 0.8446 0.7711 1.4100 0.4614 0.8422 0.7383 1.4383 0.4639 69 | [2020/01/31 15:46:50] - 66 290.2 53.8 0.1000 0.8387 0.7746 1.4044 0.4638 0.8602 0.7350 1.4336 0.4616 70 | [2020/01/31 15:52:35] - 67 290.1 54.1 0.1000 0.8383 0.7757 1.4056 0.4624 0.8276 0.7609 1.4356 0.4595 71 | [2020/01/31 15:58:18] - 68 289.0 54.0 0.1000 0.8330 0.7751 1.4007 0.4652 0.9050 0.7322 1.4499 0.4616 72 | [2020/01/31 16:04:01] - 69 289.2 54.1 0.1000 0.8385 0.7727 1.4043 0.4644 0.8675 0.7406 1.4566 0.4515 73 | [2020/01/31 16:09:45] - 70 288.9 53.9 0.1000 0.8341 0.7750 1.4031 0.4660 0.8727 0.7174 1.4266 0.4667 74 | [2020/01/31 16:15:28] - 71 289.4 54.0 0.1000 0.8371 0.7751 1.4057 0.4652 0.8528 0.7483 1.3960 0.4788 75 | [2020/01/31 16:21:12] - 72 289.7 53.9 0.1000 0.8347 0.7776 1.4039 0.4663 0.8576 0.7470 1.4380 0.4553 76 | 
[2020/01/31 16:26:53] - 73 287.5 53.6 0.1000 0.8334 0.7751 1.4024 0.4666 0.8845 0.7408 1.4429 0.4627 77 | [2020/01/31 16:32:35] - 74 288.1 53.6 0.1000 0.8384 0.7732 1.4058 0.4649 0.8630 0.7216 1.4188 0.4748 78 | [2020/01/31 16:38:17] - 75 288.2 53.6 0.1000 0.8312 0.7771 1.3975 0.4676 0.8209 0.7595 1.4045 0.4713 79 | [2020/01/31 16:43:59] - 76 288.2 53.6 0.1000 0.8316 0.7756 1.4018 0.4663 0.8436 0.7493 1.4104 0.4743 80 | [2020/01/31 16:49:41] - 77 288.3 53.9 0.1000 0.8358 0.7766 1.4040 0.4634 0.8564 0.7256 1.4215 0.4659 81 | [2020/01/31 16:55:24] - 78 289.4 54.0 0.1000 0.8306 0.7759 1.3995 0.4666 0.8706 0.7485 1.4421 0.4577 82 | [2020/01/31 17:01:07] - 79 289.2 53.8 0.1000 0.8288 0.7782 1.3968 0.4673 0.8450 0.7451 1.4285 0.4698 83 | [2020/01/31 17:06:50] - 80 288.5 53.8 0.1000 0.8303 0.7766 1.3972 0.4713 0.8796 0.7351 1.4357 0.4672 84 | [2020/01/31 17:12:34] - 81 289.7 53.9 0.1000 0.8286 0.7777 1.3969 0.4666 0.8699 0.7426 1.4121 0.4712 85 | [2020/01/31 17:18:17] - 82 289.0 53.7 0.1000 0.8282 0.7793 1.3973 0.4661 0.8296 0.7592 1.4041 0.4718 86 | [2020/01/31 17:24:00] - 83 288.9 54.1 0.1000 0.8255 0.7796 1.3947 0.4690 0.8491 0.7363 1.4321 0.4624 87 | [2020/01/31 17:29:41] - 84 287.6 53.8 0.1000 0.8287 0.7793 1.3987 0.4668 0.8875 0.7342 1.4178 0.4679 88 | [2020/01/31 17:35:24] - 85 288.8 53.8 0.1000 0.8259 0.7795 1.3961 0.4692 0.8451 0.7353 1.4069 0.4761 89 | [2020/01/31 17:41:05] - 86 287.7 53.5 0.1000 0.8237 0.7770 1.3945 0.4687 0.8913 0.7269 1.4563 0.4456 90 | [2020/01/31 17:46:47] - 87 288.8 53.7 0.1000 0.8233 0.7826 1.3931 0.4691 0.8374 0.7499 1.4274 0.4680 91 | [2020/01/31 17:52:30] - 88 288.9 53.8 0.1000 0.8241 0.7794 1.3938 0.4671 0.8921 0.7151 1.4524 0.4494 92 | [2020/01/31 17:58:14] - 89 289.7 53.7 0.1000 0.8250 0.7761 1.3947 0.4692 0.8740 0.7598 1.4018 0.4741 93 | [2020/01/31 18:03:57] - 90 289.1 53.7 0.1000 0.8160 0.7829 1.3882 0.4716 0.8725 0.7515 1.4110 0.4681 94 | [2020/01/31 18:09:41] - 91 289.4 54.1 0.1000 0.8230 0.7803 1.3917 0.4694 0.8190 0.7706 1.4104 0.4705 95 | [2020/01/31 18:15:23] - 92 288.4 53.9 0.1000 0.8217 0.7809 1.3938 0.4693 0.8790 0.7322 1.4109 0.4686 96 | [2020/01/31 18:21:06] - 93 289.1 53.9 0.1000 0.8221 0.7805 1.3911 0.4682 0.8000 0.7678 1.4239 0.4633 97 | [2020/01/31 18:26:49] - 94 289.1 53.6 0.1000 0.8228 0.7796 1.3923 0.4698 0.8378 0.7516 1.4198 0.4675 98 | [2020/01/31 18:32:31] - 95 288.6 53.8 0.1000 0.8172 0.7839 1.3904 0.4679 0.8765 0.7285 1.4236 0.4586 99 | [2020/01/31 18:38:12] - 96 287.9 53.4 0.1000 0.8219 0.7804 1.3906 0.4706 0.8323 0.7586 1.4419 0.4499 100 | [2020/01/31 18:43:55] - 97 288.4 53.9 0.1000 0.8164 0.7843 1.3883 0.4696 0.8827 0.7315 1.4366 0.4612 101 | [2020/01/31 18:49:36] - 98 287.4 53.9 0.1000 0.8190 0.7823 1.3924 0.4691 0.8584 0.7410 1.4159 0.4679 102 | [2020/01/31 18:55:18] - 99 288.3 53.8 0.0080 0.8190 0.7818 1.3881 0.4713 0.8395 0.7351 1.4258 0.4621 103 | [2020/01/31 19:01:02] - 100 289.1 53.6 0.0080 0.7119 0.8149 1.2017 0.5350 0.6656 0.8032 1.2742 0.5200 104 | [2020/01/31 19:06:45] - 101 288.7 53.6 0.0080 0.6317 0.8381 1.1386 0.5557 0.6543 0.8130 1.2414 0.5266 105 | [2020/01/31 19:12:26] - 102 287.9 53.5 0.0080 0.6075 0.8467 1.1121 0.5655 0.6509 0.8182 1.2467 0.5323 106 | [2020/01/31 19:18:09] - 103 288.0 53.8 0.0080 0.5839 0.8534 1.0890 0.5733 0.6170 0.8266 1.2413 0.5320 107 | [2020/01/31 19:23:52] - 104 289.5 54.0 0.0080 0.5663 0.8591 1.0757 0.5769 0.6304 0.8145 1.2624 0.5247 108 | [2020/01/31 19:29:36] - 105 289.7 54.0 0.0080 0.5489 0.8642 1.0573 0.5837 0.6429 0.8158 1.2309 0.5389 109 | [2020/01/31 19:35:19] - 106 289.0 54.1 
0.0080 0.5380 0.8682 1.0449 0.5866 0.6150 0.8284 1.2335 0.5386 110 | [2020/01/31 19:41:00] - 107 287.2 53.7 0.0080 0.5255 0.8729 1.0323 0.5900 0.5900 0.8285 1.2489 0.5290 111 | [2020/01/31 19:46:43] - 108 289.1 54.1 0.0080 0.5137 0.8763 1.0189 0.5944 0.5787 0.8345 1.2491 0.5259 112 | [2020/01/31 19:52:26] - 109 289.4 53.7 0.0080 0.5016 0.8810 1.0094 0.5982 0.5779 0.8368 1.2717 0.5204 113 | [2020/01/31 19:58:11] - 110 289.3 54.0 0.0080 0.4932 0.8839 1.0039 0.5971 0.5868 0.8303 1.2461 0.5290 114 | [2020/01/31 20:03:52] - 111 287.9 53.7 0.0080 0.4843 0.8876 0.9935 0.6030 0.5846 0.8227 1.2780 0.5220 115 | [2020/01/31 20:09:34] - 112 287.6 53.8 0.0080 0.4724 0.8895 0.9815 0.6074 0.5584 0.8351 1.2931 0.5170 116 | [2020/01/31 20:15:16] - 113 289.1 53.7 0.0080 0.4660 0.8933 0.9753 0.6079 0.6019 0.8232 1.2615 0.5278 117 | [2020/01/31 20:20:58] - 114 288.1 53.6 0.0080 0.4561 0.8965 0.9656 0.6121 0.5609 0.8300 1.3089 0.5172 118 | [2020/01/31 20:26:41] - 115 288.9 53.8 0.0080 0.4508 0.8985 0.9614 0.6128 0.5508 0.8420 1.3143 0.5143 119 | [2020/01/31 20:32:24] - 116 289.1 54.0 0.0080 0.4436 0.8998 0.9554 0.6157 0.6052 0.8233 1.2843 0.5260 120 | [2020/01/31 20:38:07] - 117 289.5 53.9 0.0080 0.4358 0.9035 0.9445 0.6189 0.5450 0.8322 1.3382 0.5152 121 | [2020/01/31 20:43:50] - 118 288.7 54.0 0.0080 0.4291 0.9054 0.9392 0.6199 0.5784 0.8273 1.3085 0.5168 122 | [2020/01/31 20:49:33] - 119 289.0 53.9 0.0080 0.4238 0.9073 0.9338 0.6196 0.5888 0.8306 1.3522 0.5062 123 | [2020/01/31 20:55:17] - 120 289.1 54.0 0.0080 0.4186 0.9080 0.9271 0.6221 0.5669 0.8382 1.3025 0.5177 124 | [2020/01/31 21:01:00] - 121 289.6 53.6 0.0080 0.4098 0.9122 0.9215 0.6255 0.5542 0.8292 1.3628 0.5105 125 | [2020/01/31 21:06:42] - 122 288.4 53.5 0.0080 0.4056 0.9148 0.9159 0.6258 0.5430 0.8282 1.3881 0.5040 126 | [2020/01/31 21:12:25] - 123 288.8 53.9 0.0080 0.4005 0.9133 0.9110 0.6285 0.5626 0.8345 1.3598 0.5090 127 | [2020/01/31 21:18:07] - 124 288.6 53.8 0.0080 0.3957 0.9179 0.9085 0.6273 0.5524 0.8395 1.3281 0.5149 128 | [2020/01/31 21:23:50] - 125 288.9 53.8 0.0080 0.3910 0.9178 0.9017 0.6299 0.5677 0.8226 1.3576 0.5087 129 | [2020/01/31 21:29:31] - 126 287.8 53.6 0.0080 0.3860 0.9200 0.8965 0.6307 0.5897 0.8270 1.3486 0.5091 130 | [2020/01/31 21:35:14] - 127 289.1 54.0 0.0080 0.3831 0.9201 0.8930 0.6351 0.5587 0.8320 1.4062 0.5042 131 | [2020/01/31 21:40:57] - 128 288.7 54.0 0.0080 0.3772 0.9234 0.8873 0.6355 0.5561 0.8360 1.4170 0.4992 132 | [2020/01/31 21:46:40] - 129 289.0 54.0 0.0080 0.3734 0.9237 0.8865 0.6354 0.5537 0.8329 1.4039 0.5031 133 | [2020/01/31 21:52:25] - 130 290.0 53.9 0.0080 0.3675 0.9268 0.8768 0.6380 0.5629 0.8354 1.3622 0.5085 134 | [2020/01/31 21:58:05] - 131 286.9 53.5 0.0080 0.3651 0.9269 0.8755 0.6400 0.5407 0.8344 1.4785 0.4834 135 | [2020/01/31 22:03:48] - 132 288.7 54.0 0.0080 0.3604 0.9289 0.8720 0.6413 0.5524 0.8350 1.4031 0.5020 136 | [2020/01/31 22:09:31] - 133 289.4 53.9 0.0080 0.3589 0.9300 0.8701 0.6393 0.5406 0.8332 1.4181 0.4969 137 | [2020/01/31 22:15:14] - 134 289.0 53.8 0.0080 0.3510 0.9308 0.8631 0.6425 0.5357 0.8355 1.4572 0.4915 138 | [2020/01/31 22:20:57] - 135 288.6 54.0 0.0080 0.3509 0.9325 0.8638 0.6410 0.5554 0.8330 1.4070 0.5012 139 | [2020/01/31 22:26:39] - 136 288.7 53.8 0.0080 0.3461 0.9334 0.8558 0.6455 0.5664 0.8301 1.3805 0.5033 140 | [2020/01/31 22:32:21] - 137 288.4 53.7 0.0080 0.3422 0.9339 0.8527 0.6475 0.5565 0.8318 1.3964 0.5086 141 | [2020/01/31 22:38:03] - 138 288.3 53.9 0.0080 0.3385 0.9350 0.8469 0.6462 0.5436 0.8319 1.4878 0.4847 142 | [2020/01/31 22:43:46] - 139 
289.0 54.0 0.0080 0.3378 0.9362 0.8490 0.6443 0.5593 0.8358 1.4323 0.4957 143 | [2020/01/31 22:49:29] - 140 287.7 53.9 0.0080 0.3320 0.9387 0.8422 0.6507 0.5323 0.8346 1.4301 0.4999 144 | [2020/01/31 22:55:12] - 141 288.9 53.9 0.0080 0.3288 0.9386 0.8355 0.6506 0.5495 0.8226 1.4612 0.4937 145 | [2020/01/31 23:00:55] - 142 289.6 54.0 0.0080 0.3231 0.9399 0.8322 0.6526 0.5363 0.8287 1.4654 0.4896 146 | [2020/01/31 23:06:37] - 143 289.0 53.4 0.0080 0.3228 0.9413 0.8316 0.6531 0.5327 0.8363 1.4997 0.4832 147 | [2020/01/31 23:12:19] - 144 288.2 53.7 0.0080 0.3194 0.9406 0.8277 0.6545 0.5279 0.8374 1.4840 0.4872 148 | [2020/01/31 23:18:02] - 145 288.9 54.1 0.0080 0.3166 0.9430 0.8262 0.6531 0.5417 0.8309 1.4541 0.4988 149 | [2020/01/31 23:23:47] - 146 290.3 54.2 0.0080 0.3143 0.9425 0.8198 0.6559 0.5344 0.8362 1.5091 0.4846 150 | [2020/01/31 23:29:30] - 147 289.0 54.1 0.0080 0.3104 0.9450 0.8158 0.6583 0.5668 0.8280 1.4597 0.4875 151 | [2020/01/31 23:35:13] - 148 289.1 54.0 0.0080 0.3051 0.9464 0.8094 0.6597 0.5336 0.8386 1.5225 0.4783 152 | [2020/01/31 23:40:54] - 149 287.4 53.7 0.0080 0.3026 0.9455 0.8094 0.6582 0.5652 0.8299 1.4379 0.4988 153 | -------------------------------------------------------------------------------- /experiments/cifar10_startlr/preactresnet_0.3/output.log: -------------------------------------------------------------------------------- 1 | [2020/01/29 13:22:47] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=110, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_startlr/preactresnet_0.3', half=False, l1=0, l2=0, lr_drop_epoch=100, lr_max=0.3, lr_one_drop=0.01, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, restarts=1, resume=0, seed=0, val=False, width_factor=10) 2 | [2020/01/29 13:22:51] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 3 | [2020/01/29 13:28:12] - 0 269.4 50.9 0.3000 2.1152 0.2153 2.2408 0.1675 1.9912 0.2589 2.1332 0.1989 4 | [2020/01/29 13:33:39] - 1 275.5 51.4 0.3000 1.8502 0.3238 2.0557 0.2310 1.6960 0.3932 1.9553 0.2773 5 | [2020/01/29 13:39:07] - 2 275.9 51.7 0.3000 1.6751 0.3997 1.9559 0.2660 1.6229 0.4251 1.9035 0.2882 6 | [2020/01/29 13:44:35] - 3 275.9 51.5 0.3000 1.5675 0.4461 1.8961 0.2853 1.4742 0.4876 1.7964 0.3150 7 | [2020/01/29 13:50:02] - 4 275.6 51.5 0.3000 1.4919 0.4858 1.8561 0.3016 1.4769 0.4651 1.7947 0.3281 8 | [2020/01/29 13:55:30] - 5 275.8 51.4 0.3000 1.4342 0.5129 1.8236 0.3144 1.4157 0.5039 1.7544 0.3497 9 | [2020/01/29 14:00:57] - 6 275.5 51.4 0.3000 1.3954 0.5326 1.8009 0.3230 1.2972 0.5677 1.7034 0.3632 10 | [2020/01/29 14:06:24] - 7 274.9 51.1 0.3000 1.3566 0.5485 1.7782 0.3290 1.3422 0.5463 1.7393 0.3495 11 | [2020/01/29 14:11:50] - 8 275.1 51.1 0.3000 1.3342 0.5594 1.7664 0.3311 1.2832 0.5500 1.7147 0.3510 12 | [2020/01/29 14:17:18] - 9 276.4 51.4 0.3000 1.3064 0.5738 1.7521 0.3408 1.2336 0.5907 1.7108 0.3593 13 | [2020/01/29 14:22:46] - 10 276.0 51.4 0.3000 1.2901 0.5814 1.7420 0.3432 1.2585 0.5676 1.6857 0.3720 14 | [2020/01/29 14:28:14] - 11 275.9 51.4 0.3000 1.2764 0.5858 1.7327 0.3484 1.2913 0.5669 1.6958 0.3689 15 | [2020/01/29 14:33:42] - 12 276.5 51.3 0.3000 1.2707 0.5915 1.7332 0.3471 1.2430 0.6054 1.6546 0.3802 16 | [2020/01/29 14:39:10] - 13 275.9 51.6 0.3000 1.2562 0.5954 1.7233 0.3527 1.2135 0.5813 1.6966 0.3612 17 | [2020/01/29 
14:44:38] - 14 276.5 51.5 0.3000 1.2443 0.6026 1.7156 0.3538 1.2618 0.5720 1.7050 0.3550 18 | [2020/01/29 14:50:05] - 15 275.9 51.3 0.3000 1.2368 0.6083 1.7113 0.3534 1.1636 0.6213 1.6277 0.3870 19 | [2020/01/29 14:55:34] - 16 276.6 51.7 0.3000 1.2281 0.6136 1.7071 0.3575 1.1973 0.5593 1.6446 0.3858 20 | [2020/01/29 15:01:01] - 17 276.2 51.4 0.3000 1.2242 0.6119 1.7046 0.3592 1.2468 0.5885 1.6565 0.3731 21 | [2020/01/29 15:06:29] - 18 276.6 51.4 0.3000 1.2216 0.6102 1.7047 0.3557 1.2046 0.5850 1.6717 0.3648 22 | [2020/01/29 15:11:58] - 19 277.0 51.6 0.3000 1.2097 0.6188 1.6960 0.3630 1.2080 0.6142 1.6657 0.3700 23 | [2020/01/29 15:17:27] - 20 277.2 51.5 0.3000 1.2051 0.6243 1.6933 0.3626 1.2053 0.6044 1.6456 0.3866 24 | [2020/01/29 15:22:56] - 21 276.8 51.4 0.3000 1.2057 0.6193 1.6921 0.3644 1.2292 0.5916 1.6295 0.3872 25 | [2020/01/29 15:28:23] - 22 276.2 51.3 0.3000 1.2001 0.6249 1.6889 0.3636 1.1810 0.6163 1.6469 0.3740 26 | [2020/01/29 15:33:51] - 23 276.3 51.3 0.3000 1.2017 0.6223 1.6939 0.3619 1.2008 0.5889 1.6248 0.3761 27 | [2020/01/29 15:39:18] - 24 275.8 51.3 0.3000 1.1900 0.6290 1.6827 0.3670 1.2127 0.5863 1.6142 0.3988 28 | [2020/01/29 15:44:46] - 25 276.3 51.4 0.3000 1.1898 0.6300 1.6844 0.3661 1.1926 0.6035 1.6268 0.3912 29 | [2020/01/29 15:50:14] - 26 276.0 51.5 0.3000 1.1853 0.6281 1.6812 0.3647 1.2003 0.5835 1.6530 0.3875 30 | [2020/01/29 15:55:42] - 27 277.0 51.4 0.3000 1.1855 0.6305 1.6819 0.3669 1.2117 0.6120 1.6307 0.3884 31 | [2020/01/29 16:01:10] - 28 276.4 51.3 0.3000 1.1890 0.6279 1.6845 0.3662 1.1662 0.6328 1.6177 0.3997 32 | [2020/01/29 16:06:38] - 29 276.3 51.5 0.3000 1.1868 0.6307 1.6806 0.3667 1.1805 0.6024 1.6228 0.4013 33 | [2020/01/29 16:12:08] - 30 277.0 51.5 0.3000 1.1839 0.6303 1.6804 0.3661 1.2104 0.5859 1.6678 0.3722 34 | [2020/01/29 16:17:36] - 31 276.5 51.5 0.3000 1.1778 0.6349 1.6772 0.3672 1.1843 0.6280 1.6221 0.3948 35 | [2020/01/29 16:23:05] - 32 277.3 51.5 0.3000 1.1755 0.6367 1.6756 0.3671 1.1715 0.6470 1.6103 0.3991 36 | [2020/01/29 16:28:34] - 33 277.5 51.9 0.3000 1.1764 0.6341 1.6765 0.3714 1.2078 0.6078 1.6507 0.3800 37 | [2020/01/29 16:34:02] - 34 276.4 51.6 0.3000 1.1720 0.6364 1.6736 0.3694 1.1296 0.6348 1.5801 0.4008 38 | [2020/01/29 16:39:30] - 35 276.6 51.5 0.3000 1.1671 0.6399 1.6711 0.3717 1.1779 0.6366 1.6295 0.3954 39 | [2020/01/29 16:44:59] - 36 277.1 51.6 0.3000 1.1730 0.6372 1.6724 0.3702 1.1888 0.6265 1.6321 0.3847 40 | [2020/01/29 16:50:28] - 37 277.4 51.6 0.3000 1.1671 0.6409 1.6732 0.3705 1.1549 0.6200 1.6188 0.3887 41 | [2020/01/29 16:55:56] - 38 276.5 51.5 0.3000 1.1675 0.6384 1.6721 0.3705 1.1725 0.6585 1.6173 0.3984 42 | [2020/01/29 17:01:24] - 39 276.6 51.5 0.3000 1.1662 0.6425 1.6710 0.3722 1.1523 0.6184 1.6146 0.3969 43 | [2020/01/29 17:06:53] - 40 276.5 51.4 0.3000 1.1637 0.6429 1.6689 0.3710 1.1591 0.6268 1.6197 0.3940 44 | [2020/01/29 17:12:21] - 41 276.4 51.5 0.3000 1.1629 0.6420 1.6689 0.3719 1.1621 0.6295 1.6123 0.4015 45 | [2020/01/29 17:17:49] - 42 275.9 51.4 0.3000 1.1681 0.6390 1.6729 0.3690 1.1649 0.6119 1.6230 0.4015 46 | [2020/01/29 17:23:16] - 43 275.8 51.6 0.3000 1.1669 0.6399 1.6714 0.3691 1.1573 0.6231 1.6463 0.3662 47 | [2020/01/29 17:28:44] - 44 276.7 51.5 0.3000 1.1621 0.6434 1.6693 0.3705 1.2011 0.6355 1.6160 0.4095 48 | [2020/01/29 17:34:13] - 45 277.0 51.5 0.3000 1.1612 0.6414 1.6673 0.3698 1.1262 0.6182 1.6245 0.3879 49 | [2020/01/29 17:39:41] - 46 277.0 51.5 0.3000 1.1677 0.6425 1.6731 0.3710 1.1530 0.6277 1.6105 0.3993 50 | [2020/01/29 17:45:09] - 47 276.4 51.4 0.3000 1.1598 0.6447 1.6645 
0.3717 1.1724 0.6239 1.6316 0.3933 51 | [2020/01/29 17:50:38] - 48 276.8 51.5 0.3000 1.1642 0.6427 1.6721 0.3710 1.1741 0.6416 1.6419 0.3953 52 | [2020/01/29 17:56:06] - 49 276.9 51.3 0.3000 1.1612 0.6415 1.6659 0.3713 1.1715 0.6159 1.6262 0.3833 53 | [2020/01/29 18:01:35] - 50 276.8 51.4 0.3000 1.1569 0.6423 1.6637 0.3724 1.1409 0.6116 1.6161 0.3894 54 | [2020/01/29 18:07:02] - 51 275.7 51.5 0.3000 1.1602 0.6441 1.6688 0.3698 1.1954 0.6044 1.6148 0.4023 55 | [2020/01/29 18:12:30] - 52 276.9 51.3 0.3000 1.1563 0.6461 1.6657 0.3745 1.1247 0.6477 1.5892 0.4029 56 | [2020/01/29 18:17:58] - 53 276.2 51.3 0.3000 1.1620 0.6427 1.6688 0.3739 1.2341 0.5898 1.6926 0.3689 57 | [2020/01/29 18:23:25] - 54 276.1 51.3 0.3000 1.1570 0.6439 1.6643 0.3735 1.1317 0.6362 1.6530 0.3748 58 | [2020/01/29 18:28:52] - 55 275.7 51.1 0.3000 1.1601 0.6457 1.6673 0.3700 1.1775 0.6307 1.6255 0.3947 59 | [2020/01/29 18:34:19] - 56 276.0 51.3 0.3000 1.1603 0.6405 1.6660 0.3725 1.1486 0.6202 1.6294 0.3822 60 | [2020/01/29 18:39:47] - 57 276.3 51.1 0.3000 1.1520 0.6469 1.6604 0.3729 1.1538 0.6543 1.6161 0.4124 61 | [2020/01/29 18:45:15] - 58 276.6 51.4 0.3000 1.1606 0.6436 1.6671 0.3718 1.1116 0.6523 1.6329 0.3886 62 | [2020/01/29 18:50:43] - 59 276.5 51.4 0.3000 1.1543 0.6467 1.6642 0.3715 1.1432 0.6218 1.6063 0.3904 63 | [2020/01/29 18:56:10] - 60 275.3 51.0 0.3000 1.1623 0.6385 1.6683 0.3710 1.1753 0.6325 1.6337 0.3880 64 | [2020/01/29 19:01:37] - 61 276.0 51.3 0.3000 1.1588 0.6451 1.6678 0.3720 1.1437 0.6319 1.6046 0.4036 65 | [2020/01/29 19:07:04] - 62 275.4 51.1 0.3000 1.1490 0.6490 1.6617 0.3734 1.1578 0.6332 1.6086 0.3979 66 | [2020/01/29 19:12:30] - 63 274.9 51.3 0.3000 1.1538 0.6482 1.6651 0.3739 1.1722 0.6280 1.6144 0.4030 67 | [2020/01/29 19:17:56] - 64 275.0 51.3 0.3000 1.1599 0.6451 1.6696 0.3700 1.1692 0.6472 1.6128 0.4021 68 | [2020/01/29 19:23:23] - 65 275.6 51.4 0.3000 1.1511 0.6493 1.6618 0.3749 1.1547 0.6132 1.6466 0.3847 69 | [2020/01/29 19:28:50] - 66 275.4 51.4 0.3000 1.1594 0.6439 1.6677 0.3707 1.1722 0.6088 1.6588 0.3791 70 | [2020/01/29 19:34:18] - 67 276.4 51.4 0.3000 1.1514 0.6497 1.6621 0.3737 1.0975 0.6409 1.6017 0.3967 71 | [2020/01/29 19:39:45] - 68 275.8 51.2 0.3000 1.1454 0.6506 1.6582 0.3751 1.1895 0.6291 1.6352 0.3949 72 | [2020/01/29 19:45:11] - 69 275.4 51.1 0.3000 1.1550 0.6450 1.6638 0.3730 1.1659 0.6308 1.6345 0.3967 73 | [2020/01/29 19:50:38] - 70 274.9 51.0 0.3000 1.1492 0.6508 1.6614 0.3751 1.1736 0.6243 1.6314 0.3945 74 | [2020/01/29 19:56:05] - 71 275.3 51.1 0.3000 1.1540 0.6472 1.6651 0.3733 1.1962 0.6239 1.6363 0.3914 75 | [2020/01/29 20:01:32] - 72 275.6 51.3 0.3000 1.1529 0.6461 1.6639 0.3729 1.1690 0.6150 1.6181 0.3883 76 | [2020/01/29 20:06:59] - 73 276.2 51.2 0.3000 1.1527 0.6479 1.6631 0.3761 1.2075 0.6207 1.6545 0.3864 77 | [2020/01/29 20:12:26] - 74 276.2 51.2 0.3000 1.1569 0.6456 1.6662 0.3721 1.1255 0.6414 1.5934 0.4066 78 | [2020/01/29 20:17:52] - 75 274.8 51.1 0.3000 1.1504 0.6461 1.6571 0.3750 1.0821 0.6530 1.6122 0.3808 79 | [2020/01/29 20:23:19] - 76 275.6 51.4 0.3000 1.1504 0.6488 1.6633 0.3720 1.1493 0.6121 1.6227 0.3818 80 | [2020/01/29 20:28:48] - 77 276.7 51.6 0.3000 1.1527 0.6481 1.6638 0.3708 1.1469 0.6203 1.6103 0.4086 81 | [2020/01/29 20:34:15] - 78 276.3 51.4 0.3000 1.1497 0.6472 1.6620 0.3761 1.1831 0.6303 1.6474 0.3690 82 | [2020/01/29 20:39:43] - 79 276.5 51.2 0.3000 1.1503 0.6487 1.6608 0.3755 1.1296 0.6334 1.6098 0.3955 83 | [2020/01/29 20:45:11] - 80 276.2 50.8 0.3000 1.1485 0.6479 1.6585 0.3761 1.1410 0.6454 1.6132 0.4048 84 | [2020/01/29 
20:50:36] - 81 274.1 51.1 0.3000 1.1504 0.6477 1.6618 0.3718 1.1621 0.6333 1.6273 0.3896 85 | [2020/01/29 20:56:02] - 82 274.9 51.1 0.3000 1.1495 0.6490 1.6596 0.3744 1.1617 0.6440 1.6226 0.3990 86 | [2020/01/29 21:01:27] - 83 273.7 51.0 0.3000 1.1494 0.6482 1.6614 0.3736 1.1693 0.6231 1.6437 0.3846 87 | [2020/01/29 21:06:53] - 84 275.1 51.1 0.3000 1.1547 0.6468 1.6638 0.3753 1.1491 0.6213 1.6097 0.3998 88 | [2020/01/29 21:12:20] - 85 276.0 51.3 0.3000 1.1576 0.6457 1.6673 0.3721 1.1429 0.6022 1.6155 0.3859 89 | [2020/01/29 21:17:47] - 86 275.2 51.2 0.3000 1.1528 0.6479 1.6624 0.3748 1.1928 0.6164 1.6392 0.3745 90 | [2020/01/29 21:23:14] - 87 275.6 51.6 0.3000 1.1488 0.6499 1.6602 0.3707 1.1260 0.6395 1.6162 0.4010 91 | [2020/01/29 21:28:41] - 88 276.0 50.9 0.3000 1.1439 0.6511 1.6553 0.3753 1.1724 0.6025 1.6348 0.3856 92 | [2020/01/29 21:34:06] - 89 274.6 51.2 0.3000 1.1513 0.6462 1.6624 0.3739 1.1640 0.6527 1.6111 0.4026 93 | [2020/01/29 21:39:34] - 90 275.6 51.2 0.3000 1.1448 0.6515 1.6576 0.3743 1.1704 0.6329 1.6243 0.3847 94 | [2020/01/29 21:45:01] - 91 275.6 51.2 0.3000 1.1505 0.6485 1.6620 0.3734 1.1187 0.6634 1.6320 0.3838 95 | [2020/01/29 21:50:28] - 92 275.8 51.2 0.3000 1.1483 0.6498 1.6579 0.3727 1.1364 0.6479 1.5973 0.4074 96 | [2020/01/29 21:55:55] - 93 275.6 51.1 0.3000 1.1439 0.6519 1.6563 0.3775 1.1518 0.5954 1.6366 0.3806 97 | [2020/01/29 22:01:21] - 94 275.3 51.1 0.3000 1.1525 0.6451 1.6627 0.3745 1.1762 0.6224 1.6648 0.3844 98 | [2020/01/29 22:06:47] - 95 274.4 51.6 0.3000 1.1459 0.6526 1.6600 0.3743 1.1520 0.6243 1.6203 0.3756 99 | [2020/01/29 22:12:14] - 96 275.9 51.3 0.3000 1.1489 0.6492 1.6601 0.3750 1.1404 0.6340 1.6693 0.3688 100 | [2020/01/29 22:17:39] - 97 273.9 51.1 0.3000 1.1473 0.6501 1.6589 0.3737 1.1670 0.5941 1.6409 0.3921 101 | [2020/01/29 22:23:04] - 98 273.8 51.1 0.3000 1.1535 0.6476 1.6638 0.3737 1.1278 0.6442 1.6033 0.3932 102 | [2020/01/29 22:28:29] - 99 274.5 50.5 0.0100 1.1484 0.6490 1.6589 0.3730 1.1383 0.6395 1.6057 0.3950 103 | [2020/01/29 22:33:55] - 100 273.6 51.1 0.0100 1.0365 0.6962 1.4841 0.4392 0.9122 0.7226 1.4262 0.4635 104 | [2020/01/29 22:39:21] - 101 274.5 51.1 0.0100 0.9313 0.7284 1.4165 0.4599 0.8809 0.7362 1.3822 0.4801 105 | [2020/01/29 22:44:47] - 102 274.8 51.2 0.0100 0.9007 0.7424 1.3899 0.4679 0.8579 0.7483 1.3751 0.4851 106 | [2020/01/29 22:50:13] - 103 274.6 51.0 0.0100 0.8750 0.7514 1.3698 0.4763 0.8309 0.7552 1.3603 0.4908 107 | [2020/01/29 22:55:39] - 104 274.5 51.2 0.0100 0.8586 0.7549 1.3587 0.4812 0.8313 0.7438 1.3781 0.4803 108 | [2020/01/29 23:01:04] - 105 274.1 50.8 0.0100 0.8373 0.7630 1.3408 0.4862 0.8336 0.7502 1.3459 0.4950 109 | [2020/01/29 23:06:30] - 106 274.3 51.4 0.0100 0.8270 0.7673 1.3303 0.4910 0.8178 0.7638 1.3338 0.5004 110 | [2020/01/29 23:11:57] - 107 275.0 51.2 0.0100 0.8115 0.7733 1.3191 0.4929 0.7869 0.7652 1.3369 0.4910 111 | [2020/01/29 23:17:23] - 108 274.9 51.0 0.0100 0.8001 0.7756 1.3076 0.4978 0.7796 0.7733 1.3256 0.4982 112 | [2020/01/29 23:22:49] - 109 275.3 51.1 0.0100 0.7877 0.7807 1.3003 0.4992 0.7720 0.7778 1.3269 0.4981 113 | [2020/01/30 18:24:48] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=150, epsilon=8, eval=False, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_startlr/preactresnet_0.3', half=False, l1=0, l2=0, lr_drop_epoch=100, lr_max=0.3, lr_one_drop=0.01, lr_schedule='onedrop', mixup=False, mixup_alpha=None, model='PreActResNet18', norm='l_inf', pgd_alpha=2, 
restarts=1, resume=110, seed=0, val=False, width_factor=10) 114 | [2020/01/30 18:24:52] - Resuming at epoch 110 115 | [2020/01/30 18:24:52] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 116 | [2020/01/30 18:30:17] - 110 273.3 52.3 0.0100 0.7765 0.7850 1.2897 0.5048 0.7937 0.7574 1.3281 0.4963 117 | [2020/01/30 18:35:48] - 111 278.8 51.5 0.0100 0.7686 0.7890 1.2822 0.5067 0.7336 0.7842 1.3155 0.4962 118 | [2020/01/30 18:41:17] - 112 277.9 51.5 0.0100 0.7542 0.7941 1.2722 0.5115 0.7264 0.7800 1.3320 0.4923 119 | [2020/01/30 18:46:49] - 113 279.2 52.4 0.0100 0.7469 0.7971 1.2655 0.5119 0.7457 0.7757 1.3227 0.4969 120 | [2020/01/30 18:52:20] - 114 279.5 51.6 0.0100 0.7381 0.7988 1.2587 0.5143 0.7753 0.7696 1.3201 0.5029 121 | [2020/01/30 18:57:51] - 115 278.6 51.7 0.0100 0.7280 0.8043 1.2513 0.5168 0.7577 0.7805 1.3043 0.5040 122 | [2020/01/30 19:03:22] - 116 278.7 52.0 0.0100 0.7205 0.8079 1.2437 0.5182 0.7442 0.7814 1.2948 0.5129 123 | [2020/01/30 19:08:54] - 117 279.5 52.1 0.0100 0.7106 0.8111 1.2371 0.5209 0.7291 0.7804 1.3080 0.5020 124 | [2020/01/30 19:14:25] - 118 278.6 52.1 0.0100 0.7016 0.8150 1.2291 0.5245 0.7136 0.7882 1.2983 0.5073 125 | [2020/01/30 19:19:57] - 119 280.2 52.3 0.0100 0.6946 0.8185 1.2240 0.5256 0.7194 0.7854 1.3283 0.4941 126 | [2020/01/30 19:25:30] - 120 279.9 52.3 0.0100 0.6856 0.8206 1.2191 0.5275 0.7097 0.7904 1.2976 0.5121 127 | [2020/01/30 19:31:04] - 121 281.3 52.3 0.0100 0.6797 0.8222 1.2122 0.5310 0.7515 0.7818 1.3196 0.5095 128 | [2020/01/30 19:36:36] - 122 280.1 52.1 0.0100 0.6723 0.8258 1.2082 0.5304 0.7077 0.7769 1.3258 0.5013 129 | [2020/01/30 19:42:09] - 123 281.1 52.5 0.0100 0.6677 0.8280 1.2032 0.5324 0.6887 0.8009 1.3218 0.4972 130 | [2020/01/30 19:47:44] - 124 281.7 52.5 0.0100 0.6606 0.8294 1.1954 0.5360 0.7136 0.7842 1.2995 0.5039 131 | [2020/01/30 19:53:17] - 125 281.5 51.9 0.0100 0.6528 0.8331 1.1875 0.5381 0.6962 0.7897 1.3235 0.5042 132 | [2020/01/30 19:58:49] - 126 279.7 52.1 0.0100 0.6495 0.8336 1.1875 0.5399 0.6955 0.8006 1.3043 0.5029 133 | [2020/01/30 20:04:21] - 127 279.7 52.1 0.0100 0.6438 0.8372 1.1833 0.5378 0.7010 0.7982 1.3086 0.5074 134 | [2020/01/30 20:09:54] - 128 281.1 52.4 0.0100 0.6369 0.8389 1.1759 0.5394 0.6764 0.8009 1.3607 0.4865 135 | [2020/01/30 20:15:29] - 129 281.8 52.5 0.0100 0.6313 0.8421 1.1719 0.5426 0.6987 0.7839 1.3255 0.5001 136 | [2020/01/30 20:21:02] - 130 280.5 52.1 0.0100 0.6242 0.8427 1.1662 0.5456 0.6939 0.8039 1.3084 0.5091 137 | [2020/01/30 20:26:36] - 131 281.1 52.4 0.0100 0.6227 0.8444 1.1627 0.5445 0.6976 0.8009 1.3031 0.5105 138 | [2020/01/30 20:32:09] - 132 281.3 52.4 0.0100 0.6165 0.8470 1.1596 0.5463 0.6888 0.7999 1.3012 0.5064 139 | [2020/01/30 20:37:42] - 133 280.4 52.4 0.0100 0.6132 0.8476 1.1574 0.5470 0.6567 0.8131 1.3037 0.5028 140 | [2020/01/30 20:43:16] - 134 281.6 52.5 0.0100 0.6048 0.8519 1.1484 0.5506 0.7123 0.7942 1.3106 0.5077 141 | [2020/01/30 20:48:50] - 135 281.7 52.3 0.0100 0.5991 0.8536 1.1424 0.5518 0.6739 0.7973 1.3249 0.5040 142 | [2020/01/30 20:54:25] - 136 281.7 52.7 0.0100 0.5966 0.8542 1.1408 0.5498 0.6684 0.8102 1.3249 0.5008 143 | [2020/01/30 20:59:59] - 137 282.0 52.6 0.0100 0.5934 0.8548 1.1396 0.5514 0.6803 0.8072 1.3115 0.5077 144 | [2020/01/30 21:05:33] - 138 281.6 52.2 0.0100 0.5895 0.8565 1.1347 0.5551 0.6727 0.8018 1.3159 0.5051 145 | [2020/01/30 21:11:07] - 139 281.7 52.4 0.0100 0.5858 0.8571 1.1298 0.5565 0.6684 0.8047 1.3181 0.5051 146 | [2020/01/30 21:16:42] - 140 
281.7 52.5 0.0100 0.5793 0.8606 1.1242 0.5584 0.6804 0.8031 1.3108 0.5088 147 | [2020/01/30 21:22:16] - 141 281.8 52.4 0.0100 0.5743 0.8622 1.1197 0.5595 0.6680 0.8085 1.3286 0.5032 148 | [2020/01/30 21:27:48] - 142 279.6 52.0 0.0100 0.5721 0.8614 1.1173 0.5588 0.6380 0.8099 1.3655 0.5004 149 | [2020/01/30 21:33:21] - 143 280.3 52.5 0.0100 0.5705 0.8636 1.1156 0.5601 0.6749 0.8022 1.3312 0.5060 150 | [2020/01/30 21:38:55] - 144 281.7 52.4 0.0100 0.5630 0.8653 1.1123 0.5623 0.6489 0.8142 1.3228 0.5030 151 | [2020/01/30 21:44:29] - 145 281.7 52.4 0.0100 0.5611 0.8675 1.1061 0.5607 0.6443 0.8055 1.3417 0.5017 152 | [2020/01/30 21:50:03] - 146 281.6 52.3 0.0100 0.5561 0.8686 1.1029 0.5643 0.6860 0.7998 1.3379 0.5008 153 | [2020/01/30 21:55:36] - 147 280.8 52.4 0.0100 0.5541 0.8700 1.1046 0.5637 0.6477 0.8102 1.3278 0.4985 154 | [2020/01/30 22:01:10] - 148 281.1 52.3 0.0100 0.5484 0.8710 1.0938 0.5667 0.6546 0.8098 1.3234 0.5049 155 | [2020/01/30 22:06:43] - 149 281.0 52.5 0.0100 0.5440 0.8732 1.0933 0.5668 0.6447 0.8085 1.3496 0.4986 156 | -------------------------------------------------------------------------------- /experiments/cifar10_trades/eval.log: -------------------------------------------------------------------------------- 1 | [2020/02/03 04:33:19] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=200, epsilon=8, eval=True, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_trades/trades', half=False, l1=0, l2=0, lr_drop_epoch=100, lr_max=0.1, lr_one_drop=0.01, lr_schedule='piecewise', mixup=False, mixup_alpha=None, model='WideResNet', norm='l_inf', pgd_alpha=2, restarts=1, resume=76, seed=0, val=False, width_factor=10) 2 | [2020/02/03 04:33:31] - Resuming at epoch 76 3 | [2020/02/03 04:33:31] - [Evaluation mode] 4 | [2020/02/03 04:33:31] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 5 | [2020/02/03 04:38:26] - 76 1.0 293.6 -1.0000 -1.0000 -1.0000 -1.0000 -1.0000 0.6297 0.8423 0.8940 0.5502 6 | [2020/02/03 04:44:39] - Namespace(attack='pgd', attack_iters=10, batch_size=128, chkpt_iters=10, cutout=False, cutout_len=None, data_dir='../cifar-data', epochs=250, epsilon=8, eval=True, fgsm_alpha=1.25, fgsm_init='random', fname='experiments/cifar10_trades/trades', half=False, l1=0, l2=0, lr_drop_epoch=100, lr_max=0.1, lr_one_drop=0.01, lr_schedule='piecewise', mixup=False, mixup_alpha=None, model='WideResNet', norm='l_inf', pgd_alpha=2, restarts=1, resume=201, seed=0, val=False, width_factor=10) 7 | [2020/02/03 04:44:48] - Resuming at epoch 201 8 | [2020/02/03 04:44:48] - [Evaluation mode] 9 | [2020/02/03 04:44:48] - Epoch Train Time Test Time LR Train Loss Train Acc Train Robust Loss Train Robust Acc Test Loss Test Acc Test Robust Loss Test Robust Acc 10 | [2020/02/03 04:49:42] - 201 0.9 293.3 -1.0000 -1.0000 -1.0000 -1.0000 -1.0000 0.5455 0.8482 0.9918 0.4940 11 | -------------------------------------------------------------------------------- /experiments/imagenet/l2_eps_3/data.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/locuslab/robust_overfitting/c47a25c5e00c8b2bb35488d962c04dd771b7e9af/experiments/imagenet/l2_eps_3/data.pth -------------------------------------------------------------------------------- /experiments/imagenet/linf_eps_4/data.pth: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/locuslab/robust_overfitting/c47a25c5e00c8b2bb35488d962c04dd771b7e9af/experiments/imagenet/linf_eps_4/data.pth
--------------------------------------------------------------------------------
/generate_validation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | import numpy as np
4 | 
5 | np.random.seed(0)
6 | m = 50000
7 | P = np.random.permutation(m)
8 | 
9 | n = 1000
10 | 
11 | 
12 | def cifar10(root):
13 |     train_set = torchvision.datasets.CIFAR10(root=root, train=True, download=True)
14 |     test_set = torchvision.datasets.CIFAR10(root=root, train=False, download=True)
15 |     return {
16 |         'train': {'data': train_set.data, 'labels': train_set.targets},
17 |         'test': {'data': test_set.data, 'labels': test_set.targets}
18 |     }
19 | 
20 | 
21 | dataset = cifar10('../cifar10-data')
22 | 
23 | val_data = dataset['train']['data'][P[:n]]
24 | val_labels = [dataset['train']['labels'][p] for p in P[:n]]
25 | train_data = dataset['train']['data'][P[n:]]
26 | train_labels = [dataset['train']['labels'][p] for p in P[n:]]
27 | 
28 | dataset['train']['data'] = train_data
29 | dataset['train']['labels'] = train_labels
30 | dataset['val'] = {
31 |     'data' : val_data,
32 |     'labels' : val_labels
33 | }
34 | dataset['split'] = n
35 | dataset['permutation'] = P
36 | 
37 | torch.save(dataset, 'cifar10_validation_split.pth')
--------------------------------------------------------------------------------
/imagenet_scripts/resume_l2.sh:
--------------------------------------------------------------------------------
1 | CHECKPOINT=~/robustness/imagenet_l2_eps_30/109_checkpoint.pt
2 | IMAGENET=~/imagenet
3 | OUT=~/robustness/imagenet_l2_eps_30_resume
4 | 
5 | CUDA_VISIBLE_DEVICES=0,1,2,3 python -m robustness.main --dataset imagenet \
6 |     --data $IMAGENET --adv-train 1 --arch resnet50 \
7 |     --eps 3.0 --attack-lr 0.5 \
8 |     --attack-steps 7 --constraint 2 \
9 |     --resume $CHECKPOINT \
10 |     --step-lr 75 --epochs 250 \
11 |     --save-ckpt-iters 1 --log-iters 4 \
12 |     --out-dir $OUT
13 | 
--------------------------------------------------------------------------------
/imagenet_scripts/resume_linf.sh:
--------------------------------------------------------------------------------
1 | CHECKPOINT=~/robustness/imagenet_linf_eps_4/checkpoint.pt.best
2 | IMAGENET=~/imagenet
3 | OUT=~/robustness/imagenet_linf_eps_4_resume/
4 | 
5 | CUDA_VISIBLE_DEVICES=0,1,2,3 python -m robustness.main --dataset imagenet \
6 |     --data $IMAGENET --adv-train 1 --arch resnet50 \
7 |     --eps 0.0156862745 --attack-lr 0.004 \
8 |     --attack-steps 5 --constraint inf \
9 |     --resume $CHECKPOINT \
10 |     --lr 0.001 --epochs 150 \
11 |     --save-ckpt-iters 1 --log-iters 1 \
12 |     --out-dir $OUT
13 | 
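A note on the budgets in the two resume scripts above: the robustness CLI appears to take --eps on the same [0,1] scale as the input images, so --eps 0.0156862745 in resume_linf.sh is exactly the l_inf budget 4/255 (matching the experiments/imagenet/linf_eps_4 directory), and --eps 3.0 in resume_l2.sh is an l_2 radius on that same scale. A one-line check of the arithmetic (a hedged aside, not part of the repository):

    assert abs(4 / 255 - 0.0156862745) < 1e-9   # l_inf budget eps=4 on the 0-255 scale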
--------------------------------------------------------------------------------
/preactresnet.py:
--------------------------------------------------------------------------------
1 | '''Pre-activation ResNet in PyTorch.
2 | 
3 | Reference:
4 | [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
5 |     Identity Mappings in Deep Residual Networks. arXiv:1603.05027
6 | '''
7 | import torch
8 | import torch.nn as nn
9 | import torch.nn.functional as F
10 | 
11 | 
12 | class PreActBlock(nn.Module):
13 |     '''Pre-activation version of the BasicBlock.'''
14 |     expansion = 1
15 | 
16 |     def __init__(self, in_planes, planes, stride=1):
17 |         super(PreActBlock, self).__init__()
18 |         self.bn1 = nn.BatchNorm2d(in_planes)
19 |         self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
20 |         self.bn2 = nn.BatchNorm2d(planes)
21 |         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
22 | 
23 |         if stride != 1 or in_planes != self.expansion*planes:
24 |             self.shortcut = nn.Sequential(
25 |                 nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
26 |             )
27 | 
28 |     def forward(self, x):
29 |         out = F.relu(self.bn1(x))
30 |         shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
31 |         out = self.conv1(out)
32 |         out = self.conv2(F.relu(self.bn2(out)))
33 |         out += shortcut
34 |         return out
35 | 
36 | 
37 | class PreActBottleneck(nn.Module):
38 |     '''Pre-activation version of the original Bottleneck module.'''
39 |     expansion = 4
40 | 
41 |     def __init__(self, in_planes, planes, stride=1):
42 |         super(PreActBottleneck, self).__init__()
43 |         self.bn1 = nn.BatchNorm2d(in_planes)
44 |         self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
45 |         self.bn2 = nn.BatchNorm2d(planes)
46 |         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
47 |         self.bn3 = nn.BatchNorm2d(planes)
48 |         self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
49 | 
50 |         if stride != 1 or in_planes != self.expansion*planes:
51 |             self.shortcut = nn.Sequential(
52 |                 nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
53 |             )
54 | 
55 |     def forward(self, x):
56 |         out = F.relu(self.bn1(x))
57 |         shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
58 |         out = self.conv1(out)
59 |         out = self.conv2(F.relu(self.bn2(out)))
60 |         out = self.conv3(F.relu(self.bn3(out)))
61 |         out += shortcut
62 |         return out
63 | 
64 | 
65 | class PreActResNet(nn.Module):
66 |     def __init__(self, block, num_blocks, num_classes=10):
67 |         super(PreActResNet, self).__init__()
68 |         self.in_planes = 64
69 | 
70 |         self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
71 |         self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
72 |         self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
73 |         self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
74 |         self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
75 |         self.bn = nn.BatchNorm2d(512 * block.expansion)
76 |         self.linear = nn.Linear(512*block.expansion, num_classes)
77 | 
78 |     def _make_layer(self, block, planes, num_blocks, stride):
79 |         strides = [stride] + [1]*(num_blocks-1)
80 |         layers = []
81 |         for stride in strides:
82 |             layers.append(block(self.in_planes, planes, stride))
83 |             self.in_planes = planes * block.expansion
84 |         return nn.Sequential(*layers)
85 | 
86 |     def forward(self, x):
87 |         out = self.conv1(x)
88 |         out = self.layer1(out)
89 |         out = self.layer2(out)
90 |         out = self.layer3(out)
91 |         out = self.layer4(out)
92 |         out = F.relu(self.bn(out))
93 |         out = F.avg_pool2d(out, 4)
94 |         out = out.view(out.size(0), -1)
95 |         out = self.linear(out)
96 |         return out
97 | 
98 | 
99 | def PreActResNet18(num_classes=10):
100 |     return PreActResNet(PreActBlock, [2,2,2,2], num_classes=num_classes)
101 | 
102 | def PreActResNet34():
103 |     return PreActResNet(PreActBlock, [3,4,6,3])
104 | 
105 | def PreActResNet50():
106 |     return PreActResNet(PreActBottleneck, [3,4,6,3])
107 | 
108 | def PreActResNet101():
109 |     return PreActResNet(PreActBottleneck, [3,4,23,3])
110 | 
111 | def PreActResNet152():
112 |     return PreActResNet(PreActBottleneck, [3,8,36,3])
113 | 
114 | 
115 | def test():
116 |     net = PreActResNet18()
117 |     y = net((torch.randn(1,3,32,32)))
118 |     print(y.size())
119 | 
120 | # test()
121 | 
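A hedged, self-contained smoke test for the architecture above (not part of the repository; it assumes only that preactresnet.py is importable). For 32x32 CIFAR inputs, the three stride-2 stages reduce the feature map to 4x4, which F.avg_pool2d(out, 4) collapses to one 512-dimensional vector per image before the linear classifier:

    # Expanded version of the commented-out test() helper above.
    import torch
    from preactresnet import PreActResNet18

    net = PreActResNet18(num_classes=10)
    x = torch.randn(2, 3, 32, 32)      # two CIFAR-sized RGB images
    with torch.no_grad():
        logits = net(x)
    assert logits.shape == (2, 10)     # one logit per class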
102 | def PreActResNet34(): 103 | return PreActResNet(PreActBlock, [3,4,6,3]) 104 | 105 | def PreActResNet50(): 106 | return PreActResNet(PreActBottleneck, [3,4,6,3]) 107 | 108 | def PreActResNet101(): 109 | return PreActResNet(PreActBottleneck, [3,4,23,3]) 110 | 111 | def PreActResNet152(): 112 | return PreActResNet(PreActBottleneck, [3,8,36,3]) 113 | 114 | 115 | def test(): 116 | net = PreActResNet18() 117 | y = net((torch.randn(1,3,32,32))) 118 | print(y.size()) 119 | 120 | # test() 121 | -------------------------------------------------------------------------------- /train_cifar.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import sys 4 | import time 5 | import math 6 | 7 | import numpy as np 8 | import torch 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | from torch.autograd import Variable 12 | 13 | import os 14 | 15 | from wideresnet import WideResNet 16 | from preactresnet import PreActResNet18 17 | 18 | from utils import * 19 | 20 | mu = torch.tensor(cifar10_mean).view(3,1,1).cuda() 21 | std = torch.tensor(cifar10_std).view(3,1,1).cuda() 22 | 23 | def normalize(X): 24 | return (X - mu)/std 25 | 26 | upper_limit, lower_limit = 1,0 27 | 28 | 29 | def clamp(X, lower_limit, upper_limit): 30 | return torch.max(torch.min(X, upper_limit), lower_limit) 31 | 32 | 33 | class Batches(): 34 | def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False): 35 | self.dataset = dataset 36 | self.batch_size = batch_size 37 | self.set_random_choices = set_random_choices 38 | self.dataloader = torch.utils.data.DataLoader( 39 | dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last 40 | ) 41 | 42 | def __iter__(self): 43 | if self.set_random_choices: 44 | self.dataset.set_random_choices() 45 | return ({'input': x.to(device).float(), 'target': y.to(device).long()} for (x,y) in self.dataloader) 46 | 47 | def __len__(self): 48 | return len(self.dataloader) 49 | 50 | 51 | def mixup_data(x, y, alpha=1.0): 52 | '''Returns mixed inputs, pairs of targets, and lambda''' 53 | if alpha > 0: 54 | lam = np.random.beta(alpha, alpha) 55 | else: 56 | lam = 1 57 | 58 | batch_size = x.size()[0] 59 | index = torch.randperm(batch_size).cuda() 60 | 61 | mixed_x = lam * x + (1 - lam) * x[index, :] 62 | y_a, y_b = y, y[index] 63 | return mixed_x, y_a, y_b, lam 64 | 65 | 66 | def mixup_criterion(criterion, pred, y_a, y_b, lam): 67 | return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) 68 | 69 | 70 | def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, 71 | norm, early_stop=False, 72 | mixup=False, y_a=None, y_b=None, lam=None): 73 | max_loss = torch.zeros(y.shape[0]).cuda() 74 | max_delta = torch.zeros_like(X).cuda() 75 | for _ in range(restarts): 76 | delta = torch.zeros_like(X).cuda() 77 | if norm == "l_inf": 78 | delta.uniform_(-epsilon, epsilon) 79 | elif norm == "l_2": 80 | delta.normal_() 81 | d_flat = delta.view(delta.size(0),-1) 82 | n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1) 83 | r = torch.zeros_like(n).uniform_(0, 1) 84 | delta *= r/n*epsilon 85 | else: 86 | raise ValueError 87 | delta = clamp(delta, lower_limit-X, upper_limit-X) 88 | delta.requires_grad = True 89 | for _ in range(attack_iters): 90 | output = model(normalize(X + delta)) 91 | if early_stop: 92 | index = torch.where(output.max(1)[1] == y)[0] 93 | else: 94 | index = slice(None,None,None) 95 | if not 
isinstance(index, slice) and len(index) == 0: 96 | break 97 | if mixup: 98 | criterion = nn.CrossEntropyLoss() 99 | loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 100 | else: 101 | loss = F.cross_entropy(output, y) 102 | loss.backward() 103 | grad = delta.grad.detach() 104 | d = delta[index, :, :, :] 105 | g = grad[index, :, :, :] 106 | x = X[index, :, :, :] 107 | if norm == "l_inf": 108 | d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon) 109 | elif norm == "l_2": 110 | g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1) 111 | scaled_g = g/(g_norm + 1e-10) 112 | d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d) 113 | d = clamp(d, lower_limit - x, upper_limit - x) 114 | delta.data[index, :, :, :] = d 115 | delta.grad.zero_() 116 | if mixup: 117 | criterion = nn.CrossEntropyLoss(reduction='none') 118 | all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 119 | else: 120 | all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none') 121 | max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss] 122 | max_loss = torch.max(max_loss, all_loss) 123 | return max_delta 124 | 125 | 126 | def get_args(): 127 | parser = argparse.ArgumentParser() 128 | parser.add_argument('--model', default='PreActResNet18') 129 | parser.add_argument('--l2', default=0, type=float) 130 | parser.add_argument('--l1', default=0, type=float) 131 | parser.add_argument('--batch-size', default=128, type=int) 132 | parser.add_argument('--data-dir', default='../cifar-data', type=str) 133 | parser.add_argument('--epochs', default=200, type=int) 134 | parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise', 'linear', 'piecewisesmoothed', 'piecewisezoom', 'onedrop', 'multipledecay', 'cosine']) 135 | parser.add_argument('--lr-max', default=0.1, type=float) 136 | parser.add_argument('--lr-one-drop', default=0.01, type=float) 137 | parser.add_argument('--lr-drop-epoch', default=100, type=int) 138 | parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'free', 'none']) 139 | parser.add_argument('--epsilon', default=8, type=int) 140 | parser.add_argument('--attack-iters', default=10, type=int) 141 | parser.add_argument('--restarts', default=1, type=int) 142 | parser.add_argument('--pgd-alpha', default=2, type=float) 143 | parser.add_argument('--fgsm-alpha', default=1.25, type=float) 144 | parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2']) 145 | parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous']) 146 | parser.add_argument('--fname', default='cifar_model', type=str) 147 | parser.add_argument('--seed', default=0, type=int) 148 | parser.add_argument('--half', action='store_true') 149 | parser.add_argument('--width-factor', default=10, type=int) 150 | parser.add_argument('--resume', default=0, type=int) 151 | parser.add_argument('--cutout', action='store_true') 152 | parser.add_argument('--cutout-len', type=int) 153 | parser.add_argument('--mixup', action='store_true') 154 | parser.add_argument('--mixup-alpha', type=float) 155 | parser.add_argument('--eval', action='store_true') 156 | parser.add_argument('--val', action='store_true') 157 | parser.add_argument('--chkpt-iters', default=10, type=int) 158 | return parser.parse_args() 159 | 160 | 161 | def main(): 162 | args = get_args() 163 | 164 | if not os.path.exists(args.fname): 165 | 
os.makedirs(args.fname) 166 | 167 | logger = logging.getLogger(__name__) 168 | logging.basicConfig( 169 | format='[%(asctime)s] - %(message)s', 170 | datefmt='%Y/%m/%d %H:%M:%S', 171 | level=logging.DEBUG, 172 | handlers=[ 173 | logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')), 174 | logging.StreamHandler() 175 | ]) 176 | 177 | logger.info(args) 178 | 179 | np.random.seed(args.seed) 180 | torch.manual_seed(args.seed) 181 | torch.cuda.manual_seed(args.seed) 182 | 183 | transforms = [Crop(32, 32), FlipLR()] 184 | if args.cutout: 185 | transforms.append(Cutout(args.cutout_len, args.cutout_len)) 186 | if args.val: 187 | try: 188 | dataset = torch.load("cifar10_validation_split.pth") 189 | except: 190 | print("Couldn't find a dataset with a validation split, did you run " 191 | "generate_validation.py?") 192 | return 193 | val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels'])) 194 | val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=2) 195 | else: 196 | dataset = cifar10(args.data_dir) 197 | train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.), 198 | dataset['train']['labels'])) 199 | train_set_x = Transform(train_set, transforms) 200 | train_batches = Batches(train_set_x, args.batch_size, shuffle=True, set_random_choices=True, num_workers=2) 201 | 202 | test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels'])) 203 | test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=2) 204 | 205 | epsilon = (args.epsilon / 255.) 206 | pgd_alpha = (args.pgd_alpha / 255.) 207 | 208 | if args.model == 'PreActResNet18': 209 | model = PreActResNet18() 210 | elif args.model == 'WideResNet': 211 | model = WideResNet(34, 10, widen_factor=args.width_factor, dropRate=0.0) 212 | else: 213 | raise ValueError("Unknown model") 214 | 215 | model = nn.DataParallel(model).cuda() 216 | model.train() 217 | 218 | if args.l2: 219 | decay, no_decay = [], [] 220 | for name,param in model.named_parameters(): 221 | if 'bn' not in name and 'bias' not in name: 222 | decay.append(param) 223 | else: 224 | no_decay.append(param) 225 | params = [{'params':decay, 'weight_decay':args.l2}, 226 | {'params':no_decay, 'weight_decay': 0 }] 227 | else: 228 | params = model.parameters() 229 | 230 | opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=5e-4) 231 | 232 | criterion = nn.CrossEntropyLoss() 233 | 234 | if args.attack == 'free': 235 | delta = torch.zeros(args.batch_size, 3, 32, 32).cuda() 236 | delta.requires_grad = True 237 | elif args.attack == 'fgsm' and args.fgsm_init == 'previous': 238 | delta = torch.zeros(args.batch_size, 3, 32, 32).cuda() 239 | delta.requires_grad = True 240 | 241 | if args.attack == 'free': 242 | epochs = int(math.ceil(args.epochs / args.attack_iters)) 243 | else: 244 | epochs = args.epochs 245 | 246 | if args.lr_schedule == 'superconverge': 247 | lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0] 248 | elif args.lr_schedule == 'piecewise': 249 | def lr_schedule(t): 250 | if t / args.epochs < 0.5: 251 | return args.lr_max 252 | elif t / args.epochs < 0.75: 253 | return args.lr_max / 10. 254 | else: 255 | return args.lr_max / 100. 
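# Illustration: with the defaults --lr-max 0.1 and --epochs 200 from get_args
# above, this 'piecewise' branch gives lr = 0.1 for epochs [0, 100),
# 0.01 for [100, 150), and 0.001 for [150, 200).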
256 | elif args.lr_schedule == 'linear': 257 | lr_schedule = lambda t: np.interp([t], [0, args.epochs // 3, args.epochs * 2 // 3, args.epochs], [args.lr_max, args.lr_max, args.lr_max / 10, args.lr_max / 100])[0] 258 | elif args.lr_schedule == 'onedrop': 259 | def lr_schedule(t): 260 | if t < args.lr_drop_epoch: 261 | return args.lr_max 262 | else: 263 | return args.lr_one_drop 264 | elif args.lr_schedule == 'multipledecay': 265 | def lr_schedule(t): 266 | return args.lr_max - (t//(args.epochs//10))*(args.lr_max/10) 267 | elif args.lr_schedule == 'cosine': 268 | def lr_schedule(t): 269 | return args.lr_max * 0.5 * (1 + np.cos(t / args.epochs * np.pi)) 270 | 271 | 272 | best_test_robust_acc = 0 273 | best_val_robust_acc = 0 274 | if args.resume: 275 | start_epoch = args.resume 276 | model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth'))) 277 | opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth'))) 278 | logger.info(f'Resuming at epoch {start_epoch}') 279 | 280 | best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc'] 281 | if args.val: 282 | best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc'] 283 | else: 284 | start_epoch = 0 285 | 286 | if args.eval: 287 | if not args.resume: 288 | logger.info("No model loaded to evaluate, specify with --resume FNAME") 289 | return 290 | logger.info("[Evaluation mode]") 291 | 292 | logger.info('Epoch \t Train Time \t Test Time \t LR \t \t Train Loss \t Train Acc \t Train Robust Loss \t Train Robust Acc \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc') 293 | for epoch in range(start_epoch, epochs): 294 | model.train() 295 | start_time = time.time() 296 | train_loss = 0 297 | train_acc = 0 298 | train_robust_loss = 0 299 | train_robust_acc = 0 300 | train_n = 0 301 | for i, batch in enumerate(train_batches): 302 | if args.eval: 303 | break 304 | X, y = batch['input'], batch['target'] 305 | if args.mixup: 306 | X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha) 307 | X, y_a, y_b = map(Variable, (X, y_a, y_b)) 308 | lr = lr_schedule(epoch + (i + 1) / len(train_batches)) 309 | opt.param_groups[0].update(lr=lr) 310 | 311 | if args.attack == 'pgd': 312 | # Random initialization 313 | if args.mixup: 314 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, mixup=True, y_a=y_a, y_b=y_b, lam=lam) 315 | else: 316 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm) 317 | delta = delta.detach() 318 | elif args.attack == 'fgsm': 319 | delta = attack_pgd(model, X, y, epsilon, args.fgsm_alpha*epsilon, 1, 1, args.norm) 320 | # Standard training 321 | elif args.attack == 'none': 322 | delta = torch.zeros_like(X) 323 | 324 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 325 | if args.mixup: 326 | robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam) 327 | else: 328 | robust_loss = criterion(robust_output, y) 329 | 330 | if args.l1: 331 | for name,param in model.named_parameters(): 332 | if 'bn' not in name and 'bias' not in name: 333 | robust_loss += args.l1*param.abs().sum() 334 | 335 | opt.zero_grad() 336 | robust_loss.backward() 337 | opt.step() 338 | 339 | output = model(normalize(X)) 340 | if args.mixup: 341 | loss = mixup_criterion(criterion, output, y_a, y_b, lam) 342 | else: 343 | loss = criterion(output, y) 344 | 345 | train_robust_loss 
+= robust_loss.item() * y.size(0) 346 | train_robust_acc += (robust_output.max(1)[1] == y).sum().item() 347 | train_loss += loss.item() * y.size(0) 348 | train_acc += (output.max(1)[1] == y).sum().item() 349 | train_n += y.size(0) 350 | 351 | train_time = time.time() 352 | 353 | model.eval() 354 | test_loss = 0 355 | test_acc = 0 356 | test_robust_loss = 0 357 | test_robust_acc = 0 358 | test_n = 0 359 | for i, batch in enumerate(test_batches): 360 | X, y = batch['input'], batch['target'] 361 | 362 | # Random initialization 363 | if args.attack == 'none': 364 | delta = torch.zeros_like(X) 365 | else: 366 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=args.eval) 367 | delta = delta.detach() 368 | 369 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 370 | robust_loss = criterion(robust_output, y) 371 | 372 | output = model(normalize(X)) 373 | loss = criterion(output, y) 374 | 375 | test_robust_loss += robust_loss.item() * y.size(0) 376 | test_robust_acc += (robust_output.max(1)[1] == y).sum().item() 377 | test_loss += loss.item() * y.size(0) 378 | test_acc += (output.max(1)[1] == y).sum().item() 379 | test_n += y.size(0) 380 | 381 | test_time = time.time() 382 | 383 | if args.val: 384 | val_loss = 0 385 | val_acc = 0 386 | val_robust_loss = 0 387 | val_robust_acc = 0 388 | val_n = 0 389 | for i, batch in enumerate(val_batches): 390 | X, y = batch['input'], batch['target'] 391 | 392 | # Random initialization 393 | if args.attack == 'none': 394 | delta = torch.zeros_like(X) 395 | else: 396 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=args.eval) 397 | delta = delta.detach() 398 | 399 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 400 | robust_loss = criterion(robust_output, y) 401 | 402 | output = model(normalize(X)) 403 | loss = criterion(output, y) 404 | 405 | val_robust_loss += robust_loss.item() * y.size(0) 406 | val_robust_acc += (robust_output.max(1)[1] == y).sum().item() 407 | val_loss += loss.item() * y.size(0) 408 | val_acc += (output.max(1)[1] == y).sum().item() 409 | val_n += y.size(0) 410 | 411 | if not args.eval: 412 | logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f', 413 | epoch, train_time - start_time, test_time - train_time, lr, 414 | train_loss/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n, 415 | test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n) 416 | 417 | if args.val: 418 | logger.info('validation %.4f \t %.4f \t %.4f \t %.4f', 419 | val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n) 420 | 421 | if val_robust_acc/val_n > best_val_robust_acc: 422 | torch.save({ 423 | 'state_dict':model.state_dict(), 424 | 'test_robust_acc':test_robust_acc/test_n, 425 | 'test_robust_loss':test_robust_loss/test_n, 426 | 'test_loss':test_loss/test_n, 427 | 'test_acc':test_acc/test_n, 428 | 'val_robust_acc':val_robust_acc/val_n, 429 | 'val_robust_loss':val_robust_loss/val_n, 430 | 'val_loss':val_loss/val_n, 431 | 'val_acc':val_acc/val_n, 432 | }, os.path.join(args.fname, f'model_val.pth')) 433 | best_val_robust_acc = val_robust_acc/val_n 434 | 435 | # save checkpoint 436 | if (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs: 437 | torch.save(model.state_dict(), os.path.join(args.fname, 
f'model_{epoch}.pth')) 438 | torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth')) 439 | 440 | # save best 441 | if test_robust_acc/test_n > best_test_robust_acc: 442 | torch.save({ 443 | 'state_dict':model.state_dict(), 444 | 'test_robust_acc':test_robust_acc/test_n, 445 | 'test_robust_loss':test_robust_loss/test_n, 446 | 'test_loss':test_loss/test_n, 447 | 'test_acc':test_acc/test_n, 448 | }, os.path.join(args.fname, f'model_best.pth')) 449 | best_test_robust_acc = test_robust_acc/test_n 450 | else: 451 | logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f', 452 | epoch, train_time - start_time, test_time - train_time, -1, 453 | -1, -1, -1, -1, 454 | test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n) 455 | return 456 | 457 | 458 | if __name__ == "__main__": 459 | main() 460 | -------------------------------------------------------------------------------- /train_cifar100.py: -------------------------------------------------------------------------------- 1 | 2 | import argparse 3 | import logging 4 | import sys 5 | import time 6 | import math 7 | 8 | import numpy as np 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | from torch.autograd import Variable 13 | from torchvision import datasets, transforms 14 | 15 | import os 16 | 17 | from wideresnet import WideResNet 18 | from preactresnet import PreActResNet18 19 | 20 | CIFAR100_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343) 21 | CIFAR100_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404) 22 | 23 | mu = torch.tensor(CIFAR100_MEAN).view(3,1,1).cuda() 24 | std = torch.tensor(CIFAR100_STD).view(3,1,1).cuda() 25 | 26 | def normalize(X): 27 | return (X - mu)/std 28 | 29 | upper_limit, lower_limit = 1,0 30 | 31 | 32 | def clamp(X, lower_limit, upper_limit): 33 | return torch.max(torch.min(X, upper_limit), lower_limit) 34 | 35 | def mixup_data(x, y, alpha=1.0): 36 | '''Returns mixed inputs, pairs of targets, and lambda''' 37 | if alpha > 0: 38 | lam = np.random.beta(alpha, alpha) 39 | else: 40 | lam = 1 41 | 42 | batch_size = x.size()[0] 43 | index = torch.randperm(batch_size).cuda() 44 | 45 | mixed_x = lam * x + (1 - lam) * x[index, :] 46 | y_a, y_b = y, y[index] 47 | return mixed_x, y_a, y_b, lam 48 | 49 | 50 | def mixup_criterion(criterion, pred, y_a, y_b, lam): 51 | return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) 52 | 53 | 54 | def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, 55 | norm, early_stop=False, 56 | mixup=False, y_a=None, y_b=None, lam=None): 57 | max_loss = torch.zeros(y.shape[0]).cuda() 58 | max_delta = torch.zeros_like(X).cuda() 59 | for _ in range(restarts): 60 | delta = torch.zeros_like(X).cuda() 61 | if norm == "l_inf": 62 | delta.uniform_(-epsilon, epsilon) 63 | elif norm == "l_2": 64 | delta.normal_() 65 | d_flat = delta.view(delta.size(0),-1) 66 | n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1) 67 | r = torch.zeros_like(n).uniform_(0, 1) 68 | delta *= r/n*epsilon 69 | else: 70 | raise ValueError 71 | delta = clamp(delta, lower_limit-X, upper_limit-X) 72 | delta.requires_grad = True 73 | for _ in range(attack_iters): 74 | output = model(normalize(X + delta)) 75 | if early_stop: 76 | index = torch.where(output.max(1)[1] == y)[0] 77 | else: 78 | index = slice(None,None,None) 79 | if not isinstance(index, slice) and len(index) == 0: 80 | break 81 | if mixup: 82 | criterion = 
nn.CrossEntropyLoss() 83 | loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 84 | else: 85 | loss = F.cross_entropy(output, y) 86 | loss.backward() 87 | grad = delta.grad.detach() 88 | d = delta[index, :, :, :] 89 | g = grad[index, :, :, :] 90 | x = X[index, :, :, :] 91 | if norm == "l_inf": 92 | d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon) 93 | elif norm == "l_2": 94 | g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1) 95 | scaled_g = g/(g_norm + 1e-10) 96 | d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d) 97 | d = clamp(d, lower_limit - x, upper_limit - x) 98 | delta.data[index, :, :, :] = d 99 | delta.grad.zero_() 100 | if mixup: 101 | criterion = nn.CrossEntropyLoss(reduction='none') 102 | all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 103 | else: 104 | all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none') 105 | max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss] 106 | max_loss = torch.max(max_loss, all_loss) 107 | return max_delta 108 | 109 | 110 | def get_args(): 111 | parser = argparse.ArgumentParser() 112 | parser.add_argument('--model', default='PreActResNet18') 113 | parser.add_argument('--l2', default=0, type=float) 114 | parser.add_argument('--l1', default=0, type=float) 115 | parser.add_argument('--batch-size', default=128, type=int) 116 | parser.add_argument('--data-dir', default='../cifar100-data', type=str) 117 | parser.add_argument('--epochs', default=200, type=int) 118 | parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise']) 119 | parser.add_argument('--lr-max', default=0.1, type=float) 120 | parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'none']) 121 | parser.add_argument('--epsilon', default=8, type=int) 122 | parser.add_argument('--attack-iters', default=10, type=int) 123 | parser.add_argument('--restarts', default=1, type=int) 124 | parser.add_argument('--pgd-alpha', default=2, type=float) 125 | parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2']) 126 | parser.add_argument('--fname', default='cifar100_model', type=str) 127 | parser.add_argument('--seed', default=0, type=int) 128 | parser.add_argument('--half', action='store_true') 129 | parser.add_argument('--width-factor', default=10, type=int) 130 | parser.add_argument('--resume', default=0, type=int) 131 | parser.add_argument('--cutout', action='store_true') 132 | parser.add_argument('--cutout-len', type=int) 133 | parser.add_argument('--mixup', action='store_true') 134 | parser.add_argument('--mixup-alpha', type=float) 135 | return parser.parse_args() 136 | 137 | 138 | def main(): 139 | args = get_args() 140 | 141 | if not os.path.exists(args.fname): 142 | os.makedirs(args.fname) 143 | 144 | logger = logging.getLogger(__name__) 145 | logging.basicConfig( 146 | format='[%(asctime)s] - %(message)s', 147 | datefmt='%Y/%m/%d %H:%M:%S', 148 | level=logging.DEBUG, 149 | handlers=[ 150 | logging.FileHandler(os.path.join(args.fname,'output.log')), 151 | logging.StreamHandler() 152 | ]) 153 | 154 | logger.info(args) 155 | 156 | np.random.seed(args.seed) 157 | torch.manual_seed(args.seed) 158 | torch.cuda.manual_seed(args.seed) 159 | 160 | train_transform = transforms.Compose([ 161 | transforms.RandomCrop(32, padding=4), 162 | transforms.RandomHorizontalFlip(), 163 | transforms.RandomRotation(15), 164 | transforms.ToTensor(), 165 | ]) 166 | 
    test_transform = transforms.Compose([
167 |         transforms.ToTensor(),
168 |     ])
169 |     num_workers = 2
170 |     train_dataset = datasets.CIFAR100(
171 |         args.data_dir, train=True, transform=train_transform, download=True)
172 |     test_dataset = datasets.CIFAR100(
173 |         args.data_dir, train=False, transform=test_transform, download=True)
174 |     train_loader = torch.utils.data.DataLoader(
175 |         dataset=train_dataset,
176 |         batch_size=args.batch_size,
177 |         shuffle=True,
178 |         pin_memory=True,
179 |         num_workers=num_workers,
180 |     )
181 |     test_loader = torch.utils.data.DataLoader(
182 |         dataset=test_dataset,
183 |         batch_size=args.batch_size,
184 |         shuffle=False,
185 |         pin_memory=True,
186 |         num_workers=num_workers,
187 |     )
188 | 
189 |     epsilon = (args.epsilon / 255.)
190 |     pgd_alpha = (args.pgd_alpha / 255.)
191 | 
192 |     if args.model == 'PreActResNet18':
193 |         model = PreActResNet18(num_classes=100)
194 |     elif args.model == 'WideResNet':
195 |         model = WideResNet(34, 100, widen_factor=args.width_factor, dropRate=0.0)  # CIFAR-100 has 100 classes
196 |     else:
197 |         raise ValueError("Unknown model")
198 | 
199 |     model = model.cuda()
200 |     model.train()
201 | 
202 |     if args.l2:
203 |         decay, no_decay = [], []
204 |         for name,param in model.named_parameters():
205 |             if 'bn' not in name and 'bias' not in name:
206 |                 decay.append(param)
207 |             else:
208 |                 no_decay.append(param)
209 |         params = [{'params':decay, 'weight_decay':args.l2},
210 |                   {'params':no_decay, 'weight_decay': 0 }]
211 |     else:
212 |         params = model.parameters()
213 | 
214 |     opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=5e-4)
215 | 
216 |     criterion = nn.CrossEntropyLoss()
217 | 
218 |     epochs = args.epochs
219 | 
220 |     if args.lr_schedule == 'superconverge':
221 |         lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
222 |         # lr_schedule = lambda t: np.interp([t], [0, args.epochs], [0, args.lr_max])[0]
223 |     elif args.lr_schedule == 'piecewise':
224 |         def lr_schedule(t):
225 |             if t / args.epochs < 0.5:
226 |                 return args.lr_max
227 |             elif t / args.epochs < 0.75:
228 |                 return args.lr_max / 10.
229 |             else:
230 |                 return args.lr_max / 100.
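# Example invocation (hypothetical output directory; all flags are the ones
# defined in get_args above):
#
#     python train_cifar100.py --fname cifar100_pgd_model --attack pgd \
#         --epsilon 8 --attack-iters 10 --pgd-alpha 2 \
#         --lr-schedule piecewise --lr-max 0.1 --epochs 200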
231 | 232 | 233 | if args.resume: 234 | start_epoch = args.resume 235 | model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth'))) 236 | opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth'))) 237 | logger.info(f'Resuming at epoch {start_epoch}') 238 | else: 239 | start_epoch = 0 240 | 241 | logger.info('Epoch \t Train Time \t Test Time \t LR \t \t Train Loss \t Train Acc \t Train Robust Loss \t Train Robust Acc \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc') 242 | for epoch in range(start_epoch, epochs): 243 | model.train() 244 | start_time = time.time() 245 | train_loss = 0 246 | train_acc = 0 247 | train_robust_loss = 0 248 | train_robust_acc = 0 249 | train_n = 0 250 | for i, (X, y) in enumerate(train_loader): 251 | X, y = X.cuda(), y.cuda() 252 | if args.mixup: 253 | X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha) 254 | X, y_a, y_b = map(Variable, (X, y_a, y_b)) 255 | lr = lr_schedule(epoch + (i + 1) / len(train_loader)) 256 | opt.param_groups[0].update(lr=lr) 257 | 258 | if args.attack == 'pgd': 259 | # Random initialization 260 | if args.mixup: 261 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, mixup=True, y_a=y_a, y_b=y_b, lam=lam) 262 | else: 263 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm) 264 | delta = delta.detach() 265 | 266 | # Standard training 267 | elif args.attack == 'none': 268 | delta = torch.zeros_like(X) 269 | 270 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 271 | if args.mixup: 272 | robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam) 273 | else: 274 | robust_loss = criterion(robust_output, y) 275 | 276 | if args.l1: 277 | for name,param in model.named_parameters(): 278 | if 'bn' not in name and 'bias' not in name: 279 | robust_loss += args.l1*param.abs().sum() 280 | 281 | opt.zero_grad() 282 | robust_loss.backward() 283 | opt.step() 284 | 285 | output = model(normalize(X)) 286 | if args.mixup: 287 | loss = mixup_criterion(criterion, output, y_a, y_b, lam) 288 | else: 289 | loss = criterion(output, y) 290 | 291 | train_robust_loss += robust_loss.item() * y.size(0) 292 | train_robust_acc += (robust_output.max(1)[1] == y).sum().item() 293 | train_loss += loss.item() * y.size(0) 294 | train_acc += (output.max(1)[1] == y).sum().item() 295 | train_n += y.size(0) 296 | 297 | train_time = time.time() 298 | 299 | model.eval() 300 | test_loss = 0 301 | test_acc = 0 302 | test_robust_loss = 0 303 | test_robust_acc = 0 304 | test_n = 0 305 | for i, (X, y) in enumerate(test_loader): 306 | X, y = X.cuda(), y.cuda() 307 | 308 | # Random initialization 309 | if args.attack == 'none': 310 | delta = torch.zeros_like(X) 311 | else: 312 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm) 313 | delta = delta.detach() 314 | 315 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 316 | robust_loss = criterion(robust_output, y) 317 | 318 | output = model(normalize(X)) 319 | loss = criterion(output, y) 320 | 321 | test_robust_loss += robust_loss.item() * y.size(0) 322 | test_robust_acc += (robust_output.max(1)[1] == y).sum().item() 323 | test_loss += loss.item() * y.size(0) 324 | test_acc += (output.max(1)[1] == y).sum().item() 325 | test_n += y.size(0) 326 | 327 | test_time = time.time() 328 | logger.info('%d \t %.1f \t \t 
%.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f', 329 | epoch, train_time - start_time, test_time - train_time, lr, 330 | train_loss/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n, 331 | test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n) 332 | torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth')) 333 | torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth')) 334 | 335 | 336 | if __name__ == "__main__": 337 | main() 338 | -------------------------------------------------------------------------------- /train_cifar_semisupervised_half.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import sys 4 | import time 5 | import math 6 | import pickle 7 | 8 | import numpy as np 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | from torch.autograd import Variable 13 | 14 | import os 15 | 16 | from wideresnet import WideResNet 17 | from preactresnet import PreActResNet18 18 | 19 | from utils import * 20 | 21 | mu = torch.tensor(cifar10_mean).view(3,1,1).cuda() 22 | std = torch.tensor(cifar10_std).view(3,1,1).cuda() 23 | 24 | def normalize(X): 25 | return (X - mu)/std 26 | 27 | upper_limit, lower_limit = 1,0 28 | 29 | def cifar10_unlabeled(aux_path, aux_take_amount=None): 30 | with open(aux_path, 'rb') as f: 31 | aux = pickle.load(f) 32 | aux_data = aux['data'] 33 | aux_targets = aux['extrapolated_targets'] 34 | if aux_take_amount is not None: 35 | take_inds = np.random.choice(len(aux_data), aux_take_amount, replace=False) 36 | aux_data = aux_data[take_inds] 37 | aux_targets = aux_targets[take_inds] 38 | return { 39 | 'train': {'data': aux_data, 'labels': aux_targets} 40 | } 41 | 42 | def clamp(X, lower_limit, upper_limit): 43 | return torch.max(torch.min(X, upper_limit), lower_limit) 44 | 45 | 46 | class Batches(): 47 | def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False): 48 | self.dataset = dataset 49 | self.batch_size = batch_size 50 | self.set_random_choices = set_random_choices 51 | self.dataloader = torch.utils.data.DataLoader( 52 | dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last 53 | ) 54 | 55 | def __iter__(self): 56 | if self.set_random_choices: 57 | self.dataset.set_random_choices() 58 | return ({'input': x.to(device).float(), 'target': y.to(device).long()} for (x,y) in self.dataloader) 59 | 60 | def __next__(self): 61 | if self.set_random_choices: 62 | self.dataset.set_random_choices() 63 | x, y = next(iter(self.dataloader)) 64 | return {'input': x.to(device).float(), 'target': y.to(device).long()} 65 | 66 | def __len__(self): 67 | return len(self.dataloader) 68 | 69 | 70 | def mixup_data(x, y, alpha=1.0): 71 | '''Returns mixed inputs, pairs of targets, and lambda''' 72 | if alpha > 0: 73 | lam = np.random.beta(alpha, alpha) 74 | else: 75 | lam = 1 76 | 77 | batch_size = x.size()[0] 78 | index = torch.randperm(batch_size).cuda() 79 | 80 | mixed_x = lam * x + (1 - lam) * x[index, :] 81 | y_a, y_b = y, y[index] 82 | return mixed_x, y_a, y_b, lam 83 | 84 | 85 | def mixup_criterion(criterion, pred, y_a, y_b, lam): 86 | return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) 87 | 88 | 89 | def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, 90 | norm, early_stop=False, 91 | mixup=False, 
y_a=None, y_b=None, lam=None): 92 | max_loss = torch.zeros(y.shape[0]).cuda() 93 | max_delta = torch.zeros_like(X).cuda() 94 | for _ in range(restarts): 95 | delta = torch.zeros_like(X).cuda() 96 | if norm == "l_inf": 97 | delta.uniform_(-epsilon, epsilon) 98 | elif norm == "l_2": 99 | delta.normal_() 100 | d_flat = delta.view(delta.size(0),-1) 101 | n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1) 102 | r = torch.zeros_like(n).uniform_(0, 1) 103 | delta *= r/n*epsilon 104 | else: 105 | raise ValueError 106 | delta = clamp(delta, lower_limit-X, upper_limit-X) 107 | delta.requires_grad = True 108 | for _ in range(attack_iters): 109 | output = model(normalize(X + delta)) 110 | if early_stop: 111 | index = torch.where(output.max(1)[1] == y)[0] 112 | else: 113 | index = slice(None,None,None) 114 | if not isinstance(index, slice) and len(index) == 0: 115 | break 116 | if mixup: 117 | criterion = nn.CrossEntropyLoss() 118 | loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 119 | else: 120 | loss = F.cross_entropy(output, y) 121 | loss.backward() 122 | grad = delta.grad.detach() 123 | d = delta[index, :, :, :] 124 | g = grad[index, :, :, :] 125 | x = X[index, :, :, :] 126 | if norm == "l_inf": 127 | d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon) 128 | elif norm == "l_2": 129 | g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1) 130 | scaled_g = g/(g_norm + 1e-10) 131 | d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d) 132 | d = clamp(d, lower_limit - x, upper_limit - x) 133 | delta.data[index, :, :, :] = d 134 | delta.grad.zero_() 135 | if mixup: 136 | criterion = nn.CrossEntropyLoss(reduction='none') 137 | all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 138 | else: 139 | all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none') 140 | max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss] 141 | max_loss = torch.max(max_loss, all_loss) 142 | return max_delta 143 | 144 | 145 | def get_args(): 146 | parser = argparse.ArgumentParser() 147 | parser.add_argument('--model', default='PreActResNet18') 148 | parser.add_argument('--l2', default=0, type=float) 149 | parser.add_argument('--l1', default=0, type=float) 150 | parser.add_argument('--batch-size', default=128, type=int) 151 | parser.add_argument('--data-dir', default='../cifar-data', type=str) 152 | parser.add_argument('--epochs', default=200, type=int) 153 | parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise']) 154 | parser.add_argument('--lr-max', default=0.1, type=float) 155 | parser.add_argument('--lr-one-drop', default=0.01, type=float) 156 | parser.add_argument('--lr-drop-epoch', default=100, type=int) 157 | parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'none']) 158 | parser.add_argument('--epsilon', default=8, type=int) 159 | parser.add_argument('--attack-iters', default=10, type=int) 160 | parser.add_argument('--restarts', default=1, type=int) 161 | parser.add_argument('--pgd-alpha', default=2, type=float) 162 | parser.add_argument('--fgsm-alpha', default=1, type=float) 163 | parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2']) 164 | parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous']) 165 | parser.add_argument('--fname', default='cifar_model', type=str) 166 | parser.add_argument('--seed', default=0, type=int) 167 | 
    parser.add_argument('--half', action='store_true')
168 |     parser.add_argument('--width-factor', default=10, type=int)
169 |     parser.add_argument('--resume', default=0, type=int)
170 |     parser.add_argument('--cutout', action='store_true')
171 |     parser.add_argument('--cutout-len', type=int)
172 |     parser.add_argument('--mixup', action='store_true')
173 |     parser.add_argument('--mixup-alpha', type=float)
174 |     parser.add_argument('--eval', action='store_true')
175 |     parser.add_argument('--val', action='store_true')
176 |     parser.add_argument('--chkpt-iters', default=10, type=int)
177 |     parser.add_argument('--aux-take-amount', type=int)
178 |     parser.add_argument('--aux-path', default='../ti_500K_pseudo_labeled.pickle')
179 |     return parser.parse_args()
180 | 
181 | 
182 | def main():
183 |     args = get_args()
184 | 
185 |     if not os.path.exists(args.fname):
186 |         os.makedirs(args.fname)
187 | 
188 |     logger = logging.getLogger(__name__)
189 |     logging.basicConfig(
190 |         format='[%(asctime)s] - %(message)s',
191 |         datefmt='%Y/%m/%d %H:%M:%S',
192 |         level=logging.DEBUG,
193 |         handlers=[
194 |             logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')),
195 |             logging.StreamHandler()
196 |         ])
197 | 
198 |     logger.info(args)
199 | 
200 |     np.random.seed(args.seed)
201 |     torch.manual_seed(args.seed)
202 |     torch.cuda.manual_seed(args.seed)
203 | 
204 |     transforms = [Crop(32, 32), FlipLR()]
205 |     if args.cutout:
206 |         transforms.append(Cutout(args.cutout_len, args.cutout_len))
207 |     if args.val:
208 |         try:
209 |             dataset = torch.load("cifar10_validation_split.pth")
210 |         except FileNotFoundError:
211 |             print("Couldn't find a dataset with a validation split, did you run "
212 |                   "generate_validation.py?")
213 |             return
214 |         val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels']))
215 |         val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=2)
216 |     else:
217 |         dataset = cifar10(args.data_dir)
218 |     unlabeled_dataset = cifar10_unlabeled(args.aux_path, args.aux_take_amount)
219 | 
220 |     train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.),
221 |         dataset['train']['labels']))
222 |     train_set_x = Transform(train_set, transforms)
223 |     train_batches = Batches(train_set_x, int(args.batch_size/2), shuffle=True, set_random_choices=True, num_workers=2)
224 | 
225 |     train_u_set = list(zip(transpose(pad(unlabeled_dataset['train']['data'], 4)/255.),
226 |         unlabeled_dataset['train']['labels']))
227 |     train_u_set_x = Transform(train_u_set, transforms)
228 |     train_u_batches = Batches(train_u_set_x, int(args.batch_size/2), shuffle=True, set_random_choices=True, num_workers=2)
229 | 
230 |     test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels']))
231 |     test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=2)
232 | 
233 |     epsilon = (args.epsilon / 255.)
234 |     pgd_alpha = (args.pgd_alpha / 255.)
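# Note: --epsilon and --pgd-alpha are specified on the 0-255 pixel scale and
# rescaled to [0, 1] above; the defaults epsilon=8 and pgd-alpha=2 become
# 8/255 ~= 0.0314 and 2/255 ~= 0.0078.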
235 | 236 | if args.model == 'PreActResNet18': 237 | model = PreActResNet18() 238 | elif args.model == 'WideResNet': 239 | model = WideResNet(34, 10, widen_factor=args.width_factor, dropRate=0.0) 240 | else: 241 | raise ValueError("Unknown model") 242 | 243 | model = nn.DataParallel(model).cuda() 244 | model.train() 245 | 246 | if args.l2: 247 | decay, no_decay = [], [] 248 | for name,param in model.named_parameters(): 249 | if 'bn' not in name and 'bias' not in name: 250 | decay.append(param) 251 | else: 252 | no_decay.append(param) 253 | params = [{'params':decay, 'weight_decay':args.l2}, 254 | {'params':no_decay, 'weight_decay': 0 }] 255 | else: 256 | params = model.parameters() 257 | 258 | opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=5e-4) 259 | 260 | criterion = nn.CrossEntropyLoss() 261 | 262 | if args.attack == 'fgsm' and args.fgsm_init == 'previous': 263 | delta = torch.zeros(args.batch_size, 3, 32, 32).cuda() 264 | delta.requires_grad = True 265 | 266 | epochs = args.epochs 267 | 268 | if args.lr_schedule == 'superconverge': 269 | lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0] 270 | elif args.lr_schedule == 'piecewise': 271 | def lr_schedule(t): 272 | if t / args.epochs < 0.5: 273 | return args.lr_max 274 | elif t / args.epochs < 0.75: 275 | return args.lr_max / 10. 276 | else: 277 | return args.lr_max / 100. 278 | 279 | best_test_robust_acc = 0 280 | best_val_robust_acc = 0 281 | if args.resume: 282 | start_epoch = args.resume 283 | model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth'))) 284 | opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth'))) 285 | logger.info(f'Resuming at epoch {start_epoch}') 286 | 287 | best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc'] 288 | if args.val: 289 | best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc'] 290 | else: 291 | start_epoch = 0 292 | 293 | if args.eval: 294 | if not args.resume: 295 | logger.info("No model loaded to evaluate, specify with --resume FNAME") 296 | return 297 | logger.info("[Evaluation mode]") 298 | 299 | logger.info('Epoch \t Train Time \t Test Time \t LR \t \t Train Loss \t Train Acc \t Train Robust Loss \t Train Robust Acc \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc') 300 | for epoch in range(start_epoch, epochs): 301 | model.train() 302 | start_time = time.time() 303 | train_loss = 0 304 | train_acc = 0 305 | train_robust_loss = 0 306 | train_robust_acc = 0 307 | train_n = 0 308 | for i, batch in enumerate(train_batches): 309 | if args.eval: 310 | break 311 | 312 | u_batch = next(train_u_batches) 313 | X_u, y_u = u_batch['input'], u_batch['target'] 314 | 315 | X, y = batch['input'], batch['target'] 316 | X = torch.cat((X, X_u), 0) 317 | y = torch.cat((y, y_u), 0) 318 | if args.mixup: 319 | X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha) 320 | X, y_a, y_b = map(Variable, (X, y_a, y_b)) 321 | lr = lr_schedule(epoch + (i + 1) / len(train_batches)) 322 | opt.param_groups[0].update(lr=lr) 323 | 324 | if args.attack == 'pgd': 325 | # Random initialization 326 | if args.mixup: 327 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, mixup=True, y_a=y_a, y_b=y_b, lam=lam) 328 | else: 329 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm) 330 | delta = delta.detach() 331 | 
elif args.attack == 'fgsm': 332 | delta = attack_pgd(model, X, y, epsilon, 1.25*epsilon, 1, 1, args.norm) 333 | # Standard training 334 | elif args.attack == 'none': 335 | delta = torch.zeros_like(X) 336 | 337 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 338 | if args.mixup: 339 | robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam) 340 | else: 341 | robust_loss = criterion(robust_output, y) 342 | 343 | if args.l1: 344 | for name,param in model.named_parameters(): 345 | if 'bn' not in name and 'bias' not in name: 346 | robust_loss += args.l1*param.abs().sum() 347 | 348 | opt.zero_grad() 349 | robust_loss.backward() 350 | opt.step() 351 | 352 | output = model(normalize(X)) 353 | if args.mixup: 354 | loss = mixup_criterion(criterion, output, y_a, y_b, lam) 355 | else: 356 | loss = criterion(output, y) 357 | 358 | train_robust_loss += robust_loss.item() * y.size(0) 359 | train_robust_acc += (robust_output.max(1)[1] == y).sum().item() 360 | train_loss += loss.item() * y.size(0) 361 | train_acc += (output.max(1)[1] == y).sum().item() 362 | train_n += y.size(0) 363 | 364 | train_time = time.time() 365 | 366 | model.eval() 367 | test_loss = 0 368 | test_acc = 0 369 | test_robust_loss = 0 370 | test_robust_acc = 0 371 | test_n = 0 372 | for i, batch in enumerate(test_batches): 373 | X, y = batch['input'], batch['target'] 374 | 375 | # Random initialization 376 | if args.attack == 'none': 377 | delta = torch.zeros_like(X) 378 | else: 379 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=args.eval) 380 | delta = delta.detach() 381 | 382 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 383 | robust_loss = criterion(robust_output, y) 384 | 385 | output = model(normalize(X)) 386 | loss = criterion(output, y) 387 | 388 | test_robust_loss += robust_loss.item() * y.size(0) 389 | test_robust_acc += (robust_output.max(1)[1] == y).sum().item() 390 | test_loss += loss.item() * y.size(0) 391 | test_acc += (output.max(1)[1] == y).sum().item() 392 | test_n += y.size(0) 393 | 394 | test_time = time.time() 395 | 396 | if args.val: 397 | val_loss = 0 398 | val_acc = 0 399 | val_robust_loss = 0 400 | val_robust_acc = 0 401 | val_n = 0 402 | for i, batch in enumerate(val_batches): 403 | X, y = batch['input'], batch['target'] 404 | 405 | # Random initialization 406 | if args.attack == 'none': 407 | delta = torch.zeros_like(X) 408 | else: 409 | delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=args.eval) 410 | delta = delta.detach() 411 | 412 | robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))) 413 | robust_loss = criterion(robust_output, y) 414 | 415 | output = model(normalize(X)) 416 | loss = criterion(output, y) 417 | 418 | val_robust_loss += robust_loss.item() * y.size(0) 419 | val_robust_acc += (robust_output.max(1)[1] == y).sum().item() 420 | val_loss += loss.item() * y.size(0) 421 | val_acc += (output.max(1)[1] == y).sum().item() 422 | val_n += y.size(0) 423 | 424 | if not args.eval: 425 | logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f', 426 | epoch, train_time - start_time, test_time - train_time, lr, 427 | train_loss/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n, 428 | test_loss/test_n, test_acc/test_n, 
test_robust_loss/test_n, test_robust_acc/test_n) 429 | 430 | if args.val: 431 | logger.info('validation %.4f \t %.4f \t %.4f \t %.4f', 432 | val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n) 433 | 434 | if val_robust_acc/val_n > best_val_robust_acc: 435 | torch.save({ 436 | 'state_dict':model.state_dict(), 437 | 'test_robust_acc':test_robust_acc/test_n, 438 | 'test_robust_loss':test_robust_loss/test_n, 439 | 'test_loss':test_loss/test_n, 440 | 'test_acc':test_acc/test_n, 441 | 'val_robust_acc':val_robust_acc/val_n, 442 | 'val_robust_loss':val_robust_loss/val_n, 443 | 'val_loss':val_loss/val_n, 444 | 'val_acc':val_acc/val_n, 445 | }, os.path.join(args.fname, f'model_val.pth')) 446 | best_val_robust_acc = val_robust_acc/val_n 447 | 448 | # save checkpoint 449 | if (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs: 450 | torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth')) 451 | torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth')) 452 | 453 | # save best 454 | if test_robust_acc/test_n > best_test_robust_acc: 455 | torch.save({ 456 | 'state_dict':model.state_dict(), 457 | 'test_robust_acc':test_robust_acc/test_n, 458 | 'test_robust_loss':test_robust_loss/test_n, 459 | 'test_loss':test_loss/test_n, 460 | 'test_acc':test_acc/test_n, 461 | }, os.path.join(args.fname, f'model_best.pth')) 462 | best_test_robust_acc = test_robust_acc/test_n 463 | else: 464 | logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f', 465 | epoch, train_time - start_time, test_time - train_time, -1, 466 | -1, -1, -1, -1, 467 | test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n) 468 | return 469 | 470 | 471 | if __name__ == "__main__": 472 | main() 473 | -------------------------------------------------------------------------------- /train_svhn.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import sys 4 | import time 5 | import math 6 | 7 | import numpy as np 8 | import torch 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | from torch.autograd import Variable 12 | from torchvision import datasets, transforms 13 | 14 | import os 15 | 16 | from wideresnet import WideResNet 17 | from preactresnet import PreActResNet18 18 | 19 | svhn_mean = (0.5, 0.5, 0.5) 20 | svhn_std = (0.5, 0.5, 0.5) 21 | 22 | mu = torch.tensor(svhn_mean).view(3,1,1).cuda() 23 | std = torch.tensor(svhn_std).view(3,1,1).cuda() 24 | 25 | def normalize(X): 26 | return (X - mu)/std 27 | 28 | upper_limit, lower_limit = 1,0 29 | 30 | 31 | def clamp(X, lower_limit, upper_limit): 32 | return torch.max(torch.min(X, upper_limit), lower_limit) 33 | 34 | def mixup_data(x, y, alpha=1.0): 35 | '''Returns mixed inputs, pairs of targets, and lambda''' 36 | if alpha > 0: 37 | lam = np.random.beta(alpha, alpha) 38 | else: 39 | lam = 1 40 | 41 | batch_size = x.size()[0] 42 | index = torch.randperm(batch_size).cuda() 43 | 44 | mixed_x = lam * x + (1 - lam) * x[index, :] 45 | y_a, y_b = y, y[index] 46 | return mixed_x, y_a, y_b, lam 47 | 48 | 49 | def mixup_criterion(criterion, pred, y_a, y_b, lam): 50 | return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) 51 | 52 | 53 | def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, 54 | norm, early_stop=False, 55 | mixup=False, y_a=None, y_b=None, lam=None): 56 | max_loss = torch.zeros(y.shape[0]).cuda() 57 | max_delta = 
torch.zeros_like(X).cuda() 58 | for _ in range(restarts): 59 | delta = torch.zeros_like(X).cuda() 60 | if norm == "l_inf": 61 | delta.uniform_(-epsilon, epsilon) 62 | elif norm == "l_2": 63 | delta.uniform_(-0.5,0.5).renorm(p=2, dim=1, maxnorm=epsilon) 64 | else: 65 | raise ValueError 66 | delta = clamp(delta, lower_limit-X, upper_limit-X) 67 | delta.requires_grad = True 68 | for _ in range(attack_iters): 69 | output = model(normalize(X + delta)) 70 | if early_stop: 71 | index = torch.where(output.max(1)[1] == y)[0] 72 | else: 73 | index = slice(None,None,None) 74 | if not isinstance(index, slice) and len(index) == 0: 75 | break 76 | if mixup: 77 | criterion = nn.CrossEntropyLoss() 78 | loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 79 | else: 80 | loss = F.cross_entropy(output, y) 81 | loss.backward() 82 | grad = delta.grad.detach() 83 | d = delta[index, :, :, :] 84 | g = grad[index, :, :, :] 85 | x = X[index, :, :, :] 86 | if norm == "l_inf": 87 | d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon) 88 | elif norm == "l_2": 89 | g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1) 90 | scaled_g = g/(g_norm + 1e-10) 91 | d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d) 92 | d = clamp(d, lower_limit - x, upper_limit - x) 93 | delta.data[index, :, :, :] = d 94 | delta.grad.zero_() 95 | if mixup: 96 | criterion = nn.CrossEntropyLoss(reduction='none') 97 | all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam) 98 | else: 99 | all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none') 100 | max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss] 101 | max_loss = torch.max(max_loss, all_loss) 102 | return max_delta 103 | 104 | 105 | def get_args(): 106 | parser = argparse.ArgumentParser() 107 | parser.add_argument('--model', default='PreActResNet18') 108 | parser.add_argument('--l2', default=0, type=float) 109 | parser.add_argument('--l1', default=0, type=float) 110 | parser.add_argument('--batch-size', default=128, type=int) 111 | parser.add_argument('--data-dir', default='../svhn-data', type=str) 112 | parser.add_argument('--epochs', default=200, type=int) 113 | parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise']) 114 | parser.add_argument('--lr-max', default=0.01, type=float) 115 | parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'none']) 116 | parser.add_argument('--epsilon', default=8, type=int) 117 | parser.add_argument('--attack-iters', default=10, type=int) 118 | parser.add_argument('--restarts', default=1, type=int) 119 | parser.add_argument('--pgd-alpha', default=2, type=float) 120 | parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2']) 121 | parser.add_argument('--fname', default='svhn_model', type=str) 122 | parser.add_argument('--seed', default=0, type=int) 123 | parser.add_argument('--half', action='store_true') 124 | parser.add_argument('--width-factor', default=10, type=int) 125 | parser.add_argument('--resume', default=0, type=int) 126 | parser.add_argument('--cutout', action='store_true') 127 | parser.add_argument('--cutout-len', type=int) 128 | parser.add_argument('--mixup', action='store_true') 129 | parser.add_argument('--mixup-alpha', type=float) 130 | parser.add_argument('--eval', action='store_true') 131 | parser.add_argument('--chkpt-iters', default=10, type=int) 132 | return parser.parse_args() 133 | 134 | 135 | def 
main(): 136 | args = get_args() 137 | 138 | if not os.path.exists(args.fname): 139 | os.makedirs(args.fname) 140 | 141 | logger = logging.getLogger(__name__) 142 | logging.basicConfig( 143 | format='[%(asctime)s] - %(message)s', 144 | datefmt='%Y/%m/%d %H:%M:%S', 145 | level=logging.DEBUG, 146 | handlers=[ 147 | logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')), 148 | logging.StreamHandler() 149 | ]) 150 | 151 | logger.info(args) 152 | 153 | np.random.seed(args.seed) 154 | torch.manual_seed(args.seed) 155 | torch.cuda.manual_seed(args.seed) 156 | 157 | train_transform = transforms.Compose([ 158 | transforms.ToTensor(), 159 | # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 160 | ]) 161 | test_transform = transforms.Compose([ 162 | transforms.ToTensor(), 163 | # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 164 | ]) 165 | num_workers = 2 166 | train_dataset = datasets.SVHN( 167 | args.data_dir, split='train', transform=train_transform, download=True) 168 | test_dataset = datasets.SVHN( 169 | args.data_dir, split='test', transform=test_transform, download=True) 170 | train_loader = torch.utils.data.DataLoader( 171 | dataset=train_dataset, 172 | batch_size=args.batch_size, 173 | shuffle=True, 174 | pin_memory=True, 175 | num_workers=num_workers, 176 | ) 177 | test_loader = torch.utils.data.DataLoader( 178 | dataset=test_dataset, 179 | batch_size=args.batch_size, 180 | shuffle=False, 181 | pin_memory=True, 182 | num_workers=2, 183 | ) 184 | 185 | epsilon = (args.epsilon / 255.) 186 | pgd_alpha = (args.pgd_alpha / 255.) 187 | 188 | # model = models_dict[args.architecture]().cuda() 189 | # model.apply(initialize_weights) 190 | if args.model == 'PreActResNet18': 191 | model = PreActResNet18() 192 | elif args.model == 'WideResNet': 193 | model = WideResNet(34, 10, widen_factor=args.width_factor, dropRate=0.0) 194 | else: 195 | raise ValueError("Unknown model") 196 | 197 | model = model.cuda() 198 | model.train() 199 | 200 | if args.l2: 201 | decay, no_decay = [], [] 202 | for name,param in model.named_parameters(): 203 | if 'bn' not in name and 'bias' not in name: 204 | decay.append(param) 205 | else: 206 | no_decay.append(param) 207 | params = [{'params':decay, 'weight_decay':args.l2}, 208 | {'params':no_decay, 'weight_decay': 0 }] 209 | else: 210 | params = model.parameters() 211 | 212 | opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=5e-4) 213 | 214 | criterion = nn.CrossEntropyLoss() 215 | 216 | epochs = args.epochs 217 | 218 | if args.lr_schedule == 'superconverge': 219 | lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0] 220 | # lr_schedule = lambda t: np.interp([t], [0, args.epochs], [0, args.lr_max])[0] 221 | elif args.lr_schedule == 'piecewise': 222 | def lr_schedule(t): 223 | if t / args.epochs < 0.5: 224 | return args.lr_max 225 | elif t / args.epochs < 0.75: 226 | return args.lr_max / 10. 227 | else: 228 | return args.lr_max / 100. 
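# Note: unlike the CIFAR scripts, --lr-max defaults to 0.01 for SVHN (see
# get_args above), so this piecewise schedule decays to 0.001 and then 0.0001.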

    if args.resume:
        start_epoch = args.resume
        model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth')))
        opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth')))
        logger.info(f'Resuming at epoch {start_epoch}')
    else:
        start_epoch = 0

    if args.eval:
        if not args.resume:
            logger.info("No model loaded to evaluate; specify a checkpoint epoch with --resume N")
            return
        logger.info("[Evaluation mode]")

    logger.info('Epoch \t Train Time \t Test Time \t LR \t \t Train Loss \t Train Acc \t Train Robust Loss \t Train Robust Acc \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc')
    for epoch in range(start_epoch, epochs):
        model.train()
        start_time = time.time()
        train_loss = 0
        train_acc = 0
        train_robust_loss = 0
        train_robust_acc = 0
        train_n = 0
        for i, (X, y) in enumerate(train_loader):
            if args.eval:
                break
            X, y = X.cuda(), y.cuda()
            if args.mixup:
                X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha)
                X, y_a, y_b = map(Variable, (X, y_a, y_b))
            lr = lr_schedule(epoch + (i + 1) / len(train_loader))
            # update the learning rate in every param group (there are two
            # groups when --l2 is set, so updating only group 0 would leave
            # the second group at the initial lr)
            for group in opt.param_groups:
                group.update(lr=lr)

            if args.attack == 'pgd':
                # PGD with random initialization
                if args.mixup:
                    delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, mixup=True, y_a=y_a, y_b=y_b, lam=lam)
                else:
                    delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm)
                delta = delta.detach()
            # Standard training
            elif args.attack == 'none':
                delta = torch.zeros_like(X)

            robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
            if args.mixup:
                robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam)
            else:
                robust_loss = criterion(robust_output, y)

            if args.l1:
                # l1 penalty on convolution/linear weights only
                for name, param in model.named_parameters():
                    if 'bn' not in name and 'bias' not in name:
                        robust_loss += args.l1 * param.abs().sum()

            opt.zero_grad()
            robust_loss.backward()
            opt.step()

            output = model(normalize(X))
            if args.mixup:
                loss = mixup_criterion(criterion, output, y_a, y_b, lam)
            else:
                loss = criterion(output, y)

            train_robust_loss += robust_loss.item() * y.size(0)
            train_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)

        train_time = time.time()
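        # Test pass: clean and robust accuracy on the held-out split. In
        # --eval mode the attack additionally uses per-example early stopping.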
        model.eval()
        test_loss = 0
        test_acc = 0
        test_robust_loss = 0
        test_robust_acc = 0
        test_n = 0
        for i, (X, y) in enumerate(test_loader):
            X, y = X.cuda(), y.cuda()

            if args.attack == 'none':
                delta = torch.zeros_like(X)
            else:
                delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=args.eval)
            delta = delta.detach()

            robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
            robust_loss = criterion(robust_output, y)

            output = model(normalize(X))
            loss = criterion(output, y)

            test_robust_loss += robust_loss.item() * y.size(0)
            test_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            test_n += y.size(0)

        test_time = time.time()
        if not args.eval:
            logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
                epoch, train_time - start_time, test_time - train_time, lr,
                train_loss/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n,
                test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)

            if (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs:
                torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth'))
                torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth'))
        else:
            # in evaluation mode the training columns are logged as -1
            logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
                epoch, train_time - start_time, test_time - train_time, -1,
                -1, -1, -1, -1,
                test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
            return


if __name__ == "__main__":
    main()
-------------------------------------------------------------------------------- /utils.py: --------------------------------------------------------------------------------
import numpy as np
from collections import namedtuple
import torch
from torch import nn
import torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

################################################################
## Components from https://github.com/davidcpage/cifar10-fast ##
################################################################

#####################
## data preprocessing
#####################

cifar10_mean = (0.4914, 0.4822, 0.4465)  # equals np.mean(train_set.train_data, axis=(0,1,2))/255
cifar10_std = (0.2471, 0.2435, 0.2616)   # equals np.std(train_set.train_data, axis=(0,1,2))/255

def normalise(x, mean=cifar10_mean, std=cifar10_std):
    # operates on raw uint8 pixel values in [0, 255]
    x, mean, std = [np.array(a, np.float32) for a in (x, mean, std)]
    x -= mean * 255
    x *= 1.0 / (255 * std)
    return x

def pad(x, border=4):
    # reflection-pad the H and W axes of an NHWC batch
    return np.pad(x, [(0, 0), (border, border), (border, border), (0, 0)], mode='reflect')

def transpose(x, source='NHWC', target='NCHW'):
    return x.transpose([source.index(d) for d in target])

#####################
## data augmentation
#####################

class Crop(namedtuple('Crop', ('h', 'w'))):
    def __call__(self, x, x0, y0):
        return x[:, y0:y0+self.h, x0:x0+self.w]

    def options(self, x_shape):
        C, H, W = x_shape
        return {'x0': range(W+1-self.w), 'y0': range(H+1-self.h)}

    def output_shape(self, x_shape):
        C, H, W = x_shape
        return (C, self.h, self.w)

class FlipLR(namedtuple('FlipLR', ())):
    def __call__(self, x, choice):
        return x[:, :, ::-1].copy() if choice else x

    def options(self, x_shape):
        return {'choice': [True, False]}

class Cutout(namedtuple('Cutout', ('h', 'w'))):
    def __call__(self, x, x0, y0):
        x = x.copy()
        x[:, y0:y0+self.h, x0:x0+self.w].fill(0.0)
        return x

    def options(self, x_shape):
        C, H, W = x_shape
        return {'x0': range(W+1-self.w), 'y0': range(H+1-self.h)}
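
# Example (a sketch): the preprocessing helpers above are meant to be
# composed on the raw NHWC uint8 arrays returned by cifar10() below, e.g.
#
#   data = transpose(normalise(pad(dataset['train']['data'])))  # -> NCHW float32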


class Transform():
    def __init__(self, dataset, transforms):
        self.dataset, self.transforms = dataset, transforms
        self.choices = None

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        data, labels = self.dataset[index]
        # apply each transform with the per-example choices sampled in
        # set_random_choices()
        for choices, f in zip(self.choices, self.transforms):
            args = {k: v[index] for (k, v) in choices.items()}
            data = f(data, **args)
        return data, labels

    def set_random_choices(self):
        self.choices = []
        x_shape = self.dataset[0][0].shape
        N = len(self)
        for t in self.transforms:
            options = t.options(x_shape)
            x_shape = t.output_shape(x_shape) if hasattr(t, 'output_shape') else x_shape
            self.choices.append({k: np.random.choice(v, size=N) for (k, v) in options.items()})

#####################
## dataset
#####################

def cifar10(root):
    train_set = torchvision.datasets.CIFAR10(root=root, train=True, download=True)
    test_set = torchvision.datasets.CIFAR10(root=root, train=False, download=True)
    return {
        'train': {'data': train_set.data, 'labels': train_set.targets},
        'test': {'data': test_set.data, 'labels': test_set.targets}
    }

#####################
## data loading
#####################

class Batches():
    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )

    def __iter__(self):
        if self.set_random_choices:
            self.dataset.set_random_choices()
        # note: inputs are always cast to half precision here
        return ({'input': x.to(device).half(), 'target': y.to(device).long()} for (x, y) in self.dataloader)

    def __len__(self):
        return len(self.dataloader)
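
# Example usage (a sketch, with a hypothetical './data' root): building the
# standard cifar10-fast augmentation pipeline from the pieces above.
#
#   dataset = cifar10('./data')
#   train_data = transpose(normalise(pad(dataset['train']['data'])))
#   train_set = Transform(list(zip(train_data, dataset['train']['labels'])),
#                         [Crop(32, 32), FlipLR()])
#   train_batches = Batches(train_set, batch_size=128, shuffle=True,
#                           set_random_choices=True)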
-------------------------------------------------------------------------------- /wideresnet.py: --------------------------------------------------------------------------------
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 convolution shortcut when the channel count or stride changes
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)


class NetworkBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # only the first block in a group changes the channel count and stride
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)


class WideResNet(nn.Module):
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
--------------------------------------------------------------------------------