├── .github └── ISSUE_TEMPLATE │ ├── new-external-evaluations.md │ └── new-model.md ├── .gitignore ├── CITATION.bib ├── LICENSE ├── MANIFEST.in ├── README.md ├── images ├── aa_robustness_overestimation_vs_venues.png ├── aa_robustness_vs_reported.png ├── aa_robustness_vs_standard.png ├── aa_robustness_vs_venues.png ├── aa_robustness_vs_years.png ├── leaderboard_screenshot_corruptions.png ├── leaderboard_screenshot_l2.png ├── leaderboard_screenshot_linf.png └── plots_analysis_jsons.png ├── model_info ├── cifar10 │ ├── L2 │ │ ├── Amini2024MeanSparse_S-WRN-70-16.json │ │ ├── Augustin2020Adversarial.json │ │ ├── Augustin2020Adversarial_34_10.json │ │ ├── Augustin2020Adversarial_34_10_extra.json │ │ ├── Ding2020MMA.json │ │ ├── Engstrom2019Robustness.json │ │ ├── Gowal2020Uncovering.json │ │ ├── Gowal2020Uncovering_extra.json │ │ ├── Rade2021Helper_R18_ddpm.json │ │ ├── Rebuffi2021Fixing_28_10_cutmix_ddpm.json │ │ ├── Rebuffi2021Fixing_70_16_cutmix_ddpm.json │ │ ├── Rebuffi2021Fixing_70_16_cutmix_extra.json │ │ ├── Rebuffi2021Fixing_R18_cutmix_ddpm.json │ │ ├── Rice2020Overfitting.json │ │ ├── Rony2019Decoupling.json │ │ ├── Sehwag2021Proxy.json │ │ ├── Sehwag2021Proxy_R18.json │ │ ├── Standard.json │ │ ├── Wang2023Better_WRN-28-10.json │ │ ├── Wang2023Better_WRN-70-16.json │ │ └── Wu2020Adversarial.json │ ├── Linf │ │ ├── Addepalli2021Towards_RN18.json │ │ ├── Addepalli2021Towards_WRN34.json │ │ ├── Addepalli2022Efficient_RN18.json │ │ ├── Addepalli2022Efficient_WRN_34_10.json │ │ ├── Alayrac2019Labels.json │ │ ├── Alfarra2020ClusTR.json │ │ ├── Amini2024MeanSparse_Ra_WRN_70_16.json │ │ ├── Amini2024MeanSparse_S-WRN-94-16.json │ │ ├── Andriushchenko2020Understanding.json │ │ ├── Atzmon2019Controlling.json │ │ ├── Bai2023Improving_edm.json │ │ ├── Bai2024MixedNUTS.json │ │ ├── Bartoldson2024Adversarial_WRN-82-8.json │ │ ├── Bartoldson2024Adversarial_WRN-94-16.json │ │ ├── Carmon2019Unlabeled.json │ │ ├── Chan2020Jacobian.json │ │ ├── Chen2020Adversarial.json │ │ ├── 
Chen2020Efficient.json │ │ ├── Chen2021LTD_WRN34_10.json │ │ ├── Chen2021LTD_WRN34_20.json │ │ ├── Chen2024Data_WRN_34_10.json │ │ ├── Chen2024Data_WRN_34_20.json │ │ ├── Cui2020Learnable_34_10.json │ │ ├── Cui2020Learnable_34_20.json │ │ ├── Cui2023Decoupled_WRN-28-10.json │ │ ├── Cui2023Decoupled_WRN-34-10.json │ │ ├── Dai2021Parameterizing.json │ │ ├── Debenedetti2022Light_XCiT-L12.json │ │ ├── Debenedetti2022Light_XCiT-M12.json │ │ ├── Debenedetti2022Light_XCiT-S12.json │ │ ├── Ding2020MMA.json │ │ ├── Engstrom2019Robustness.json │ │ ├── Gowal2020Uncovering_28_10_extra.json │ │ ├── Gowal2020Uncovering_34_20.json │ │ ├── Gowal2020Uncovering_70_16.json │ │ ├── Gowal2020Uncovering_70_16_extra.json │ │ ├── Gowal2021Improving_28_10_ddpm_100m.json │ │ ├── Gowal2021Improving_70_16_ddpm_100m.json │ │ ├── Gowal2021Improving_R18_ddpm_100m.json │ │ ├── Hendrycks2019Using.json │ │ ├── Huang2020Self.json │ │ ├── Huang2021Exploring.json │ │ ├── Huang2021Exploring_ema.json │ │ ├── Huang2022Revisiting_WRN-A4.json │ │ ├── Jang2019Adversarial.json │ │ ├── Jia2022LAS-AT_34_10.json │ │ ├── Jia2022LAS-AT_70_16.json │ │ ├── JinRinard2020Manifold.json │ │ ├── Kang2021Stable.json │ │ ├── KimWang2020Sensible.json │ │ ├── Kumari2019Harnessing.json │ │ ├── Kundu2020Tunable.json │ │ ├── Madry2018Towards.json │ │ ├── Mao2019Metric.json │ │ ├── Moosavi-Dezfooli2019Robustness.json │ │ ├── Mustafa2019Adversarial.json │ │ ├── Pang2020Bag.json │ │ ├── Pang2020Boosting.json │ │ ├── Pang2020Rethinking.json │ │ ├── Pang2022Robustness_WRN28_10.json │ │ ├── Pang2022Robustness_WRN70_16.json │ │ ├── Peng2023Robust.json │ │ ├── Qin2019Adversarial.json │ │ ├── Rade2021Helper_R18_ddpm.json │ │ ├── Rade2021Helper_R18_extra.json │ │ ├── Rade2021Helper_ddpm.json │ │ ├── Rade2021Helper_extra.json │ │ ├── Rebuffi2021Fixing_106_16_cutmix_ddpm.json │ │ ├── Rebuffi2021Fixing_28_10_cutmix_ddpm.json │ │ ├── Rebuffi2021Fixing_70_16_cutmix_ddpm.json │ │ ├── Rebuffi2021Fixing_70_16_cutmix_extra.json │ │ ├── 
Rebuffi2021Fixing_R18_ddpm.json │ │ ├── Rice2020Overfitting.json │ │ ├── Sehwag2020Hydra.json │ │ ├── Sehwag2021Proxy.json │ │ ├── Sehwag2021Proxy_R18.json │ │ ├── Sehwag2021Proxy_ResNest152.json │ │ ├── Shafahi2019Adversarial.json │ │ ├── Sitawarin2020Improving.json │ │ ├── Sridhar2021Robust.json │ │ ├── Sridhar2021Robust_34_15.json │ │ ├── Standard.json │ │ ├── Wang2020Improving.json │ │ ├── Wang2023Better_WRN-28-10.json │ │ ├── Wang2023Better_WRN-70-16.json │ │ ├── WangZhang2019Bilateral.json │ │ ├── Wong2020Fast.json │ │ ├── Wu2020Adversarial.json │ │ ├── Wu2020Adversarial_extra.json │ │ ├── Wu2020Do.json │ │ ├── Xiao2020Enhancing.json │ │ ├── Xu2023Exploring_WRN-28-10.json │ │ ├── Zhang2019Theoretically.json │ │ ├── Zhang2019You.json │ │ ├── Zhang2020Attacks.json │ │ ├── Zhang2020Geometry.json │ │ ├── Zhang2020Towards.json │ │ ├── ZhangWang2019Defense.json │ │ └── ZhangXu2020Adversarial.json │ └── corruptions │ │ ├── Addepalli2021Towards_WRN34.json │ │ ├── Addepalli2022Efficient_WRN_34_10.json │ │ ├── Calian2021Defending.json │ │ ├── Diffenderfer2021Winning_Binary.json │ │ ├── Diffenderfer2021Winning_Binary_CARD_Deck.json │ │ ├── Diffenderfer2021Winning_LRR.json │ │ ├── Diffenderfer2021Winning_LRR_CARD_Deck.json │ │ ├── Erichson2022NoisyMix.json │ │ ├── Gowal2020Uncovering_70_16_L2.json │ │ ├── Gowal2020Uncovering_70_16_Linf.json │ │ ├── Gowal2020Uncovering_70_16_extra_L2.json │ │ ├── Gowal2020Uncovering_70_16_extra_Linf.json │ │ ├── Hendrycks2020AugMix_ResNeXt.json │ │ ├── Hendrycks2020AugMix_WRN.json │ │ ├── Kireev2021Effectiveness_AugMixNoJSD.json │ │ ├── Kireev2021Effectiveness_Gauss50percent.json │ │ ├── Kireev2021Effectiveness_RLAT.json │ │ ├── Kireev2021Effectiveness_RLATAugMix.json │ │ ├── Kireev2021Effectiveness_RLATAugMixNoJSD.json │ │ ├── Modas2021PRIMEResNet18.json │ │ ├── Rebuffi2021Fixing_70_16_cutmix_extra_L2.json │ │ ├── Rebuffi2021Fixing_70_16_cutmix_extra_Linf.json │ │ ├── Standard.json │ │ └── unaggregated_results.csv ├── cifar100 │ ├── Linf 
│ │ ├── Addepalli2021Towards_PARN18.json │ │ ├── Addepalli2021Towards_WRN34.json │ │ ├── Addepalli2022Efficient_RN18.json │ │ ├── Addepalli2022Efficient_WRN_34_10.json │ │ ├── Amini2024MeanSparse_S-WRN-70-16.json │ │ ├── Bai2023Improving_edm.json │ │ ├── Bai2023Improving_trades.json │ │ ├── Bai2024MixedNUTS.json │ │ ├── Chen2020Efficient.json │ │ ├── Chen2021LTD_WRN34_10.json │ │ ├── Chen2024Data_WRN_34_10.json │ │ ├── Cui2020Learnable_34_10_LBGAT0.json │ │ ├── Cui2020Learnable_34_10_LBGAT6.json │ │ ├── Cui2020Learnable_34_10_LBGAT9_eps_8_255.json │ │ ├── Cui2020Learnable_34_20_LBGAT6.json │ │ ├── Cui2023Decoupled_WRN-28-10.json │ │ ├── Cui2023Decoupled_WRN-34-10.json │ │ ├── Cui2023Decoupled_WRN-34-10_autoaug.json │ │ ├── Debenedetti2022Light_XCiT-L12.json │ │ ├── Debenedetti2022Light_XCiT-M12.json │ │ ├── Debenedetti2022Light_XCiT-S12.json │ │ ├── Gowal2020Uncovering.json │ │ ├── Gowal2020Uncovering_extra.json │ │ ├── Hendrycks2019Using.json │ │ ├── Jia2022LAS-AT_34_10.json │ │ ├── Jia2022LAS-AT_34_20.json │ │ ├── Pang2022Robustness_WRN28_10.json │ │ ├── Pang2022Robustness_WRN70_16.json │ │ ├── Rade2021Helper_R18_ddpm.json │ │ ├── Rebuffi2021Fixing_28_10_cutmix_ddpm.json │ │ ├── Rebuffi2021Fixing_70_16_cutmix_ddpm.json │ │ ├── Rebuffi2021Fixing_R18_ddpm.json │ │ ├── Rice2020Overfitting.json │ │ ├── Sehwag2021Proxy.json │ │ ├── Sitawarin2020Improving.json │ │ ├── Wang2023Better_WRN-28-10.json │ │ ├── Wang2023Better_WRN-70-16.json │ │ └── Wu2020Adversarial.json │ └── corruptions │ │ ├── Addepalli2021Towards_PARN18.json │ │ ├── Addepalli2021Towards_WRN34.json │ │ ├── Addepalli2022Efficient_WRN_34_10.json │ │ ├── Diffenderfer2021Winning_Binary.json │ │ ├── Diffenderfer2021Winning_Binary_CARD_Deck.json │ │ ├── Diffenderfer2021Winning_LRR.json │ │ ├── Diffenderfer2021Winning_LRR_CARD_Deck.json │ │ ├── Erichson2022NoisyMix.json │ │ ├── Gowal2020Uncovering_Linf.json │ │ ├── Gowal2020Uncovering_extra_Linf.json │ │ ├── Hendrycks2020AugMix_ResNeXt.json │ │ ├── 
Hendrycks2020AugMix_WRN.json │ │ ├── Modas2021PRIMEResNet18.json │ │ └── unaggregated_results.csv └── imagenet │ ├── Linf │ ├── Amini2024MeanSparse_ConvNeXt-L.json │ ├── Amini2024MeanSparse_Swin-L.json │ ├── Bai2024MixedNUTS.json │ ├── Chen2024Data_WRN_50_2.json │ ├── Debenedetti2022Light_XCiT-L12.json │ ├── Debenedetti2022Light_XCiT-M12.json │ ├── Debenedetti2022Light_XCiT-S12.json │ ├── Engstrom2019Robustness.json │ ├── Liu2023Comprehensive_ConvNeXt-B.json │ ├── Liu2023Comprehensive_ConvNeXt-L.json │ ├── Liu2023Comprehensive_Swin-B.json │ ├── Liu2023Comprehensive_Swin-L.json │ ├── Mo2022When_Swin-B.json │ ├── Mo2022When_ViT-B.json │ ├── Peng2023Robust.json │ ├── RodriguezMunoz2024Characterizing_Swin-B.json │ ├── RodriguezMunoz2024Characterizing_Swin-L.json │ ├── Salman2020Do_50_2.json │ ├── Salman2020Do_R18.json │ ├── Salman2020Do_R50.json │ ├── Singh2023Revisiting_ConvNeXt-B-ConvStem.json │ ├── Singh2023Revisiting_ConvNeXt-L-ConvStem.json │ ├── Singh2023Revisiting_ConvNeXt-S-ConvStem.json │ ├── Singh2023Revisiting_ConvNeXt-T-ConvStem.json │ ├── Singh2023Revisiting_ViT-B-ConvStem.json │ ├── Singh2023Revisiting_ViT-S-ConvStem.json │ ├── Standard_R50.json │ ├── Wong2020Fast.json │ ├── Xu2024MIMIR_Swin-B.json │ └── Xu2024MIMIR_Swin-L.json │ └── corruptions │ ├── AlexNet.json │ ├── Erichson2022NoisyMix.json │ ├── Erichson2022NoisyMix_new.json │ ├── Geirhos2018_SIN.json │ ├── Geirhos2018_SIN_IN.json │ ├── Geirhos2018_SIN_IN_IN.json │ ├── Hendrycks2020AugMix.json │ ├── Hendrycks2020Many.json │ ├── Salman2020Do_50_2_Linf.json │ ├── Standard_R50.json │ ├── Tian2022Deeper_DeiT-B.json │ ├── Tian2022Deeper_DeiT-S.json │ ├── unaggregated_results.csv │ └── unaggregated_results_3d.csv ├── requirements.txt ├── robustbench ├── __init__.py ├── data.py ├── eval.py ├── helper_files │ ├── imagenet_class_to_id_map.json │ └── imagenet_test_image_ids.txt ├── leaderboard │ ├── __init__.py │ ├── leaderboard.html.j2 │ └── template.py ├── loaders.py ├── model_zoo │ ├── __init__.py │ ├── 
architectures │ │ ├── CARD_resnet.py │ │ ├── MIMIR_swin_transformer.py │ │ ├── Meansparse_ra_wrn_70_16.py │ │ ├── Meansparse_swin_L.py │ │ ├── Meansparse_wrn_70_16.py │ │ ├── Meansparse_wrn_94_16.py │ │ ├── __init__.py │ │ ├── alexnet.py │ │ ├── bit_rn.py │ │ ├── bit_rn_v2.py │ │ ├── boosting_wide_resnet.py │ │ ├── comp_model.py │ │ ├── convnext_v2.py │ │ ├── convstem_models.py │ │ ├── deit.py │ │ ├── dm_rn.py │ │ ├── dm_wide_resnet.py │ │ ├── mixing_net.py │ │ ├── optimal_spca.yaml │ │ ├── output_maps.py │ │ ├── paf_wide_resnet.py │ │ ├── resnest.py │ │ ├── resnet.py │ │ ├── resnext.py │ │ ├── robust_resnet.py │ │ ├── robust_wide_resnet.py │ │ ├── robustarch_wide_resnet.py │ │ ├── sodef_layers.py │ │ ├── sparsified_model.py │ │ ├── utils_architectures.py │ │ ├── wide_resnet.py │ │ └── xcit.py │ ├── cifar10.py │ ├── cifar100.py │ ├── enums.py │ ├── imagenet.py │ └── models.py ├── utils.py └── zenodo_download.py ├── setup.py └── tests ├── __init__.py ├── config.py ├── example_eval_imagenet_corruptions.sh ├── test_clean_acc.py ├── test_eval.py ├── test_utils.py ├── utils.py └── utils_testing.py /.github/ISSUE_TEMPLATE/new-external-evaluations.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: New external evaluation(s) 3 | about: To submit new evaluations of models in the Model Zoo 4 | title: "[New evaluation] <>" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Target model 11 | + **Model identifier(s):** 12 | + **Leaderboard(s):** 13 | 14 | ## New evaluation 15 | + **Link to adversarial images:** 16 | + **Robust accuracy:** 17 | + **Author(s):** 18 | + **Description of the method used:** 19 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/new-model.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: New Model(s) 3 | about: To ask for the addition of new models to the leaderboard and/or model zoo 4 | 
title: "[New Model] " 5 | labels: new-model 6 | assignees: '' 7 | 8 | --- 9 | 10 | # Paper Information 11 | 12 | - **Paper Title**: 13 | - **Paper URL**: 14 | - **Paper authors**: 15 | 16 | # Leaderboard Claim(s) 17 | 18 | Add here the claim for your model(s). Copy and paste the following subsection for the number of models you want to add. 19 | 20 | ## Model 1 21 | 22 | - **Architecture**: 23 | - **Threat Model**: One of {L2,Linf,Common Corruptions} 24 | - **eps**: (N/A for common corruptions) 25 | - **Clean accuracy**: 26 | - **Robust accuracy**: 27 | - **Additional data**: true/false 28 | - **Evaluation method**: 29 | - **Checkpoint and code**: (insert here the URL for the checkpoint and the code. If you don't want 30 | to share your checkpoint publicly, you can share it for a private submission via an email 31 | to `adversarial.benchmark@gmail.com`). 32 | 33 | ## Model 2 34 | 35 | ... 36 | 37 | # Model Zoo: 38 | 39 | - [ ] I want to add my models to the Model Zoo (check if true) 40 | - [ ] I use an architecture that is included among 41 | those [here](https://github.com/RobustBench/robustbench/tree/master/robustbench/model_zoo/architectures) or in `timm`. If not, I added the link to the architecture implementation so that it can be added. 42 | - [ ] I agree to release my model(s) under MIT license (check if true) **OR** under a custom license, located here: (put the custom license URL here if a custom license is needed. 
If no URL is specified, we assume that you are fine with MIT) 43 | -------------------------------------------------------------------------------- /CITATION.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{croce2021robustbench, 2 | title = {{RobustBench: a standardized adversarial robustness benchmark}}, 3 | author = {Croce, Francesco and Andriushchenko, Maksym and Sehwag, Vikash and Debenedetti, Edoardo and Flammarion, Nicolas and Chiang, Mung and Mittal, Prateek and Hein, Matthias}, 4 | booktitle = {Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track}, 5 | year = {2021}, 6 | url = {https://openreview.net/forum?id=SSKZPJCt7B} 7 | } -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include robustbench/helper_files/imagenet_class_to_id_map.json 2 | include robustbench/helper_files/imagenet_test_image_ids.txt 3 | include robustbench/model_zoo/architectures/optimal_spca.yaml 4 | -------------------------------------------------------------------------------- /images/aa_robustness_overestimation_vs_venues.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/aa_robustness_overestimation_vs_venues.png -------------------------------------------------------------------------------- /images/aa_robustness_vs_reported.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/aa_robustness_vs_reported.png -------------------------------------------------------------------------------- /images/aa_robustness_vs_standard.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/aa_robustness_vs_standard.png -------------------------------------------------------------------------------- /images/aa_robustness_vs_venues.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/aa_robustness_vs_venues.png -------------------------------------------------------------------------------- /images/aa_robustness_vs_years.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/aa_robustness_vs_years.png -------------------------------------------------------------------------------- /images/leaderboard_screenshot_corruptions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/leaderboard_screenshot_corruptions.png -------------------------------------------------------------------------------- /images/leaderboard_screenshot_l2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/leaderboard_screenshot_l2.png -------------------------------------------------------------------------------- /images/leaderboard_screenshot_linf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/leaderboard_screenshot_linf.png -------------------------------------------------------------------------------- 
/images/plots_analysis_jsons.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/images/plots_analysis_jsons.png -------------------------------------------------------------------------------- /model_info/cifar10/L2/Amini2024MeanSparse_S-WRN-70-16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2406.05927", 3 | "name": "MeanSparse: Post-Training Robustness Enhancement Through Mean-Centered Feature Sparsification", 4 | "authors": "Sajjad Amini, Mohammadreza Teymoorianfard, Shiqing Ma, Amir Houmansadr", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Jun 2024", 9 | "architecture": "MeanSparse WideResNet-70-16", 10 | "eps": "0.5", 11 | "clean_acc": "95.51", 12 | "reported": "87.28", 13 | "autoattack_acc": "87.28", 14 | "external": "84.33", 15 | "footnote": "It adds the MeanSparse operator to the adversarially trained model Wang2023Better_WRN-70-16. 84.33% robust accuracy is due to APGD (both versions) with BPDA.", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar10/L2/Augustin2020Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2003.09461", 3 | "name": "Adversarial Robustness on In- and Out-Distribution Improves Explainability", 4 | "authors": "Maximilian Augustin, Alexander Meinke, Matthias Hein", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ECCV 2020", 9 | "architecture": "ResNet-50", 10 | "eps": "0.5", 11 | "clean_acc": "91.08", 12 | "reported": "73.27", 13 | "autoattack_acc": "72.91", 14 | "footnote": "Extra data used only as OOD dataset." 
15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Augustin2020Adversarial_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2003.09461", 3 | "name": "Adversarial Robustness on In- and Out-Distribution Improves Explainability", 4 | "authors": "Maximilian Augustin, Alexander Meinke, Matthias Hein", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ECCV 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "0.5", 11 | "clean_acc": "92.23", 12 | "reported": "76.25", 13 | "autoattack_acc": "76.25", 14 | "footnote": "Extra data used only as OOD dataset.", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Augustin2020Adversarial_34_10_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2003.09461", 3 | "name": "Adversarial Robustness on In- and Out-Distribution Improves Explainability", 4 | "authors": "Maximilian Augustin, Alexander Meinke, Matthias Hein", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ECCV 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "0.5", 11 | "clean_acc": "93.96", 12 | "reported": "78.79", 13 | "autoattack_acc": "78.79", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Ding2020MMA.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=HkeryxBtPB", 3 | "name": "MMA Training: Direct Input Space Margin Maximization through Adversarial Training", 4 | "authors": "Gavin Weiguang Ding, Yash Sharma, Kry Yik Chau Lui, Ruitong Huang", 5 | 
"additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "WideResNet-28-4", 10 | "eps": "0.5", 11 | "clean_acc": "88.02", 12 | "reported": "66.18", 13 | "autoattack_acc": "66.09", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Engstrom2019Robustness.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/MadryLab/robustness", 3 | "name": "Robustness library", 4 | "authors": "Logan Engstrom, Andrew Ilyas, Hadi Salman, Shibani Santurkar, Dimitris Tsipras", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "GitHub,
Sep 2019", 9 | "architecture": "ResNet-50", 10 | "eps": "0.5", 11 | "clean_acc": "90.83", 12 | "reported": "70.11", 13 | "autoattack_acc": "69.24", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Gowal2020Uncovering.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "0.5", 11 | "clean_acc": "90.90", 12 | "reported": "74.50", 13 | "autoattack_acc": "74.50", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Gowal2020Uncovering_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "0.5", 11 | "clean_acc": "94.74", 12 | "reported": "80.53", 13 | "autoattack_acc": "80.53", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Rade2021Helper_R18_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=BuD2LmNaU3a", 3 | "name": 
"Helper-based Adversarial Training: Reducing Excessive Margin to Achieve a Better Accuracy vs. Robustness Trade-off", 4 | "authors": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "OpenReview, Jun 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "0.5", 11 | "clean_acc": "90.57", 12 | "reported": "76.15", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "76.15", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Rebuffi2021Fixing_28_10_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "0.5", 11 | "clean_acc": "91.79", 12 | "reported": "78.80", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "78.80", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Rebuffi2021Fixing_70_16_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. 
Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "0.5", 11 | "clean_acc": "92.41", 12 | "reported": "80.42", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "80.42", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Rebuffi2021Fixing_70_16_cutmix_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "0.5", 11 | "clean_acc": "95.74", 12 | "reported": "82.32", 13 | "footnote": "", 14 | "autoattack_acc": "82.32", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Rebuffi2021Fixing_R18_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. 
Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "0.5", 11 | "clean_acc": "90.33", 12 | "reported": "75.86", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "75.86", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Rice2020Overfitting.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2002.11569", 3 | "name": "Overfitting in adversarially robust deep learning", 4 | "authors": "Leslie Rice, Eric Wong, J. Zico Kolter", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2020", 9 | "architecture": "PreActResNet-18", 10 | "eps": "0.5", 11 | "clean_acc": "88.67", 12 | "reported": "71.6", 13 | "autoattack_acc": "67.68", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Rony2019Decoupling.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1811.09600", 3 | "name": "Decoupling Direction and Norm for Efficient Gradient-Based L2 Adversarial Attacks and Defenses", 4 | "authors": "J\u00e9r\u00f4me Rony, Luiz G. Hafemann, Luiz S. 
Oliveira, Ismail Ben Ayed, Robert Sabourin, Eric Granger", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "CVPR 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "0.5", 11 | "clean_acc": "89.05", 12 | "reported": "67.6", 13 | "autoattack_acc": "66.44" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Sehwag2021Proxy.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2104.09425", 3 | "name": "Robust Learning Meets Generative Models: Can Proxy Distributions Improve Adversarial Robustness?", 4 | "authors": "Vikash Sehwag, Saeed Mahloujifar, Tinashe Handina, Sihui Dai, Chong Xiang, Mung Chiang, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "0.5", 11 | "clean_acc": "90.93", 12 | "reported": "77.24", 13 | "autoattack_acc": "77.24", 14 | "footnote": "It uses additional 10M synthetic images in training.", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Sehwag2021Proxy_R18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2104.09425", 3 | "name": "Robust Learning Meets Generative Models: Can Proxy Distributions Improve Adversarial Robustness?", 4 | "authors": "Vikash Sehwag, Saeed Mahloujifar, Tinashe Handina, Sihui Dai, Chong Xiang, Mung Chiang, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2022", 9 | "architecture": "ResNet-18", 10 | "eps": "0.5", 11 | "clean_acc": "89.76", 12 | "reported": "74.41", 13 | "autoattack_acc": "74.41", 14 | "footnote": "It uses additional 10M synthetic images in 
training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/L2/Standard.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/RobustBench/robustbench/", 3 | "name": "Standardly trained model", 4 | "authors": "", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "N/A", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "0.5", 11 | "clean_acc": "94.78", 12 | "reported": "0.0", 13 | "autoattack_acc": "0.0" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/L2/Wang2023Better_WRN-28-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.04638", 3 | "name": "Better Diffusion Models Further Improve Adversarial Training", 4 | "authors": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2023", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "0.5", 11 | "clean_acc": "95.16", 12 | "reported": "83.68", 13 | "autoattack_acc": "83.68", 14 | "footnote": "It uses additional 50M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/L2/Wang2023Better_WRN-70-16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.04638", 3 | "name": "Better Diffusion Models Further Improve Adversarial Training", 4 | "authors": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2023", 9 | "architecture": 
"WideResNet-70-16", 10 | "eps": "0.5", 11 | "clean_acc": "95.54", 12 | "reported": "84.97", 13 | "autoattack_acc": "84.97", 14 | "footnote": "It uses additional 50M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/L2/Wu2020Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2004.05884", 3 | "name": "Adversarial Weight Perturbation Helps Robust Generalization", 4 | "authors": "Dongxian Wu, Shu-tao Xia, Yisen Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "0.5", 11 | "clean_acc": "88.51", 12 | "reported": "73.66", 13 | "autoattack_acc": "73.66", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Addepalli2021Towards_RN18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.09852", 3 | "name": "Scaling Adversarial Training to Large Perturbation Bounds", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Gaurang Sriramanan, Shivangi Khare, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ECCV 2022", 9 | "architecture": "ResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "80.24", 12 | "reported": "51.06", 13 | "autoattack_acc": "51.06", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Addepalli2021Towards_WRN34.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.09852", 3 | "name": "Scaling Adversarial Training to Large 
Perturbation Bounds", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Gaurang Sriramanan, Shivangi Khare, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ECCV 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "85.32", 12 | "reported": "58.04", 13 | "autoattack_acc": "58.04", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Addepalli2022Efficient_RN18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.15318", 3 | "name": "Efficient and Effective Augmentation Strategy for Adversarial Training", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "ResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "85.71", 12 | "reported": "52.50", 13 | "autoattack_acc": "52.48", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Addepalli2022Efficient_WRN_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.15318", 3 | "name": "Efficient and Effective Augmentation Strategy for Adversarial Training", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "88.71", 12 | "reported": "57.81", 13 | "autoattack_acc": "57.81", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- 
/model_info/cifar10/Linf/Alayrac2019Labels.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1905.13725", 3 | "name": "Are Labels Required for Improving Adversarial Robustness?", 4 | "authors": "Jonathan Uesato, Jean-Baptiste Alayrac, Po-Sen Huang, Robert Stanforth, Alhussein Fawzi, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "86.46", 12 | "reported": "56.30", 13 | "autoattack_acc": "56.03" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Alfarra2020ClusTR.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2006.07682", 3 | "name": "ClusTR: Clustering Training for Robustness", 4 | "authors": "Motasem Alfarra, Juan C.
Perez, Adel Bibi, Ali Thabet, Pablo Arbelaez, Bernard Ghanem", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Jun 2020", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "91.03", 12 | "reported": "74.04", 13 | "autoattack_acc": "0.00" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Amini2024MeanSparse_Ra_WRN_70_16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2406.05927", 3 | "name": "MeanSparse: Post-Training Robustness Enhancement Through Mean-Centered Feature Sparsification", 4 | "authors": "Sajjad Amini, Mohammadreza Teymoorianfard, Shiqing Ma, Amir Houmansadr", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Jun 2024", 9 | "architecture": "MeanSparse RaWideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "93.24", 12 | "reported": "72.08", 13 | "autoattack_acc": "72.08", 14 | "external": "68.94", 15 | "footnote": "It adds the MeanSparse operator to the adversarially trained model Peng2023Robust. 
68.94% robust accuracy is due to APGD (both versions) with BPDA.", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Amini2024MeanSparse_S-WRN-94-16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2406.05927", 3 | "name": "MeanSparse: Post-Training Robustness Enhancement Through Mean-Centered Feature Sparsification", 4 | "authors": "Sajjad Amini, Mohammadreza Teymoorianfard, Shiqing Ma, Amir Houmansadr", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Jun 2024", 9 | "architecture": "MeanSparse WideResNet-94-16", 10 | "eps": "8/255", 11 | "clean_acc": "93.60", 12 | "reported": "75.28", 13 | "autoattack_acc": "75.28", 14 | "external": "73.10", 15 | "footnote": "It adds the MeanSparse operator to the adversarially trained model Bartoldson2024Adversarial_WRN-94-16. 73.10% robust accuracy is due to APGD (both versions) with BPDA.", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Andriushchenko2020Understanding.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2007.02617", 3 | "name": "Understanding and Improving Fast Adversarial Training", 4 | "authors": "Maksym Andriushchenko, Nicolas Flammarion", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "79.84", 12 | "reported": "44.54", 13 | "footnote": "Focuses on fast adversarial training.", 14 | "autoattack_acc": "43.93" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Atzmon2019Controlling.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1905.11911", 3 | "name": "Controlling Neural Level Sets", 4 | "authors": "Matan Atzmon, Niv Haim, Lior Yariv, Ofer Israelov, Haggai Maron, Yaron Lipman", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "ResNet-18", 10 | "eps": "0.031", 11 | "clean_acc": "81.30", 12 | "reported": "43.17", 13 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255.", 14 | "autoattack_acc": "40.22" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Bai2023Improving_edm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2301.12554", 3 | "name": "Improving the Accuracy-Robustness Trade-off of Classifiers via Adaptive Smoothing", 4 | "authors": "Yatong Bai, Brendon G Anderson, Aerin Kim, Somayeh Sojoudi", 5 | "additional_data": true, 6 | "number_forward_passes": 2, 7 | "dataset": "cifar10", 8 | "venue": "SIMODS 2024", 9 | "architecture": "ResNet-152 + WideResNet-70-16 + mixing network", 10 | "eps": "8/255", 11 | "clean_acc": "95.23", 12 | "reported": "68.06", 13 | "autoattack_acc": "68.06", 14 | "footnote": "It uses an ensemble of networks. The robust base classifier uses 50M synthetic images.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Bai2024MixedNUTS.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2402.02263", 3 | "name": "MixedNUTS: Training-Free Accuracy-Robustness Balance via Nonlinearly Mixed Classifiers", 4 | "authors": "Yatong Bai, Mo Zhou, Vishal M. 
Patel, Somayeh Sojoudi", 5 | "additional_data": true, 6 | "number_forward_passes": 2, 7 | "dataset": "cifar10", 8 | "venue": "TMLR, Aug 2024", 9 | "architecture": "ResNet-152 + WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "95.19", 12 | "reported": "69.71", 13 | "autoattack_acc": "70.08", 14 | "external": "69.71", 15 | "footnote": "It uses an ensemble of networks. The robust base classifier uses 50M synthetic images. 69.71% robust accuracy is due to the original evaluation (Adaptive AutoAttack)", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Bartoldson2024Adversarial_WRN-82-8.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2404.09349", 3 | "name": "Adversarial Robustness Limits via Scaling-Law and Human-Alignment Studies", 4 | "authors": "Brian Bartoldson, James Diffenderfer, Konstantinos Parasyris, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2024", 9 | "architecture": "WideResNet-82-8", 10 | "eps": "8/255", 11 | "clean_acc": "93.11", 12 | "reported": "71.59", 13 | "autoattack_acc": "71.59", 14 | "footnote": "It uses additional 300M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Bartoldson2024Adversarial_WRN-94-16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2404.09349", 3 | "name": "Adversarial Robustness Limits via Scaling-Law and Human-Alignment Studies", 4 | "authors": "Brian Bartoldson, James Diffenderfer, Konstantinos Parasyris, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2024", 9 | "architecture": 
"WideResNet-94-16", 10 | "eps": "8/255", 11 | "clean_acc": "93.68", 12 | "reported": "73.71", 13 | "autoattack_acc": "73.71", 14 | "footnote": "It uses additional 300M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Carmon2019Unlabeled.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1905.13736", 3 | "name": "Unlabeled Data Improves Adversarial Robustness", 4 | "authors": "Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, Percy Liang, John C. Duchi", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "89.69", 12 | "reported": "62.5", 13 | "autoattack_acc": "59.53", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Chan2020Jacobian.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1912.10185", 3 | "name": "Jacobian Adversarially Regularized Networks for Robustness", 4 | "authors": "Alvin Chan, Yi Tay, Yew Soon Ong, Jie Fu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "93.79", 12 | "reported": "15.5", 13 | "autoattack_acc": "0.26" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Chen2020Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2003.12862", 3 | "name": "Adversarial Robustness: From Self-Supervised Pre-Training to Fine-Tuning", 4 | "authors": "Tianlong 
Chen, Sijia Liu, Shiyu Chang, Yu Cheng, Lisa Amini, Zhangyang Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 3, 7 | "dataset": "cifar10", 8 | "venue": "CVPR 2020", 9 | "architecture": "ResNet-50", 10 | "eps": "8/255", 11 | "clean_acc": "86.04", 12 | "reported": "54.64", 13 | "footnote": "Uses ensembles of 3 models.", 14 | "autoattack_acc": "51.56" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Chen2020Efficient.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.01278", 3 | "name": "Efficient Robust Training via Backward Smoothing", 4 | "authors": "Jinghui Chen and Yu Cheng and Zhe Gan and Quanquan Gu and Jingjing Liu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "85.32", 12 | "reported": "51.13", 13 | "footnote": "", 14 | "autoattack_acc": "51.12" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Chen2021LTD_WRN34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2111.02331", 3 | "name": "LTD: Low Temperature Distillation for Robust Adversarial Training", 4 | "authors": "Erh-Chung Chen, Che-Rung Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Nov 2021", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "85.21", 12 | "reported": "56.94", 13 | "footnote": "", 14 | "autoattack_acc": "56.94", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Chen2021LTD_WRN34_20.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2111.02331", 3 | "name": "LTD: Low Temperature Distillation for Robust Adversarial Training", 4 | "authors": "Erh-Chung Chen, Che-Rung Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Nov 2021", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "86.03", 12 | "reported": "57.71", 13 | "footnote": "", 14 | "autoattack_acc": "57.71", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Chen2024Data_WRN_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://doi.org/10.1016/j.patcog.2024.110394", 3 | "name": "Data filtering for efficient adversarial training", 4 | "authors": "Erh-Chung Chen, Che-Rung Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "Pattern Recognition 2024", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "86.54", 12 | "reported": "57.30", 13 | "footnote": "", 14 | "autoattack_acc": "57.30", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Chen2024Data_WRN_34_20.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://doi.org/10.1016/j.patcog.2024.110394", 3 | "name": "Data filtering for efficient adversarial training", 4 | "authors": "Erh-Chung Chen, Che-Rung Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "Pattern Recognition 2024", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "86.10", 12 | "reported": "58.09", 13 | "footnote": "", 14 | "autoattack_acc": 
"58.09", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Cui2020Learnable_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2011.11164", 3 | "name": "Learnable Boundary Guided Adversarial Training", 4 | "authors": "Jiequan Cui and Shu Liu and Liwei Wang and Jiaya Jia", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICCV 2021", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "0.031", 11 | "clean_acc": "88.22", 12 | "reported": "52.86", 13 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255", 14 | "autoattack_acc": "52.86", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Cui2020Learnable_34_20.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2011.11164", 3 | "name": "Learnable Boundary Guided Adversarial Training", 4 | "authors": "Jiequan Cui and Shu Liu and Liwei Wang and Jiaya Jia", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICCV 2021", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "0.031", 11 | "clean_acc": "88.70", 12 | "reported": "53.57", 13 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255", 14 | "autoattack_acc": "53.57", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Cui2023Decoupled_WRN-28-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2305.13948", 3 | "name": "Decoupled Kullback-Leibler Divergence Loss", 4 | "authors": "Jiequan Cui, Zhuotao Tian, 
Zhisheng Zhong, Xiaojuan Qi, Bei Yu, Hanwang Zhang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2024", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "92.16", 12 | "reported": "67.73", 13 | "autoattack_acc": "67.73", 14 | "unreliable": false, 15 | "footnote": "It uses additional 20M synthetic images in training." 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Cui2023Decoupled_WRN-34-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2305.13948", 3 | "name": "Decoupled Kullback-Leibler Divergence Loss", 4 | "authors": "Jiequan Cui, Zhuotao Tian, Zhisheng Zhong, Xiaojuan Qi, Bei Yu, Hanwang Zhang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2024", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "85.31", 12 | "reported": "57.09", 13 | "autoattack_acc": "57.09", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Dai2021Parameterizing.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2110.05626", 3 | "name": "Parameterizing Activation Functions for Adversarial Robustness", 4 | "authors": "Sihui Dai, Saeed Mahloujifar, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2021", 9 | "architecture": "WideResNet-28-10-PSSiLU", 10 | "eps": "8/255", 11 | "clean_acc": "87.02", 12 | "reported": "61.55", 13 | "autoattack_acc": "61.55", 14 | "footnote": "It uses additional ~6M synthetic images in training.", 15 | "unreliable": false 16 | } 17 | 
-------------------------------------------------------------------------------- /model_info/cifar10/Linf/Debenedetti2022Light_XCiT-L12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-L12", 10 | "eps": "8/255", 11 | "clean_acc": "91.73", 12 | "reported": "57.58", 13 | "autoattack_acc": "57.58", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Debenedetti2022Light_XCiT-M12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-M12", 10 | "eps": "8/255", 11 | "clean_acc": "91.30", 12 | "reported": "57.27", 13 | "autoattack_acc": "57.27", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Debenedetti2022Light_XCiT-S12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-S12", 10 | "eps": "8/255", 11 
| "clean_acc": "90.06", 12 | "reported": "56.14", 13 | "autoattack_acc": "56.14", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Ding2020MMA.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=HkeryxBtPB", 3 | "name": "MMA Training: Direct Input Space Margin Maximization through Adversarial Training", 4 | "authors": "Gavin Weiguang Ding, Yash Sharma, Kry Yik Chau Lui, Ruitong Huang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "WideResNet-28-4", 10 | "eps": "8/255", 11 | "clean_acc": "84.36", 12 | "reported": "47.18", 13 | "autoattack_acc": "41.44" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Engstrom2019Robustness.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/MadryLab/robustness", 3 | "name": "Robustness library", 4 | "authors": "Logan Engstrom, Andrew Ilyas, Hadi Salman, Shibani Santurkar, Dimitris Tsipras", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "GitHub,
Oct 2019", 9 | "architecture": "ResNet-50", 10 | "eps": "8/255", 11 | "clean_acc": "87.03", 12 | "reported": "53.29", 13 | "autoattack_acc": "49.25" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Gowal2020Uncovering_28_10_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "89.48", 12 | "reported": "62.76", 13 | "autoattack_acc": "62.80", 14 | "external": "62.76", 15 | "footnote": "62.76% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Gowal2020Uncovering_34_20.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "85.64", 12 | "reported": "56.82", 13 | "autoattack_acc": "56.86", 14 | "external": "56.82", 15 | "footnote": "56.82% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | 
-------------------------------------------------------------------------------- /model_info/cifar10/Linf/Gowal2020Uncovering_70_16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "85.29", 12 | "reported": "57.14", 13 | "autoattack_acc": "57.20", 14 | "external": "57.14", 15 | "footnote": "57.14% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Gowal2020Uncovering_70_16_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "91.10", 12 | "reported": "65.87", 13 | "autoattack_acc": "65.88", 14 | "external": "65.87", 15 | "footnote": "65.87% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Gowal2021Improving_28_10_ddpm_100m.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2110.09468", 3 | "name": "Improving Robustness using Generated Data", 4 | "authors": "Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "87.50", 12 | "reported": "63.38", 13 | "autoattack_acc": "63.44", 14 | "external": "63.38", 15 | "footnote": "It uses additional 100M synthetic images in training. 63.38% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Gowal2021Improving_70_16_ddpm_100m.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2110.09468", 3 | "name": "Improving Robustness using Generated Data", 4 | "authors": "Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "88.74", 12 | "reported": "66.10", 13 | "autoattack_acc": "66.11", 14 | "external": "66.10", 15 | "footnote": "It uses additional 100M synthetic images in training. 
66.10% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Gowal2021Improving_R18_ddpm_100m.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2110.09468", 3 | "name": "Improving Robustness using Generated Data", 4 | "authors": "Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "87.35", 12 | "reported": "58.50", 13 | "autoattack_acc": "58.63", 14 | "external": "58.50", 15 | "footnote": "It uses additional 100M synthetic images in training. 58.50% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Hendrycks2019Using.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1901.09960", 3 | "name": "Using Pre-Training Can Improve Model Robustness and Uncertainty", 4 | "authors": "Dan Hendrycks, Kimin Lee, Mantas Mazeika", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "87.11", 12 | "reported": "57.4", 13 | "autoattack_acc": "54.92", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Huang2020Self.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": 
"https://arxiv.org/abs/2002.10319", 3 | "name": "Self-Adaptive Training: beyond Empirical Risk Minimization", 4 | "authors": "Lang Huang, Chao Zhang, Hongyang Zhang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "0.031", 11 | "clean_acc": "83.48", 12 | "reported": "58.03", 13 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255.", 14 | "autoattack_acc": "53.34" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Huang2021Exploring.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2110.03825", 3 | "name": "Exploring Architectural Ingredients of Adversarially Robust Deep Neural Networks", 4 | "authors": "Hanxun Huang, Yisen Wang, Sarah Monazam Erfani, Quanquan Gu, James Bailey, Xingjun Ma", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-34-R", 10 | "eps": "8/255", 11 | "clean_acc": "90.56", 12 | "reported": "61.56", 13 | "autoattack_acc": "61.56", 14 | "unreliable": false 15 | 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Huang2021Exploring_ema.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2110.03825", 3 | "name": "Exploring Architectural Ingredients of Adversarially Robust Deep Neural Networks", 4 | "authors": "Hanxun Huang, Yisen Wang, Sarah Monazam Erfani, Quanquan Gu, James Bailey, Xingjun Ma", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-34-R", 10 | "eps": "8/255", 11 | "clean_acc": "91.23", 12 | "reported": "62.54", 
13 | "autoattack_acc": "62.54", 14 | "footnote": "Uses exponential moving average (EMA)", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Huang2022Revisiting_WRN-A4.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2212.11005", 3 | "name": "Revisiting Residual Networks for Adversarial Robustness: An Architectural Perspective", 4 | "authors": "Shihua Huang, Zhichao Lu, Kalyanmoy Deb, Vishnu Naresh Boddeti", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Dec. 2022", 9 | "architecture": "WideResNet-A4", 10 | "eps": "8/255", 11 | "clean_acc": "91.58", 12 | "reported": "65.79", 13 | "autoattack_acc": "65.79", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Jang2019Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "http://openaccess.thecvf.com/content_ICCV_2019/html/Jang_Adversarial_Defense_via_Learning_to_Generate_Diverse_Attacks_ICCV_2019_paper.html", 3 | "name": "Adversarial Defense via Learning to Generate Diverse Attacks", 4 | "authors": "Yunseok Jang, Tianchen Zhao, Seunghoon Hong, Honglak Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICCV 2019", 9 | "architecture": "ResNet-20", 10 | "eps": "8/255", 11 | "clean_acc": "78.91", 12 | "reported": "37.40", 13 | "autoattack_acc": "34.95" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Jia2022LAS-AT_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2203.06616", 3 | "name": "LAS-AT: Adversarial Training with Learnable Attack 
Strategy", 4 | "authors": "Xiaojun Jia, Yong Zhang, Baoyuan Wu, Ke Ma, Jue Wang, Xiaochun Cao", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "84.98", 12 | "reported": "56.26", 13 | "autoattack_acc": "56.26", 14 | "footnote": "", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Jia2022LAS-AT_70_16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2203.06616", 3 | "name": "LAS-AT: Adversarial Training with Learnable Attack Strategy", 4 | "authors": "Xiaojun Jia, Yong Zhang, Baoyuan Wu, Ke Ma, Jue Wang, Xiaochun Cao", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2022", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "85.66", 12 | "reported": "57.61", 13 | "autoattack_acc": "57.61", 14 | "footnote": "", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/JinRinard2020Manifold.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2003.04286", 3 | "name": "Manifold Regularization for Adversarial Robustness", 4 | "authors": "Charles Jin, Martin Rinard", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2020", 9 | "architecture": "ResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "90.84", 12 | "reported": "71.22", 13 | "autoattack_acc": "1.35" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Kang2021Stable.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2110.12976", 3 | "name": "Stable Neural ODE with Lyapunov-Stable Equilibrium Points for Defending Against Adversarial Attacks", 4 | "authors": "Qiyu Kang, Yang Song, Qinxu Ding, Wee Peng Tay", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-70-16, Neural ODE block", 10 | "eps": "8/255", 11 | "clean_acc": "93.73", 12 | "reported": "71.28", 13 | "autoattack_acc": "71.28", 14 | "external": "64.20", 15 | "footnote": "Based on the model Rebuffi2021Fixing_70_16_cutmix_extra. 64.20% robust accuracy is due to AutoAttack + transfer APGD from Rebuffi2021Fixing_70_16_cutmix_extra", 16 | "unreliable": true 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/KimWang2020Sensible.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=rJlf_RVKwr", 3 | "name": "Sensible adversarial learning", 4 | "authors": "Jungeum Kim, Xiao Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "OpenReview, Sep 2019", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "91.51", 12 | "reported": "57.23", 13 | "autoattack_acc": "34.22" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Kumari2019Harnessing.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1905.05186", 3 | "name": "Harnessing the Vulnerability of Latent Layers in Adversarially Trained Models", 4 | "authors": "Mayank Singh, Abhishek Sinha, Nupur Kumari, Harshitha Machiraju, Balaji Krishnamurthy, Vineeth N Balasubramanian", 5 | "additional_data": 
false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "IJCAI 2019", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "87.80", 12 | "reported": "53.04", 13 | "autoattack_acc": "49.12" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Kundu2020Tunable.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2011.03083", 3 | "name": "A Tunable Robust Pruning Framework Through Dynamic Network Rewiring of DNNs", 4 | "authors": "Kundu, Souvik and Nazemi, Mahdi and Beerel, Peter A and Pedram, Massoud", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ASP-DAC 2021", 9 | "architecture": "ResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "87.32", 12 | "reported": "47.35", 13 | "footnote": "Compressed model", 14 | "autoattack_acc": "40.41", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Madry2018Towards.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1706.06083", 3 | "name": "Towards Deep Learning Models Resistant to Adversarial Attacks", 4 | "authors": "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, Adrian Vladu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2018", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "87.14", 12 | "reported": "47.04", 13 | "autoattack_acc": "44.04" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Mao2019Metric.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": 
"http://papers.nips.cc/paper/8339-metric-learning-for-adversarial-robustness", 3 | "name": "Metric Learning for Adversarial Robustness", 4 | "authors": "Chengzhi Mao, Ziyuan Zhong, Junfeng Yang, Carl Vondrick, Baishakhi Ray", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "86.21", 12 | "reported": "50.03", 13 | "autoattack_acc": "47.41" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Moosavi-Dezfooli2019Robustness.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "http://openaccess.thecvf.com/content_CVPR_2019/html/Moosavi-Dezfooli_Robustness_via_Curvature_Regularization_and_Vice_Versa_CVPR_2019_paper.html", 3 | "name": "Robustness via Curvature Regularization, and Vice Versa", 4 | "authors": "Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, Jonathan Uesato, Pascal Frossard", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "CVPR 2019", 9 | "architecture": "ResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "83.11", 12 | "reported": "41.4", 13 | "autoattack_acc": "38.50" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Mustafa2019Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1904.00887", 3 | "name": "Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks", 4 | "authors": "Aamir Mustafa, Salman Khan, Munawar Hayat, Roland Goecke, Jianbing Shen, Ling Shao", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICCV 2019", 9 | "architecture": "ResNet-110", 10 | "eps": "8/255", 11 | "clean_acc": "89.16", 12 | "reported": 
"32.32", 13 | "autoattack_acc": "0.28" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Pang2020Bag.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.00467", 3 | "name": "Bag of Tricks for Adversarial Training", 4 | "authors": "Tianyu Pang and Xiao Yang and Yinpeng Dong and Hang Su and Jun Zhu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2021", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "86.43", 12 | "reported": "54.39", 13 | "footnote": "", 14 | "autoattack_acc": "54.39" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Pang2020Boosting.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2002.08619", 3 | "name": "Boosting Adversarial Training with Hypersphere Embedding", 4 | "authors": "Tianyu Pang, Xiao Yang, Yinpeng Dong, Kun Xu, Hang Su, Jun Zhu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "85.14", 12 | "reported": "53.74", 13 | "autoattack_acc": "53.74", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Pang2020Rethinking.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1905.10626", 3 | "name": "Rethinking Softmax Cross-Entropy Loss for Adversarial Robustness", 4 | "authors": "Tianyu Pang, Kun Xu, Yinpeng Dong, Chao Du, Ning Chen, Jun Zhu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 
9 | "architecture": "ResNet-32", 10 | "eps": "8/255", 11 | "clean_acc": "80.89", 12 | "reported": "55.00", 13 | "autoattack_acc": "43.48" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Pang2022Robustness_WRN28_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.10103.pdf", 3 | "name": "Robustness and Accuracy Could Be Reconcilable by (Proper) Definition", 4 | "authors": "Tianyu Pang, Min Lin, Xiao Yang, Jun Zhu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2022", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "88.61", 12 | "reported": "61.04", 13 | "autoattack_acc": "61.04", 14 | "footnote": "It uses additional 1M synthetic images in training.", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Pang2022Robustness_WRN70_16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.10103.pdf", 3 | "name": "Robustness and Accuracy Could Be Reconcilable by (Proper) Definition", 4 | "authors": "Tianyu Pang, Min Lin, Xiao Yang, Jun Zhu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2022", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "89.01", 12 | "reported": "63.35", 13 | "autoattack_acc": "63.35", 14 | "footnote": "It uses additional 1M synthetic images in training.", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Peng2023Robust.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": 
"https://arxiv.org/abs/2308.16258", 3 | "name": "Robust Principles: Architectural Design Principles for Adversarially Robust CNNs", 4 | "authors": "ShengYun Peng, Weilin Xu, Cory Cornelius, Matthew Hull, Kevin Li, Rahul Duggal, Mansi Phute, Jason Martin, and Duen Horng Chau", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "BMVC 2023", 9 | "architecture": "RaWideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "93.27", 12 | "reported": "71.07", 13 | "autoattack_acc": "71.07", 14 | "footnote": "It uses additional 50M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Qin2019Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1907.02610v2", 3 | "name": "Adversarial Robustness through Local Linearization", 4 | "authors": "Chongli Qin, James Martens, Sven Gowal, Dilip Krishnan, Krishnamurthy Dvijotham, Alhussein Fawzi, Soham De, Robert Stanforth, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "WideResNet-40-8", 10 | "eps": "8/255", 11 | "clean_acc": "86.28", 12 | "reported": "52.81", 13 | "autoattack_acc": "52.84", 14 | "external": "52.84" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rade2021Helper_R18_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=BuD2LmNaU3a", 3 | "name": "Helper-based Adversarial Training: Reducing Excessive Margin to Achieve a Better Accuracy vs. 
Robustness Trade-off", 4 | "authors": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "OpenReview, Jun 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "86.86", 12 | "reported": "57.09", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "57.09", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rade2021Helper_R18_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=BuD2LmNaU3a", 3 | "name": "Helper-based Adversarial Training: Reducing Excessive Margin to Achieve a Better Accuracy vs. Robustness Trade-off", 4 | "authors": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "OpenReview, Jun 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "89.02", 12 | "reported": "57.67", 13 | "autoattack_acc": "57.67", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rade2021Helper_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=BuD2LmNaU3a", 3 | "name": "Helper-based Adversarial Training: Reducing Excessive Margin to Achieve a Better Accuracy vs. 
Robustness Trade-off", 4 | "authors": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "OpenReview, Jun 2021", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "88.16", 12 | "reported": "60.97", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "60.97", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rade2021Helper_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=BuD2LmNaU3a", 3 | "name": "Helper-based Adversarial Training: Reducing Excessive Margin to Achieve a Better Accuracy vs. Robustness Trade-off", 4 | "authors": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "OpenReview, Jun 2021", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "91.47", 12 | "reported": "62.83", 13 | "autoattack_acc": "62.83", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rebuffi2021Fixing_106_16_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. 
Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-106-16", 10 | "eps": "8/255", 11 | "clean_acc": "88.50", 12 | "reported": "64.58", 13 | "autoattack_acc": "64.64", 14 | "external": "64.58", 15 | "footnote": "It uses additional 1M synthetic images in training. 64.58% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rebuffi2021Fixing_28_10_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "87.33", 12 | "reported": "60.73", 13 | "autoattack_acc": "60.75", 14 | "external": "60.73", 15 | "footnote": "It uses additional 1M synthetic images in training. 60.73% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rebuffi2021Fixing_70_16_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. 
Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "88.54", 12 | "reported": "64.20", 13 | "autoattack_acc": "64.25", 14 | "external": "64.20", 15 | "footnote": "It uses additional 1M synthetic images in training. 64.20% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rebuffi2021Fixing_70_16_cutmix_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "92.23", 12 | "reported": "66.56", 13 | "autoattack_acc": "66.58", 14 | "external": "66.56", 15 | "footnote": "66.56% robust accuracy is due to the original evaluation (AutoAttack + MultiTargeted)", 16 | "unreliable": false 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rebuffi2021Fixing_R18_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. 
Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "83.53", 12 | "reported": "56.66", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "56.66", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Rice2020Overfitting.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2002.11569", 3 | "name": "Overfitting in adversarially robust deep learning", 4 | "authors": "Leslie Rice, Eric Wong, J. Zico Kolter", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2020", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "85.34", 12 | "reported": "58", 13 | "autoattack_acc": "53.42", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Sehwag2020Hydra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2002.10509", 3 | "name": "HYDRA: Pruning Adversarially Robust Neural Networks", 4 | "authors": "Vikash Sehwag, Shiqi Wang, Prateek Mittal, Suman Jana", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "88.98", 12 | "reported": "62.24", 13 | "footnote": "Compressed model", 14 | "autoattack_acc": "57.14", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Sehwag2021Proxy.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2104.09425", 3 | "name": "Robust Learning Meets Generative Models: Can Proxy Distributions Improve Adversarial Robustness?", 4 | "authors": "Vikash Sehwag, Saeed Mahloujifar, Tinashe Handina, Sihui Dai, Chong Xiang, Mung Chiang, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "86.68", 12 | "reported": "60.30", 13 | "autoattack_acc": "60.27", 14 | "footnote": "It uses additional 10M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Sehwag2021Proxy_R18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2104.09425", 3 | "name": "Robust Learning Meets Generative Models: Can Proxy Distributions Improve Adversarial Robustness?", 4 | "authors": "Vikash Sehwag, Saeed Mahloujifar, Tinashe Handina, Sihui Dai, Chong Xiang, Mung Chiang, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2022", 9 | "architecture": "ResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "84.59", 12 | "reported": "55.54", 13 | "autoattack_acc": "55.54", 14 | "footnote": "It uses additional 10M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Sehwag2021Proxy_ResNest152.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2104.09425", 3 | "name": "Robust Learning Meets Generative Models: Can Proxy Distributions Improve Adversarial Robustness?", 4 | "authors": "Vikash 
Sehwag, Saeed Mahloujifar, Tinashe Handina, Sihui Dai, Chong Xiang, Mung Chiang, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2022", 9 | "architecture": "ResNest152", 10 | "eps": "8/255", 11 | "clean_acc": "87.30", 12 | "reported": "62.79", 13 | "autoattack_acc": "62.79", 14 | "footnote": "It uses additional 10M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Shafahi2019Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1904.12843", 3 | "name": "Adversarial Training for Free!", 4 | "authors": "Ali Shafahi, Mahyar Najibi, Amin Ghiasi, Zheng Xu, John Dickerson, Christoph Studer, Larry S. Davis, Gavin Taylor, Tom Goldstein", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "86.11", 12 | "reported": "46.19", 13 | "autoattack_acc": "41.47" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Sitawarin2020Improving.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2003.09347", 3 | "name": "Improving Adversarial Robustness Through Progressive Hardening", 4 | "authors": "Chawin Sitawarin and Supriyo Chakraborty and David Wagner", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "86.84", 12 | "reported": "50.72", 13 | "footnote": "", 14 | "autoattack_acc": "50.72" 15 | } 16 | -------------------------------------------------------------------------------- 
/model_info/cifar10/Linf/Sridhar2021Robust.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.02078", 3 | "name": "Improving Neural Network Robustness via Persistency of Excitation", 4 | "authors": "Kaustubh Sridhar and Oleg Sokolsky and Insup Lee and James Weimer", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ACC 2022", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "89.46", 12 | "reported": "59.66", 13 | "autoattack_acc": "59.66", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Sridhar2021Robust_34_15.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.02078", 3 | "name": "Improving Neural Network Robustness via Persistency of Excitation", 4 | "authors": "Kaustubh Sridhar and Oleg Sokolsky and Insup Lee and James Weimer", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ACC 2022", 9 | "architecture": "WideResNet-34-15", 10 | "eps": "8/255", 11 | "clean_acc": "86.53", 12 | "reported": "60.41", 13 | "autoattack_acc": "60.41", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Standard.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/RobustBench/robustbench/", 3 | "name": "Standardly trained model", 4 | "authors": "", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "N/A", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "94.78", 12 | "reported": "0.0", 13 | "autoattack_acc": "0.0" 14 | } 15 | 
-------------------------------------------------------------------------------- /model_info/cifar10/Linf/Wang2020Improving.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=rklOg6EFwS", 3 | "name": "Improving Adversarial Robustness Requires Revisiting Misclassified Examples", 4 | "authors": "Yisen Wang, Difan Zou, Jinfeng Yi, James Bailey, Xingjun Ma, Quanquan Gu", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "87.50", 12 | "reported": "65.04", 13 | "autoattack_acc": "56.29", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Wang2023Better_WRN-28-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.04638", 3 | "name": "Better Diffusion Models Further Improve Adversarial Training", 4 | "authors": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2023", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "92.44", 12 | "reported": "67.31", 13 | "autoattack_acc": "67.31", 14 | "footnote": "It uses additional 20M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Wang2023Better_WRN-70-16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.04638", 3 | "name": "Better Diffusion Models Further Improve Adversarial Training", 4 | "authors": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, Shuicheng Yan", 5 | 
"additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2023", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "93.25", 12 | "reported": "70.69", 13 | "autoattack_acc": "70.69", 14 | "footnote": "It uses additional 50M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/WangZhang2019Bilateral.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "http://openaccess.thecvf.com/content_ICCV_2019/html/Wang_Bilateral_Adversarial_Training_Towards_Fast_Training_of_More_Robust_Models_ICCV_2019_paper.html", 3 | "name": "Bilateral Adversarial Training: Towards Fast Training of More Robust Models Against Adversarial Attacks", 4 | "authors": "Jianyu Wang, Haichao Zhang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICCV 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "92.80", 12 | "reported": "58.6", 13 | "autoattack_acc": "29.35" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Wong2020Fast.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2001.03994", 3 | "name": "Fast is better than free: Revisiting adversarial training", 4 | "authors": "Eric Wong, Leslie Rice, J. 
Zico Kolter", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "83.34", 12 | "reported": "46.06", 13 | "footnote": "Focuses on fast adversarial training.", 14 | "autoattack_acc": "43.21" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Wu2020Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2004.05884", 3 | "name": "Adversarial Weight Perturbation Helps Robust Generalization", 4 | "authors": "Dongxian Wu, Shu-tao Xia, Yisen Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "85.36", 12 | "reported": "56.17", 13 | "autoattack_acc": "56.17", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Wu2020Adversarial_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2004.05884", 3 | "name": "Adversarial Weight Perturbation Helps Robust Generalization", 4 | "authors": "Dongxian Wu, Shu-tao Xia, Yisen Wang", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "88.25", 12 | "reported": "60.04", 13 | "autoattack_acc": "60.04", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Wu2020Do.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.01279", 3 | 
"name": "Do Wider Neural Networks Really Help Adversarial Robustness?", 4 | "authors": "Boxi Wu and Jinghui Chen and Deng Cai and Xiaofei He and Quanquan Gu", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-34-15", 10 | "eps": "8/255", 11 | "clean_acc": "87.67", 12 | "reported": "60.65", 13 | "footnote": "", 14 | "autoattack_acc": "60.65" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Xiao2020Enhancing.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1905.10510", 3 | "name": "Enhancing Adversarial Defense by k-Winners-Take-All", 4 | "authors": "Chang Xiao, Peilin Zhong, Changxi Zheng", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "DenseNet-121", 10 | "eps": "0.031", 11 | "clean_acc": "79.28", 12 | "reported": "52.4", 13 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255.
7.40% robust accuracy is due to 1 restart of APGD-CE and 30 restarts of Square Attack
Note: this adaptive evaluation (Section 5) reports 0.16% robust accuracy on a different model (adversarially trained ResNet-18).", 14 | "autoattack_acc": "18.50", 15 | "external": "7.40", 16 | "unreliable": true 17 | } 18 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Xu2023Exploring_WRN-28-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.03015", 3 | "name": "Exploring and Exploiting Decision Boundary Dynamics for Adversarial Robustness", 4 | "authors": "Yuancheng Xu, Yanchao Sun, Micah Goldblum, Tom Goldstein, Furong Huang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2023", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "93.69", 12 | "reported": "63.89", 13 | "autoattack_acc": "63.89", 14 | "footnote": "It uses additional 10M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Zhang2019Theoretically.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1901.08573", 3 | "name": "Theoretically Principled Trade-off between Robustness and Accuracy", 4 | "authors": "Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric P. Xing, Laurent El Ghaoui, Michael I. 
Jordan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2019", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "0.031", 11 | "clean_acc": "84.92", 12 | "reported": "56.43", 13 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255.", 14 | "autoattack_acc": "53.08" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Zhang2019You.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1905.00877", 3 | "name": "You Only Propagate Once: Accelerating Adversarial Training via Maximal Principle", 4 | "authors": "Dinghuai Zhang, Tianyuan Zhang, Yiping Lu, Zhanxing Zhu, Bin Dong", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "87.20", 12 | "reported": "47.98", 13 | "footnote": "Focuses on fast adversarial training.", 14 | "autoattack_acc": "44.83" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Zhang2020Attacks.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2002.11242", 3 | "name": "Attacks Which Do Not Kill Training Make Adversarial Learning Stronger", 4 | "authors": "Jingfeng Zhang, Xilie Xu, Bo Han, Gang Niu, Lizhen Cui, Masashi Sugiyama, Mohan Kankanhalli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICML 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "84.52", 12 | "reported": "54.36", 13 | "autoattack_acc": "53.51", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- 
/model_info/cifar10/Linf/Zhang2020Geometry.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.01736", 3 | "name": "Geometry-aware Instance-reweighted Adversarial Training", 4 | "authors": "Jingfeng Zhang, Jianing Zhu, Gang Niu, Bo Han, Masashi Sugiyama, Mohan Kankanhalli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2021", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "89.36", 12 | "reported": "59.64", 13 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255.", 14 | "autoattack_acc": "59.64", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/Zhang2020Towards.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1906.06316", 3 | "name": "Towards Stable and Efficient Training of Verifiably Robust Neural Networks", 4 | "authors": "Huan Zhang, Hongge Chen, Chaowei Xiao, Sven Gowal, Robert Stanforth, Bo Li, Duane Boning, Cho-Jui Hsieh", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "5-layer-CNN", 10 | "eps": "8/255", 11 | "clean_acc": "44.73", 12 | "reported": "34.29", 13 | "autoattack_acc": "32.64", 14 | "footnote": "Verifiably robust model with 32.24% provable robust accuracy" 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/ZhangWang2019Defense.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "http://papers.nips.cc/paper/8459-defense-against-adversarial-attacks-using-feature-scattering-based-adversarial-training", 3 | "name": "Defense Against Adversarial Attacks Using Feature Scattering-based 
Adversarial Training", 4 | "authors": "Haichao Zhang, Jianyu Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "89.98", 12 | "reported": "60.6", 13 | "autoattack_acc": "36.64" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/Linf/ZhangXu2020Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=Syejj0NYvr&noteId=Syejj0NYvr", 3 | "name": "Adversarial Interpolation Training: A Simple Approach for Improving Model Robustness", 4 | "authors": "Haichao Zhang, Wei Xu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "OpenReview, Sep 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "90.25", 12 | "reported": "68.7", 13 | "autoattack_acc": "36.45" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Addepalli2021Towards_WRN34.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.09852", 3 | "name": "Scaling Adversarial Training to Large Perturbation Bounds", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Gaurang Sriramanan, Shivangi Khare, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ECCV 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": null, 11 | "clean_acc": "85.32", 12 | "reported": "76.78", 13 | "corruptions_acc": "76.78" 14 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Addepalli2022Efficient_WRN_34_10.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.15318", 3 | "name": "Efficient and Effective Augmentation Strategy for Adversarial Training", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": null, 11 | "clean_acc": "88.71", 12 | "reported": "80.12", 13 | "corruptions_acc": "80.12" 14 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Calian2021Defending.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2104.01086", 3 | "name": "Defending Against Image Corruptions Through Adversarial Augmentations", 4 | "authors": "Dan A. Calian, Florian Stimberg, Olivia Wiles, Sylvestre-Alvise Rebuffi, Andras Gyorgy, Timothy Mann, Sven Gowal", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Apr 2021", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "94.93", 12 | "reported": "92.17", 13 | "corruptions_acc": "92.17", 14 | "footnote": "Uses extra data indirectly via a super resolution and autoencoder networks that were pre-trained on other datasets." 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Diffenderfer2021Winning_Binary.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. 
Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "94.87", 12 | "reported": "88.32", 13 | "corruptions_acc": "88.32", 14 | "footnote": "Binary weight network trained with AugMix and pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Diffenderfer2021Winning_Binary_CARD_Deck.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 6, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "95.09", 12 | "reported": "90.15", 13 | "corruptions_acc": "90.15", 14 | "footnote": "Ensemble of binary weight networks each of which are pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Diffenderfer2021Winning_LRR.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. 
Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "96.66", 12 | "reported": "90.94", 13 | "corruptions_acc": "90.94", 14 | "footnote": "Trained with AugMix and pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Diffenderfer2021Winning_LRR_CARD_Deck.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 6, 7 | "dataset": "cifar10", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "96.56", 12 | "reported": "92.78", 13 | "corruptions_acc": "92.78", 14 | "footnote": "Ensemble of networks each of which are pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Erichson2022NoisyMix.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.01263.pdf", 3 | "name": "NoisyMix: Boosting Robustness by Combining Data Augmentations, Stability Training, and Noise Injections", 4 | "authors": "N. Benjamin Erichson, Soon Hoe Lim, Francisco Utrera, Winnie Xu, Ziang Cao, and Michael W. 
Mahoney", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Feb 2022", 9 | "architecture": "WideResNet-28-4", 10 | "eps": null, 11 | "clean_acc": "96.73", 12 | "reported": "92.78", 13 | "corruptions_acc": "92.78" 14 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Gowal2020Uncovering_70_16_L2.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "90.90", 12 | "reported": "", 13 | "corruptions_acc": "84.90", 14 | "footnote": "Trained for \\(\\ell_2 \\) robustness with \\(\\varepsilon = 0.5\\)." 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Gowal2020Uncovering_70_16_Linf.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "85.29", 12 | "reported": "", 13 | "corruptions_acc": "76.37", 14 | "footnote": "Trained for \\(\\ell_{\\infty} \\) robustness with \\(\\varepsilon = 8/255\\)." 
15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Gowal2020Uncovering_70_16_extra_L2.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "94.74", 12 | "reported": "", 13 | "corruptions_acc": "87.68", 14 | "footnote": "Trained for \\(\\ell_2 \\) robustness with \\(\\varepsilon = 0.5\\)." 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Gowal2020Uncovering_70_16_extra_Linf.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "91.10", 12 | "reported": "", 13 | "corruptions_acc": "81.84", 14 | "footnote": "Trained for \\(\\ell_{\\infty} \\) robustness with \\(\\varepsilon = 8/255\\)." 
15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Hendrycks2020AugMix_ResNeXt.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1912.02781", 3 | "name": "AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty", 4 | "authors": "Dan Hendrycks, Norman Mu, Ekin D. Cubuk, Barret Zoph, Justin Gilmer, Balaji Lakshminarayanan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "ResNeXt29_32x4d", 10 | "eps": null, 11 | "clean_acc": "95.83", 12 | "reported": "89.1", 13 | "corruptions_acc": "89.09" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Hendrycks2020AugMix_WRN.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1912.02781", 3 | "name": "AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty", 4 | "authors": "Dan Hendrycks, Norman Mu, Ekin D. 
Cubuk, Barret Zoph, Justin Gilmer, Balaji Lakshminarayanan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "ICLR 2020", 9 | "architecture": "WideResNet-40-2", 10 | "eps": null, 11 | "clean_acc": "95.08", 12 | "reported": "88.8", 13 | "corruptions_acc": "88.82" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Kireev2021Effectiveness_AugMixNoJSD.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.02325", 3 | "name": "On the effectiveness of adversarial training against common corruptions", 4 | "authors": "Klim Kireev, Maksym Andriushchenko, Nicolas Flammarion", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": null, 11 | "clean_acc": "94.97", 12 | "reported": "86.6", 13 | "corruptions_acc": "86.60", 14 | "footnote": "Training with AugMix without the JSD term." 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Kireev2021Effectiveness_Gauss50percent.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.02325", 3 | "name": "On the effectiveness of adversarial training against common corruptions", 4 | "authors": "Klim Kireev, Maksym Andriushchenko, Nicolas Flammarion", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": null, 11 | "clean_acc": "93.24", 12 | "reported": "85.0", 13 | "corruptions_acc": "85.04", 14 | "footnote": "Trained with 50% Gaussian noise per batch. Note: Gaussian noise is contained in CIFAR-10-C." 
15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Kireev2021Effectiveness_RLAT.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.02325", 3 | "name": "On the effectiveness of adversarial training against common corruptions", 4 | "authors": "Klim Kireev, Maksym Andriushchenko, Nicolas Flammarion", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": null, 11 | "clean_acc": "93.10", 12 | "reported": "84.1", 13 | "corruptions_acc": "84.10", 14 | "footnote": "Trained with RLAT." 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Kireev2021Effectiveness_RLATAugMix.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.02325", 3 | "name": "On the effectiveness of adversarial training against common corruptions", 4 | "authors": "Klim Kireev, Maksym Andriushchenko, Nicolas Flammarion", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "ResNet-18", 10 | "eps": null, 11 | "clean_acc": "94.75", 12 | "reported": "88.5", 13 | "corruptions_acc": "89.60", 14 | "footnote": "Trained with RLAT and AugMix." 
15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Kireev2021Effectiveness_RLATAugMixNoJSD.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.02325", 3 | "name": "On the effectiveness of adversarial training against common corruptions", 4 | "authors": "Klim Kireev, Maksym Andriushchenko, Nicolas Flammarion", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": null, 11 | "clean_acc": "94.77", 12 | "reported": "88.5", 13 | "corruptions_acc": "88.53", 14 | "footnote": "Trained with RLAT and AugMix without the JSD term." 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Modas2021PRIMEResNet18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2112.13547", 3 | "name": "PRIME: A Few Primitives Can Boost Robustness to Common Corruptions", 4 | "authors": "Apostolos Modas, Rahul Rade, Guillermo Ortiz-Jiménez, Seyed-Mohsen Moosavi-Dezfooli, Pascal Frossard", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Dec 2021", 9 | "architecture": "ResNet-18", 10 | "eps": null, 11 | "clean_acc": "93.06", 12 | "reported": "89.05", 13 | "corruptions_acc": "89.05" 14 | } -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Rebuffi2021Fixing_70_16_cutmix_extra_L2.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. 
Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "95.74", 12 | "reported": "", 13 | "corruptions_acc": "88.23", 14 | "footnote": "Trained for \\(\\ell_2 \\) robustness with \\(\\varepsilon = 0.5\\)." 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Rebuffi2021Fixing_70_16_cutmix_extra_Linf.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "92.23", 12 | "reported": "", 13 | "corruptions_acc": "82.82", 14 | "footnote": "Trained for \\(\\ell_{\\infty} \\) robustness with \\(\\varepsilon = 8/255\\)." 
15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar10/corruptions/Standard.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/RobustBench/robustbench/", 3 | "name": "Standardly trained model", 4 | "authors": "", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "N/A", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "0.5", 11 | "clean_acc": "94.78", 12 | "reported": "73.46", 13 | "corruptions_acc": "73.46" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Addepalli2021Towards_PARN18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.09852", 3 | "name": "Scaling Adversarial Training to Large Perturbation Bounds", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Gaurang Sriramanan, Shivangi Khare, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ECCV 2022", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "62.02", 12 | "reported": "27.14", 13 | "autoattack_acc": "27.14", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Addepalli2021Towards_WRN34.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.09852", 3 | "name": "Scaling Adversarial Training to Large Perturbation Bounds", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Gaurang Sriramanan, Shivangi Khare, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ECCV 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": 
"8/255", 11 | "clean_acc": "65.73", 12 | "reported": "30.35", 13 | "autoattack_acc": "30.35", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Addepalli2022Efficient_RN18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.15318", 3 | "name": "Efficient and Effective Augmentation Strategy for Adversarial Training", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "ResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "65.45", 12 | "reported": "27.69", 13 | "autoattack_acc": "27.67", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Addepalli2022Efficient_WRN_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.15318", 3 | "name": "Efficient and Effective Augmentation Strategy for Adversarial Training", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "68.75", 12 | "reported": "31.85", 13 | "autoattack_acc": "31.85", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Amini2024MeanSparse_S-WRN-70-16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2406.05927", 3 | "name": "MeanSparse: Post-Training Robustness Enhancement Through Mean-Centered Feature Sparsification", 4 | 
"authors": "Sajjad Amini, Mohammadreza Teymoorianfard, Shiqing Ma, Amir Houmansadr", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Jun 2024", 9 | "architecture": "MeanSparse WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "75.13", 12 | "reported": "44.78", 13 | "autoattack_acc": "44.78", 14 | "external": "42.25", 15 | "footnote": "It adds the MeanSparse operator to the adversarially trained model Wang2023Better_WRN-70-16. 42.25% robust accuracy is due to APGD (both versions) with BPDA.", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Bai2023Improving_edm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2301.12554", 3 | "name": "Improving the Accuracy-Robustness Trade-off of Classifiers via Adaptive Smoothing", 4 | "authors": "Yatong Bai, Brendon G Anderson, Aerin Kim, Somayeh Sojoudi", 5 | "additional_data": true, 6 | "number_forward_passes": 2, 7 | "dataset": "cifar100", 8 | "venue": "SIMODS 2024", 9 | "architecture": "ResNet-152 + WideResNet-70-16 + mixing network", 10 | "eps": "8/255", 11 | "clean_acc": "85.21", 12 | "reported": "38.72", 13 | "autoattack_acc": "38.72", 14 | "footnote": "It uses an ensemble of networks. 
The robust base classifier uses 50M synthetic images.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Bai2023Improving_trades.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2301.12554", 3 | "name": "Improving the Accuracy-Robustness Trade-off of Classifiers via Adaptive Smoothing", 4 | "authors": "Yatong Bai, Brendon G Anderson, Aerin Kim, Somayeh Sojoudi", 5 | "additional_data": true, 6 | "number_forward_passes": 2, 7 | "dataset": "cifar100", 8 | "venue": "SIMODS 2024", 9 | "architecture": "ResNet-152 + WideResNet-70-16 + mixing network", 10 | "eps": "8/255", 11 | "clean_acc": "80.18", 12 | "reported": "35.15", 13 | "autoattack_acc": "35.15", 14 | "footnote": "It uses an ensemble of networks.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Bai2024MixedNUTS.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2402.02263", 3 | "name": "MixedNUTS: Training-Free Accuracy-Robustness Balance via Nonlinearly Mixed Classifiers", 4 | "authors": "Yatong Bai, Mo Zhou, Vishal M. Patel, Somayeh Sojoudi", 5 | "additional_data": true, 6 | "number_forward_passes": 2, 7 | "dataset": "cifar100", 8 | "venue": "TMLR, Aug 2024", 9 | "architecture": "ResNet-152 + WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "83.08", 12 | "reported": "41.80", 13 | "autoattack_acc": "41.91", 14 | "external": "41.80", 15 | "footnote": "It uses an ensemble of networks. The robust base classifier uses 50M synthetic images. 
41.80% robust accuracy is due to the original evaluation (Adaptive AutoAttack)", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Chen2020Efficient.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.01278", 3 | "name": "Efficient Robust Training via Backward Smoothing", 4 | "authors": "Jinghui Chen, Yu Cheng, Zhe Gan, Quanquan Gu, Jingjing Liu", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "62.15", 12 | "reported": "26.94", 13 | "autoattack_acc": "26.94", 14 | "footnote": null 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Chen2021LTD_WRN34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2111.02331", 3 | "name": "LTD: Low Temperature Distillation for Robust Adversarial Training", 4 | "authors": "Erh-Chung Chen, Che-Rung Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Nov 2021", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "64.07", 12 | "reported": "30.59", 13 | "footnote": "", 14 | "autoattack_acc": "30.59", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Chen2024Data_WRN_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://doi.org/10.1016/j.patcog.2024.110394", 3 | "name": "Data filtering for efficient adversarial training", 4 | "authors": "Erh-Chung Chen, Che-Rung Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 
7 | "dataset": "cifar100", 8 | "venue": "Pattern Recognition 2024", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "64.32", 12 | "reported": "31.13", 13 | "footnote": "", 14 | "autoattack_acc": "31.13", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Cui2020Learnable_34_10_LBGAT0.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2011.11164", 3 | "name": "Learnable Boundary Guided Adversarial Training", 4 | "authors": "Jiequan Cui, Shu Liu, Liwei Wang, Jiaya Jia", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICCV 2021", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "70.25", 12 | "reported": "27.16", 13 | "autoattack_acc": "27.16", 14 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Cui2020Learnable_34_10_LBGAT6.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2011.11164", 3 | "name": "Learnable Boundary Guided Adversarial Training", 4 | "authors": "Jiequan Cui, Shu Liu, Liwei Wang, Jiaya Jia", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICCV 2021", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "60.64", 12 | "reported": "29.33", 13 | "autoattack_acc": "29.33", 14 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255" 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Cui2020Learnable_34_10_LBGAT9_eps_8_255.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2011.11164", 3 | "name": "Learnable Boundary Guided Adversarial Training", 4 | "authors": "Jiequan Cui, Shu Liu, Liwei Wang, Jiaya Jia", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICCV 2021", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "62.99", 12 | "reported": "31.20", 13 | "autoattack_acc": "31.20", 14 | "unreliable": false, 15 | "footnote": "It is combined with AWP." 16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Cui2020Learnable_34_20_LBGAT6.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2011.11164", 3 | "name": "Learnable Boundary Guided Adversarial Training", 4 | "authors": "Jiequan Cui, Shu Liu, Liwei Wang, Jiaya Jia", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICCV 2021", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "62.55", 12 | "reported": "30.20", 13 | "autoattack_acc": "30.20", 14 | "footnote": "Uses \\(\\ell_{\\infty} \\) = 0.031 \u2248 7.9/255 instead of 8/255" 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Cui2023Decoupled_WRN-28-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2305.13948", 3 | "name": "Decoupled Kullback-Leibler Divergence Loss", 4 | "authors": "Jiequan Cui, Zhuotao Tian, Zhisheng Zhong, Xiaojuan Qi, Bei Yu, Hanwang Zhang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2024", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "73.85", 12 | 
"reported": "39.18", 13 | "autoattack_acc": "39.18", 14 | "unreliable": false, 15 | "footnote": "It uses additional 50M synthetic images in training." 16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Cui2023Decoupled_WRN-34-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2305.13948", 3 | "name": "Decoupled Kullback-Leibler Divergence Loss", 4 | "authors": "Jiequan Cui, Zhuotao Tian, Zhisheng Zhong, Xiaojuan Qi, Bei Yu, Hanwang Zhang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2024", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "65.76", 12 | "reported": "31.91", 13 | "autoattack_acc": "31.91", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Cui2023Decoupled_WRN-34-10_autoaug.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2305.13948", 3 | "name": "Decoupled Kullback-Leibler Divergence Loss", 4 | "authors": "Jiequan Cui, Zhuotao Tian, Zhisheng Zhong, Xiaojuan Qi, Bei Yu, Hanwang Zhang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2024", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "65.93", 12 | "reported": "32.52", 13 | "autoattack_acc": "32.52", 14 | "unreliable": false, 15 | "footnote": "It uses AutoAugment." 
16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Debenedetti2022Light_XCiT-L12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-L12", 10 | "eps": "8/255", 11 | "clean_acc": "70.76", 12 | "reported": "35.08", 13 | "autoattack_acc": "35.08", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Debenedetti2022Light_XCiT-M12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-M12", 10 | "eps": "8/255", 11 | "clean_acc": "69.21", 12 | "reported": "34.21", 13 | "autoattack_acc": "34.21", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Debenedetti2022Light_XCiT-S12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-S12", 10 | "eps": 
"8/255", 11 | "clean_acc": "67.34", 12 | "reported": "32.19", 13 | "autoattack_acc": "32.19", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Gowal2020Uncovering.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "60.86", 12 | "reported": "30.67", 13 | "autoattack_acc": "30.03", 14 | "footnote": null, 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Gowal2020Uncovering_extra.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "69.15", 12 | "reported": "37.70", 13 | "autoattack_acc": "36.88", 14 | "footnote": null, 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Hendrycks2019Using.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1901.09960", 3 | "name": "Using 
Pre-Training Can Improve Model Robustness and Uncertainty", 4 | "authors": "Dan Hendrycks, Kimin Lee, Mantas Mazeika", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICML 2019", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "59.23", 12 | "reported": "33.5", 13 | "autoattack_acc": "28.42" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Jia2022LAS-AT_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2203.06616", 3 | "name": "LAS-AT: Adversarial Training with Learnable Attack Strategy", 4 | "authors": "Xiaojun Jia, Yong Zhang, Baoyuan Wu, Ke Ma, Jue Wang, Xiaochun Cao", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Mar 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "64.89", 12 | "reported": "30.77", 13 | "autoattack_acc": "30.77", 14 | "footnote": "", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Jia2022LAS-AT_34_20.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2203.06616", 3 | "name": "LAS-AT: Adversarial Training with Learnable Attack Strategy", 4 | "authors": "Xiaojun Jia, Yong Zhang, Baoyuan Wu, Ke Ma, Jue Wang, Xiaochun Cao", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Mar 2022", 9 | "architecture": "WideResNet-34-20", 10 | "eps": "8/255", 11 | "clean_acc": "67.31", 12 | "reported": "31.91", 13 | "autoattack_acc": "31.91", 14 | "footnote": "", 15 | "unreliable": false 16 | } --------------------------------------------------------------------------------
/model_info/cifar100/Linf/Pang2022Robustness_WRN28_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.10103.pdf", 3 | "name": " Robustness and Accuracy Could Be Reconcilable by (Proper) Definition", 4 | "authors": "Tianyu Pang, Min Lin, Xiao Yang, Jun Zhu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICML 2022", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "63.66", 12 | "reported": "31.08", 13 | "autoattack_acc": "31.08", 14 | "footnote": "It uses additional 1M synthetic images in training.", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Pang2022Robustness_WRN70_16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.10103.pdf", 3 | "name": " Robustness and Accuracy Could Be Reconcilable by (Proper) Definition", 4 | "authors": "Tianyu Pang, Min Lin, Xiao Yang, Jun Zhu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICML 2022", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "65.56", 12 | "reported": "33.05", 13 | "autoattack_acc": "33.05", 14 | "footnote": "It uses additional 1M synthetic images in training.", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Rade2021Helper_R18_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://openreview.net/forum?id=BuD2LmNaU3a", 3 | "name": "Helper-based Adversarial Training: Reducing Excessive Margin to Achieve a Better Accuracy vs. 
Robustness Trade-off", 4 | "authors": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "OpenReview, Jun 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "61.50", 12 | "reported": "28.88", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "28.88", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Rebuffi2021Fixing_28_10_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "62.41", 12 | "reported": "32.06", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "32.06", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Rebuffi2021Fixing_70_16_cutmix_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. 
Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "63.56", 12 | "reported": "34.64", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "34.64", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Rebuffi2021Fixing_R18_ddpm.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2103.01946", 3 | "name": "Fixing Data Augmentation to Improve Adversarial Robustness", 4 | "authors": "Sylvestre-Alvise Rebuffi, Sven Gowal, Dan A. Calian, Florian Stimberg, Olivia Wiles, Timothy Mann", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Mar 2021", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "56.87", 12 | "reported": "28.50", 13 | "footnote": "It uses additional 1M synthetic images in training.", 14 | "autoattack_acc": "28.50", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Rice2020Overfitting.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2002.11569", 3 | "name": "Overfitting in adversarially robust deep learning", 4 | "authors": "Leslie Rice, Eric Wong, J. 
Zico Kolter", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICML 2020", 9 | "architecture": "PreActResNet-18", 10 | "eps": "8/255", 11 | "clean_acc": "53.83", 12 | "reported": "28.1", 13 | "autoattack_acc": "18.95", 14 | "footnote": null 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Sehwag2021Proxy.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2104.09425", 3 | "name": "Robust Learning Meets Generative Models: Can Proxy Distributions Improve Adversarial Robustness?", 4 | "authors": "Vikash Sehwag, Saeed Mahloujifar, Tinashe Handina, Sihui Dai, Chong Xiang, Mung Chiang, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICLR 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "65.93", 12 | "reported": "31.15", 13 | "autoattack_acc": "31.15", 14 | "footnote": "It uses additional 1M synthetic images in training.", 15 | "unreliable": false 16 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Sitawarin2020Improving.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2003.09347", 3 | "name": "Improving Adversarial Robustness Through Progressive Hardening", 4 | "authors": "Chawin Sitawarin, Supriyo Chakraborty, David Wagner", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Mar 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "62.82", 12 | "reported": "24.57", 13 | "autoattack_acc": "24.57", 14 | "footnote": null 15 | } -------------------------------------------------------------------------------- 
/model_info/cifar100/Linf/Wang2023Better_WRN-28-10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.04638", 3 | "name": "Better Diffusion Models Further Improve Adversarial Training", 4 | "authors": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICML 2023", 9 | "architecture": "WideResNet-28-10", 10 | "eps": "8/255", 11 | "clean_acc": "72.58", 12 | "reported": "38.83", 13 | "autoattack_acc": "38.83", 14 | "external": "38.77", 15 | "footnote": "It uses additional 50M synthetic images in training. 38.77% robust accuracy is given by MALT (Melamed et al., 2024).", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Wang2023Better_WRN-70-16.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.04638", 3 | "name": "Better Diffusion Models Further Improve Adversarial Training", 4 | "authors": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, Shuicheng Yan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICML 2023", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "75.22", 12 | "reported": "42.67", 13 | "autoattack_acc": "42.67", 14 | "external": "42.66", 15 | "footnote": "It uses additional 50M synthetic images in training. 
42.66% robust accuracy is given by MALT (Melamed et al., 2024).", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/cifar100/Linf/Wu2020Adversarial.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2004.05884", 3 | "name": "Adversarial Weight Perturbation Helps Robust Generalization", 4 | "authors": "Dongxian Wu, Shu-tao Xia, Yisen Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-34-10", 10 | "eps": "8/255", 11 | "clean_acc": "60.38", 12 | "reported": "28.86", 13 | "autoattack_acc": "28.86", 14 | "footnote": null, 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Addepalli2021Towards_PARN18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.09852", 3 | "name": "Scaling Adversarial Training to Large Perturbation Bounds", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Gaurang Sriramanan, Shivangi Khare, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ECCV 2022", 9 | "architecture": "PreActResNet-18", 10 | "eps": null, 11 | "clean_acc": "62.02", 12 | "reported": "51.77", 13 | "corruptions_acc": "51.77" 14 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Addepalli2021Towards_WRN34.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.09852", 3 | "name": "Scaling Adversarial Training to Large Perturbation Bounds", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Gaurang Sriramanan, Shivangi Khare, 
Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ECCV 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": null, 11 | "clean_acc": "65.73", 12 | "reported": "54.88", 13 | "corruptions_acc": "54.88" 14 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Addepalli2022Efficient_WRN_34_10.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.15318", 3 | "name": "Efficient and Effective Augmentation Strategy for Adversarial Training", 4 | "authors": "Sravanti Addepalli, Samyak Jain, Venkatesh Babu Radhakrishnan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "WideResNet-34-10", 10 | "eps": null, 11 | "clean_acc": "68.75", 12 | "reported": "56.95", 13 | "corruptions_acc": "56.95" 14 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Diffenderfer2021Winning_Binary.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. 
Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "77.69", 12 | "reported": "65.26", 13 | "corruptions_acc": "65.26", 14 | "footnote": "Binary weight network trained with AugMix and pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Diffenderfer2021Winning_Binary_CARD_Deck.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 6, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "78.50", 12 | "reported": "69.09", 13 | "corruptions_acc": "69.09", 14 | "footnote": "Ensemble of binary weight networks each of which are pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Diffenderfer2021Winning_LRR.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. 
Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "78.41", 12 | "reported": "66.45", 13 | "corruptions_acc": "66.45", 14 | "footnote": "Trained with AugMix and pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Diffenderfer2021Winning_LRR_CARD_Deck.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2106.09129", 3 | "name": "A Winning Hand: Compressing Deep Networks Can Improve Out-Of-Distribution Robustness", 4 | "authors": "James Diffenderfer, Brian R. Bartoldson, Shreya Chaganti, Jize Zhang, Bhavya Kailkhura", 5 | "additional_data": false, 6 | "number_forward_passes": 6, 7 | "dataset": "cifar100", 8 | "venue": "NeurIPS 2021", 9 | "architecture": "WideResNet-18-2", 10 | "eps": null, 11 | "clean_acc": "79.93", 12 | "reported": "71.08", 13 | "corruptions_acc": "71.08", 14 | "footnote": "Ensemble of networks each of which are pruned to 95% sparsity" 15 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Erichson2022NoisyMix.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.01263.pdf", 3 | "name": "NoisyMix: Boosting Robustness by Combining Data Augmentations, Stability Training, and Noise Injections", 4 | "authors": "N. Benjamin Erichson, Soon Hoe Lim, Francisco Utrera, Winnie Xu, Ziang Cao, and Michael W. 
Mahoney", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Feb 2022", 9 | "architecture": "WideResNet-28-4", 10 | "eps": null, 11 | "clean_acc": "81.16", 12 | "reported": "72.06", 13 | "corruptions_acc": "72.06" 14 | } -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Gowal2020Uncovering_Linf.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "60.86", 12 | "reported": "", 13 | "corruptions_acc": "49.46", 14 | "footnote": "Trained for \\(\\ell_{\\infty} \\) robustness with \\(\\varepsilon = 8/255\\)." 15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Gowal2020Uncovering_extra_Linf.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2010.03593", 3 | "name": "Uncovering the Limits of Adversarial Training against Norm-Bounded Adversarial Examples", 4 | "authors": "Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, Pushmeet Kohli", 5 | "additional_data": true, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar10", 8 | "venue": "arXiv, Oct 2020", 9 | "architecture": "WideResNet-70-16", 10 | "eps": "8/255", 11 | "clean_acc": "69.15", 12 | "reported": "", 13 | "corruptions_acc": "56.00", 14 | "footnote": "Trained for \\(\\ell_{\\infty} \\) robustness with \\(\\varepsilon = 8/255\\)." 
15 | } 16 | -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Hendrycks2020AugMix_ResNeXt.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1912.02781", 3 | "name": "AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty", 4 | "authors": "Dan Hendrycks, Norman Mu, Ekin D. Cubuk, Barret Zoph, Justin Gilmer, Balaji Lakshminarayanan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICLR 2020", 9 | "architecture": "ResNeXt29_32x4d", 10 | "eps": null, 11 | "clean_acc": "78.90", 12 | "reported": "65.1", 13 | "corruptions_acc": "65.14" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Hendrycks2020AugMix_WRN.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1912.02781", 3 | "name": "AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty", 4 | "authors": "Dan Hendrycks, Norman Mu, Ekin D. 
Cubuk, Barret Zoph, Justin Gilmer, Balaji Lakshminarayanan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "ICLR 2020", 9 | "architecture": "WideResNet-40-2", 10 | "eps": null, 11 | "clean_acc": "76.28", 12 | "reported": "64.1", 13 | "corruptions_acc": "64.11" 14 | } 15 | -------------------------------------------------------------------------------- /model_info/cifar100/corruptions/Modas2021PRIMEResNet18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2112.13547", 3 | "name": "PRIME: A Few Primitives Can Boost Robustness to Common Corruptions", 4 | "authors": "Apostolos Modas, Rahul Rade, Guillermo Ortiz-Jiménez, Seyed-Mohsen Moosavi-Dezfooli, Pascal Frossard", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "cifar100", 8 | "venue": "arXiv, Dec 2021", 9 | "architecture": "ResNet-18", 10 | "eps": null, 11 | "clean_acc": "77.60", 12 | "reported": "68.28", 13 | "corruptions_acc": "68.28" 14 | } -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Amini2024MeanSparse_ConvNeXt-L.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2406.05927", 3 | "name": "MeanSparse: Post-Training Robustness Enhancement Through Mean-Centered Feature Sparsification", 4 | "authors": "Sajjad Amini, Mohammadreza Teymoorianfard, Shiqing Ma, Amir Houmansadr", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Jun 2024", 9 | "architecture": "MeanSparse ConvNeXt-L", 10 | "eps": "4/255", 11 | "clean_acc": "77.92", 12 | "reported": "59.64", 13 | "autoattack_acc": "59.64", 14 | "external": "58.22", 15 | "footnote": "It adds the MeanSparse operator to the adversarially trained models Liu2023Comprehensive_ConvNeXt-L. 
58.22% robust accuracy is due to APGD (both versions) with BPDA.", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Amini2024MeanSparse_Swin-L.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2406.05927", 3 | "name": "MeanSparse: Post-Training Robustness Enhancement Through Mean-Centered Feature Sparsification", 4 | "authors": "Sajjad Amini, Mohammadreza Teymoorianfard, Shiqing Ma, Amir Houmansadr", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Jun 2024", 9 | "architecture": "MeanSparse Swin-L", 10 | "eps": "4/255", 11 | "clean_acc": "78.80", 12 | "reported": "62.12", 13 | "autoattack_acc": "62.12", 14 | "external": "58.92", 15 | "footnote": "It adds the MeanSparse operator to the adversarially trained models Liu2023Comprehensive_Swin-L. 58.92% robust accuracy is due to APGD (both versions) with BPDA.", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Bai2024MixedNUTS.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2402.02263", 3 | "name": "MixedNUTS: Training-Free Accuracy-Robustness Balance via Nonlinearly Mixed Classifiers", 4 | "authors": "Yatong Bai, Mo Zhou, Vishal M. Patel, Somayeh Sojoudi", 5 | "additional_data": true, 6 | "number_forward_passes": 2, 7 | "dataset": "imagenet", 8 | "venue": "TMLR, Aug 2024", 9 | "architecture": "ConvNeXtV2-L + Swin-L", 10 | "eps": "4/255", 11 | "clean_acc": "81.48", 12 | "reported": "58.50", 13 | "autoattack_acc": "58.62", 14 | "external": "58.50", 15 | "footnote": "It uses an ensemble of networks. The accurate base classifier was pre-trained on ImageNet-21k. 
58.50% robust accuracy is due to the original evaluation (Adaptive AutoAttack)", 16 | "unreliable": false 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Chen2024Data_WRN_50_2.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://doi.org/10.1016/j.patcog.2024.110394", 3 | "name": "Data filtering for efficient adversarial training", 4 | "authors": "Erh-Chung Chen, Che-Rung Lee", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "Pattern Recognition 2024", 9 | "architecture": "WideResNet-50-2", 10 | "eps": "4/255", 11 | "clean_acc": "68.76", 12 | "reported": "41.92", 13 | "footnote": "", 14 | "autoattack_acc": "40.60", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Debenedetti2022Light_XCiT-L12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-L12", 10 | "eps": "4/255", 11 | "clean_acc": "73.76", 12 | "reported": "47.60", 13 | "autoattack_acc": "47.60", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Debenedetti2022Light_XCiT-M12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": false, 6 | 
"number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-M12", 10 | "eps": "4/255", 11 | "clean_acc": "74.04", 12 | "reported": "45.24", 13 | "autoattack_acc": "45.24", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Debenedetti2022Light_XCiT-S12.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2209.07399", 3 | "name": "A Light Recipe to Train Robust Vision Transformers", 4 | "authors": "Edoardo Debenedetti, Vikash Sehwag, Prateek Mittal", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Sep 2022", 9 | "architecture": "XCiT-S12", 10 | "eps": "4/255", 11 | "clean_acc": "72.34", 12 | "reported": "41.78", 13 | "autoattack_acc": "41.78", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Engstrom2019Robustness.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/MadryLab/robustness", 3 | "name": "Robustness library", 4 | "authors": "Logan Engstrom, Andrew Ilyas, Hadi Salman, Shibani Santurkar, Dimitris Tsipras", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "GitHub,
Oct 2019", 9 | "architecture": "ResNet-50", 10 | "eps": "4/255", 11 | "clean_acc": "62.56", 12 | "reported": "33.38", 13 | "autoattack_acc": "29.22", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Liu2023Comprehensive_ConvNeXt-B.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.14301", 3 | "name": "A Comprehensive Study on Robustness of Image Classification Models: Benchmarking and Rethinking", 4 | "authors": "Chang Liu, Yinpeng Dong, Wenzhao Xiang, Xiao Yang, Hang Su, Jun Zhu, Yuefeng Chen, Yuan He, Hui Xue, Shibao Zheng", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Feb 2023", 9 | "architecture": "ConvNeXt-B", 10 | "eps": "4/255", 11 | "clean_acc": "76.02", 12 | "reported": "55.82", 13 | "autoattack_acc": "55.82", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Liu2023Comprehensive_ConvNeXt-L.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.14301", 3 | "name": "A Comprehensive Study on Robustness of Image Classification Models: Benchmarking and Rethinking", 4 | "authors": "Chang Liu, Yinpeng Dong, Wenzhao Xiang, Xiao Yang, Hang Su, Jun Zhu, Yuefeng Chen, Yuan He, Hui Xue, Shibao Zheng", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Feb 2023", 9 | "architecture": "ConvNeXt-L", 10 | "eps": "4/255", 11 | "clean_acc": "78.02", 12 | "reported": "58.48", 13 | "autoattack_acc": "58.48", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Liu2023Comprehensive_Swin-B.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.14301", 3 | "name": "A Comprehensive Study on Robustness of Image Classification Models: Benchmarking and Rethinking", 4 | "authors": "Chang Liu, Yinpeng Dong, Wenzhao Xiang, Xiao Yang, Hang Su, Jun Zhu, Yuefeng Chen, Yuan He, Hui Xue, Shibao Zheng", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Feb 2023", 9 | "architecture": "Swin-B", 10 | "eps": "4/255", 11 | "clean_acc": "76.16", 12 | "reported": "56.16", 13 | "autoattack_acc": "56.16", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Liu2023Comprehensive_Swin-L.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2302.14301", 3 | "name": "A Comprehensive Study on Robustness of Image Classification Models: Benchmarking and Rethinking", 4 | "authors": "Chang Liu, Yinpeng Dong, Wenzhao Xiang, Xiao Yang, Hang Su, Jun Zhu, Yuefeng Chen, Yuan He, Hui Xue, Shibao Zheng", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Feb 2023", 9 | "architecture": "Swin-L", 10 | "eps": "4/255", 11 | "clean_acc": "78.92", 12 | "reported": "59.56", 13 | "autoattack_acc": "59.56", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Mo2022When_Swin-B.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.07540", 3 | "name": "When Adversarial Training Meets Vision Transformers: Recipes from Training to Architecture", 4 | "authors": "Yichuan Mo, Dongxian Wu, Yifei Wang, Yiwen Guo, Yisen Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | 
"dataset": "imagenet", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "Swin-B", 10 | "eps": "4/255", 11 | "clean_acc": "74.66", 12 | "reported": "38.30", 13 | "autoattack_acc": "38.30", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Mo2022When_ViT-B.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2210.07540", 3 | "name": "When Adversarial Training Meets Vision Transformers: Recipes from Training to Architecture", 4 | "authors": "Yichuan Mo, Dongxian Wu, Yifei Wang, Yiwen Guo, Yisen Wang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2022", 9 | "architecture": "ViT-B", 10 | "eps": "4/255", 11 | "clean_acc": "68.38", 12 | "reported": "34.40", 13 | "autoattack_acc": "34.40", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Peng2023Robust.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2308.16258", 3 | "name": "Robust Principles: Architectural Design Principles for Adversarially Robust CNNs", 4 | "authors": "ShengYun Peng, Weilin Xu, Cory Cornelius, Matthew Hull, Kevin Li, Rahul Duggal, Mansi Phute, Jason Martin, and Duen Horng Chau", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "BMVC 2023", 9 | "architecture": "RaWideResNet-101-2", 10 | "eps": "4/255", 11 | "clean_acc": "73.44", 12 | "reported": "48.94", 13 | "autoattack_acc": "48.94", 14 | "unreliable": false 15 | } -------------------------------------------------------------------------------- /model_info/imagenet/Linf/RodriguezMunoz2024Characterizing_Swin-B.json: -------------------------------------------------------------------------------- 1 | 
{ 2 | "link": "https://arxiv.org/abs/2409.20139", 3 | "name": "Characterizing Model Robustness via Natural Input Gradients", 4 | "authors": "Adrián Rodríguez-Muñoz, Tongzhou Wang, Antonio Torralba", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Sep 2024", 9 | "architecture": "Swin-B", 10 | "eps": "4/255", 11 | "clean_acc": "77.76", 12 | "reported": "51.56", 13 | "autoattack_acc": "51.56", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/RodriguezMunoz2024Characterizing_Swin-L.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2409.20139", 3 | "name": "Characterizing Model Robustness via Natural Input Gradients", 4 | "authors": "Adrián Rodríguez-Muñoz, Tongzhou Wang, Antonio Torralba", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Sep 2024", 9 | "architecture": "Swin-L", 10 | "eps": "4/255", 11 | "clean_acc": "79.36", 12 | "reported": "53.82", 13 | "autoattack_acc": "53.82", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Salman2020Do_50_2.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2007.08489", 3 | "name": "Do Adversarially Robust ImageNet Models Transfer Better?", 4 | "authors": "Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish Kapoor, Aleksander Madry", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-50-2", 10 | "eps": "4/255", 11 | "clean_acc": "68.46", 12 | "reported": "", 13 | "autoattack_acc": "38.14", 14 | "unreliable": false 15 | } 16 | 
-------------------------------------------------------------------------------- /model_info/imagenet/Linf/Salman2020Do_R18.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2007.08489", 3 | "name": "Do Adversarially Robust ImageNet Models Transfer Better?", 4 | "authors": "Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish Kapoor, Aleksander Madry", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "ResNet-18", 10 | "eps": "4/255", 11 | "clean_acc": "52.92", 12 | "reported": "", 13 | "autoattack_acc": "25.32", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Salman2020Do_R50.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2007.08489", 3 | "name": "Do Adversarially Robust ImageNet Models Transfer Better?", 4 | "authors": "Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish Kapoor, Aleksander Madry", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "ResNet-50", 10 | "eps": "4/255", 11 | "clean_acc": "64.02", 12 | "reported": "", 13 | "autoattack_acc": "34.96", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Singh2023Revisiting_ConvNeXt-B-ConvStem.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2303.01870", 3 | "name": "Revisiting Adversarial Training for ImageNet: Architectures, Training and Generalization across Threat Models", 4 | "authors": "Naman D Singh, Francesco Croce, Matthias Hein", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": 
"imagenet", 8 | "venue": "NeurIPS 2023", 9 | "architecture": "ConvNeXt-B + ConvStem", 10 | "eps": "4/255", 11 | "clean_acc": "75.90", 12 | "reported": "56.14", 13 | "autoattack_acc": "56.14", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Singh2023Revisiting_ConvNeXt-L-ConvStem.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2303.01870", 3 | "name": "Revisiting Adversarial Training for ImageNet: Architectures, Training and Generalization across Threat Models", 4 | "authors": "Naman D Singh, Francesco Croce, Matthias Hein", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2023", 9 | "architecture": "ConvNeXt-L + ConvStem", 10 | "eps": "4/255", 11 | "clean_acc": "77.00", 12 | "reported": "57.70", 13 | "autoattack_acc": "57.70", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Singh2023Revisiting_ConvNeXt-S-ConvStem.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2303.01870", 3 | "name": "Revisiting Adversarial Training for ImageNet: Architectures, Training and Generalization across Threat Models", 4 | "authors": "Naman D Singh, Francesco Croce, Matthias Hein", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2023", 9 | "architecture": "ConvNeXt-S + ConvStem", 10 | "eps": "4/255", 11 | "clean_acc": "74.10", 12 | "reported": "52.42", 13 | "autoattack_acc": "52.42", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Singh2023Revisiting_ConvNeXt-T-ConvStem.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2303.01870", 3 | "name": "Revisiting Adversarial Training for ImageNet: Architectures, Training and Generalization across Threat Models", 4 | "authors": "Naman D Singh, Francesco Croce, Matthias Hein", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2023", 9 | "architecture": "ConvNeXt-T + ConvStem", 10 | "eps": "4/255", 11 | "clean_acc": "72.72", 12 | "reported": "49.46", 13 | "autoattack_acc": "49.46", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Singh2023Revisiting_ViT-B-ConvStem.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2303.01870", 3 | "name": "Revisiting Adversarial Training for ImageNet: Architectures, Training and Generalization across Threat Models", 4 | "authors": "Naman D Singh, Francesco Croce, Matthias Hein", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2023", 9 | "architecture": "ViT-B + ConvStem", 10 | "eps": "4/255", 11 | "clean_acc": "76.30", 12 | "reported": "54.66", 13 | "autoattack_acc": "54.66", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Singh2023Revisiting_ViT-S-ConvStem.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2303.01870", 3 | "name": "Revisiting Adversarial Training for ImageNet: Architectures, Training and Generalization across Threat Models", 4 | "authors": "Naman D Singh, Francesco Croce, Matthias Hein", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2023", 9 | 
"architecture": "ViT-S + ConvStem", 10 | "eps": "4/255", 11 | "clean_acc": "72.56", 12 | "reported": "48.08", 13 | "autoattack_acc": "48.08", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Standard_R50.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/RobustBench/robustbench/", 3 | "name": "Standardly trained model", 4 | "authors": "Torchvision", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "N/A", 9 | "architecture": "ResNet-50", 10 | "eps": "4/255", 11 | "clean_acc": "76.52", 12 | "reported": "0.0", 13 | "autoattack_acc": "0.0", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Wong2020Fast.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2001.03994", 3 | "name": "Fast is better than free: Revisiting adversarial training", 4 | "authors": "Eric Wong, Leslie Rice, J. 
Zico Kolter", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "ICLR 2020", 9 | "architecture": "ResNet-50", 10 | "eps": "4/255", 11 | "clean_acc": "55.62", 12 | "reported": "30.18", 13 | "footnote": "Focuses on fast adversarial training.", 14 | "autoattack_acc": "26.24", 15 | "unreliable": false 16 | } 17 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Xu2024MIMIR_Swin-B.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2312.04960", 3 | "name": "MIMIR: Masked Image Modeling for Mutual Information-based Adversarial Robustness", 4 | "authors": "Xiaoyun Xu, Shujian Yu, Zhuoran Liu, Stjepan Picek", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Dec 2023", 9 | "architecture": "Swin-B", 10 | "eps": "4/255", 11 | "clean_acc": "76.62", 12 | "reported": "55.90", 13 | "autoattack_acc": "55.90", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/Linf/Xu2024MIMIR_Swin-L.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2312.04960", 3 | "name": "MIMIR: Masked Image Modeling for Mutual Information-based Adversarial Robustness", 4 | "authors": "Xiaoyun Xu, Shujian Yu, Zhuoran Liu, Stjepan Picek", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Dec 2023", 9 | "architecture": "Swin-L", 10 | "eps": "4/255", 11 | "clean_acc": "78.62", 12 | "reported": "59.68", 13 | "autoattack_acc": "59.68", 14 | "unreliable": false 15 | } 16 | -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/AlexNet.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html", 3 | "name": "ImageNet Classification with Deep Convolutional Neural Networks", 4 | "authors": "Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2012", 9 | "architecture": "AlexNet", 10 | "eps": null, 11 | "clean_acc": "56.24", 12 | "reported": null, 13 | "corruptions_acc_3d": "28.47", 14 | "corruptions_mce_3d": "100.0", 15 | "footnote": null, 16 | "corruptions_acc": "21.12", 17 | "corruptions_mce": "100.0" 18 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Erichson2022NoisyMix.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.01263.pdf", 3 | "name": "NoisyMix: Boosting Robustness by Combining Data Augmentations, Stability Training, and Noise Injections", 4 | "authors": "N. Benjamin Erichson, Soon Hoe Lim, Francisco Utrera, Winnie Xu, Ziang Cao, and Michael W. 
Mahoney", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Feb 2022", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "76.98", 12 | "reported": "52.25", 13 | "footnote": "Basic model.", 14 | "corruptions_acc_3d": "55.79", 15 | "corruptions_mce_3d": "61.67", 16 | "corruptions_acc": "52.47", 17 | "corruptions_mce": "60.46" 18 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Erichson2022NoisyMix_new.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/pdf/2202.01263.pdf", 3 | "name": "NoisyMix: Boosting Robustness by Combining Data Augmentations, Stability Training, and Noise Injections", 4 | "authors": "N. Benjamin Erichson, Soon Hoe Lim, Francisco Utrera, Winnie Xu, Ziang Cao, and Michael W. Mahoney", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Feb 2022", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "76.9", 12 | "reported": "52.25", 13 | "footnote": "Better tuned model.", 14 | "corruptions_acc_3d": "56.1", 15 | "corruptions_mce_3d": "61.27", 16 | "corruptions_acc": "53.28", 17 | "corruptions_mce": "59.43" 18 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Geirhos2018_SIN.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1811.12231", 3 | "name": "ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness", 4 | "authors": "Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A. 
Wichmann, Wieland Brendel", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "ICLR 2019", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "60.08", 12 | "reported": "", 13 | "corruptions_acc_3d": "40.46", 14 | "corruptions_mce_3d": "84.1", 15 | "footnote": "Model A: trained on Stylized ImageNet.", 16 | "corruptions_acc": "39.92", 17 | "corruptions_mce": "76.67" 18 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Geirhos2018_SIN_IN.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1811.12231", 3 | "name": "ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness", 4 | "authors": "Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A. Wichmann, Wieland Brendel", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "ICLR 2019", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "74.98", 12 | "reported": "", 13 | "corruptions_acc_3d": "50.03", 14 | "corruptions_mce_3d": "69.68", 15 | "footnote": "Model B: trained on Stylized ImageNet and standard ImageNet.", 16 | "corruptions_acc": "45.76", 17 | "corruptions_mce": "68.67" 18 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Geirhos2018_SIN_IN_IN.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1811.12231", 3 | "name": "ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness", 4 | "authors": "Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A. 
Wichmann, Wieland Brendel", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "ICLR 2019", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "77.56", 12 | "reported": "", 13 | "corruptions_acc_3d": "49.03", 14 | "corruptions_mce_3d": "70.53", 15 | "footnote": "Model C: trained on Stylized ImageNet and standard ImageNet, then fine-tuned on standard ImageNet.", 16 | "corruptions_acc": "42.0", 17 | "corruptions_mce": "73.19" 18 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Hendrycks2020AugMix.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/1912.02781", 3 | "name": "AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty", 4 | "authors": "Dan Hendrycks, Norman Mu, Ekin D. Cubuk, Barret Zoph, Justin Gilmer, Balaji Lakshminarayanan", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "ICLR 2020", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "77.34", 12 | "reported": "", 13 | "corruptions_acc": "49.33", 14 | "corruptions_mce": "64.17", 15 | "corruptions_acc_3d": "53.48", 16 | "corruptions_mce_3d": "64.64" 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Hendrycks2020Many.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2006.16241", 3 | "name": "The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization", 4 | "authors": "Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, Justin Gilmer", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": 
"imagenet", 8 | "venue": "ICCV 2021", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "76.86", 12 | "reported": "", 13 | "corruptions_acc_3d": "56.51", 14 | "corruptions_mce_3d": "60.69", 15 | "corruptions_acc": "52.9", 16 | "corruptions_mce": "60.04" 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Salman2020Do_50_2_Linf.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2007.08489", 3 | "name": "Do Adversarially Robust ImageNet Models Transfer Better?", 4 | "authors": "Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish Kapoor, Aleksander Madry", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "NeurIPS 2020", 9 | "architecture": "WideResNet-50-2", 10 | "eps": null, 11 | "clean_acc": "68.64", 12 | "reported": "", 13 | "corruptions_acc_3d": "41.86", 14 | "corruptions_mce_3d": "80.51", 15 | "corruptions_acc": "36.09", 16 | "corruptions_mce": "80.0" 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Standard_R50.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://github.com/RobustBench/robustbench/", 3 | "name": "Standardly trained model", 4 | "authors": "Torchvision", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "N/A", 9 | "architecture": "ResNet-50", 10 | "eps": null, 11 | "clean_acc": "76.72", 12 | "reported": "", 13 | "corruptions_acc_3d": "47.12", 14 | "corruptions_mce_3d": "73.16", 15 | "corruptions_acc": "39.48", 16 | "corruptions_mce": "76.38" 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Tian2022Deeper_DeiT-B.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2204.12143", 3 | "name": "Deeper Insights into the Robustness of ViTs towards Common Corruptions", 4 | "authors": "Rui Tian, Zuxuan Wu, Qi Dai, Han Hu, Yu-Gang Jiang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Apr 2022", 9 | "architecture": "DeiT Base", 10 | "eps": null, 11 | "clean_acc": "81.38", 12 | "reported": "", 13 | "corruptions_acc_3d": "65.9", 14 | "corruptions_mce_3d": "47.97", 15 | "corruptions_acc": "67.55", 16 | "corruptions_mce": "41.7" 17 | } -------------------------------------------------------------------------------- /model_info/imagenet/corruptions/Tian2022Deeper_DeiT-S.json: -------------------------------------------------------------------------------- 1 | { 2 | "link": "https://arxiv.org/abs/2204.12143", 3 | "name": "Deeper Insights into the Robustness of ViTs towards Common Corruptions", 4 | "authors": "Rui Tian, Zuxuan Wu, Qi Dai, Han Hu, Yu-Gang Jiang", 5 | "additional_data": false, 6 | "number_forward_passes": 1, 7 | "dataset": "imagenet", 8 | "venue": "arXiv, Apr 2022", 9 | "architecture": "DeiT Small", 10 | "eps": null, 11 | "clean_acc": "79.76", 12 | "reported": "", 13 | "corruptions_acc_3d": "62.64", 14 | "corruptions_mce_3d": "52.5", 15 | "corruptions_acc": "62.91", 16 | "corruptions_mce": "47.61" 17 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.7.1 2 | torchvision>=0.8.2 3 | torchdiffeq 4 | geotorch 5 | requests>=2.25.1 6 | numpy>=1.19.4 7 | Jinja2~=3.1.2 8 | timm>=1.0.9 9 | tqdm>=4.56.1 10 | pandas>=1.3.5 11 | git+https://github.com/fra31/auto-attack.git@a39220048b3c9f2cca9a4d3a54604793c68eca7e#egg=autoattack 12 | gdown==5.1.0 13 | pyyaml 14 | 
-------------------------------------------------------------------------------- /robustbench/__init__.py: -------------------------------------------------------------------------------- 1 | from .data import load_cifar10 2 | from .utils import load_model 3 | from .eval import benchmark 4 | -------------------------------------------------------------------------------- /robustbench/leaderboard/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/robustbench/leaderboard/__init__.py -------------------------------------------------------------------------------- /robustbench/leaderboard/leaderboard.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 10 | {% if threat_model != "corruptions" %} 11 | 16 | 21 | 26 | {% endif %} 27 | {% if threat_model == "corruptions" %} 28 | 33 | {% endif %} 34 | {% if threat_model == "corruptions" and dataset == 'imagenet' %} 35 | 40 | {% endif %} 41 | 42 | 43 | 44 | 45 | 46 | 47 | {% for model in models %} 48 | 49 | 50 | 59 | 60 | 61 | {%- if threat_model != "corruptions" %} 62 | 63 | 64 | {% endif %} 65 | {% if threat_model == "corruptions" and dataset == 'imagenet' -%} 66 | 67 | {%- endif %} 68 | 69 | 70 | 71 | 72 | {% endfor %} 73 | 74 |
RankMethod 7 | Standard
8 | accuracy 9 |
12 | AutoAttack
13 | robust
14 | accuracy 15 |
17 | Best known
18 | robust
19 | accuracy 20 |
22 | AA eval.
23 | potentially
24 | unreliable 25 |
29 | Robust
30 | accuracy
31 | (IN-C) 32 |
36 | Robust
37 | accuracy
38 | (IN-3DCC) 39 |
Extra
data
ArchitectureVenue
{{ loop.index }} 51 | {{ model.name }} 52 | {% if model.footnote is defined and model.footnote != None %} 53 |
54 | 55 | {{ model.footnote }} 56 | 57 | {% endif %} 58 |
{{ model.clean_acc }}%{{ model[acc_field] }}%{{ model.external if model.external is defined and model.external else model[acc_field]}}%{{ "Unknown" if model.unreliable is not defined else ("
" if model.unreliable else "
×
") }}
{{ model['corruptions_acc_3d'] }}%{{ "☑" if model.additional_data else "×" }}{{ model.architecture }}{{ model.venue }}
def generate_leaderboard(dataset: Union[str, BenchmarkDataset],
                         threat_model: Union[str, ThreatModel],
                         models_folder: str = "model_info") -> str:
    """Builds the HTML leaderboard starting from the .json results.

    The result is a table that can be put directly into the RobustBench
    index.html page, and looks the same as the tables that are already
    existing.

    The .json results must have the same structure as the following::

        {
          "link": "https://arxiv.org/abs/2003.09461",
          "name": "Adversarial Robustness on In- and Out-Distribution Improves Explainability",
          "authors": "Maximilian Augustin, Alexander Meinke, Matthias Hein",
          "additional_data": true,
          "number_forward_passes": 1,
          "dataset": "cifar10",
          "venue": "ECCV 2020",
          "architecture": "ResNet-50",
          "eps": "0.5",
          "clean_acc": "91.08",
          "reported": "73.27",
          "autoattack_acc": "72.91"
        }

    If the model is robust to common corruptions, then the "autoattack_acc"
    field should be "corruptions_acc".

    :param dataset: The dataset of the wanted leaderboard. example: "cifar10"
    :param threat_model: The threat model of the wanted leaderboard. example: "Linf"
    :param models_folder: The base folder of the model jsons (e.g. our "model_info" folder).

    :return: The resulting HTML table.
    :raises KeyError: If a model json contains none of the expected accuracy fields.
    """
    dataset_: BenchmarkDataset = BenchmarkDataset(dataset)
    threat_model_: ThreatModel = ThreatModel(threat_model)

    folder = Path(models_folder) / dataset_.value / threat_model_.value

    # Either a single field name or a tuple of candidate field names,
    # depending on the threat model.
    acc_field = ACC_FIELDS[threat_model_]

    models = []
    for model_path in folder.glob("*.json"):
        with open(model_path) as fp:
            models.append(json.load(fp))

    def get_key(model):
        # Accuracies are stored as strings in the jsons, hence the float().
        if isinstance(acc_field, str):
            return float(model[acc_field])
        for field in acc_field:
            if field in model:
                return float(model[field])
        # Bug fix: previously this path implicitly returned None, which
        # made list.sort fail with an opaque TypeError. Fail with a clear
        # message that names the offending json instead.
        raise KeyError(f"none of the accuracy fields {acc_field} found in "
                       f"model json '{model.get('name', '?')}'")

    models.sort(key=get_key, reverse=True)

    env = Environment(loader=PackageLoader('robustbench', 'leaderboard'),
                      autoescape=select_autoescape(['html', 'xml']))

    template = env.get_template('leaderboard.html.j2')

    result = template.render(
        threat_model=threat_model,
        dataset=dataset,
        models=models,
        acc_field=acc_field if isinstance(acc_field, str) else acc_field[-1])
    print(result)
    return result
our 'model_info' folder)" 100 | ) 101 | args = parser.parse_args() 102 | 103 | generate_leaderboard(args.dataset, args.threat_model, args.models_folder) 104 | -------------------------------------------------------------------------------- /robustbench/model_zoo/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import model_dicts 2 | 3 | -------------------------------------------------------------------------------- /robustbench/model_zoo/architectures/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobustBench/robustbench/78fcc9e48a07a861268f295a777b975f25155964/robustbench/model_zoo/architectures/__init__.py -------------------------------------------------------------------------------- /robustbench/model_zoo/architectures/alexnet.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple 2 | from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD 3 | from timm.models import build_model_with_cfg, register_model 4 | from torchvision.models.alexnet import AlexNet 5 | 6 | from .utils_architectures import normalize_timm_model 7 | 8 | 9 | def _cfg(url='', **kwargs): 10 | return { 11 | 'url': url, 12 | 'num_classes': 1000, 13 | 'input_size': (3, 224, 224), 14 | 'pool_size': None, 15 | 'crop_pct': 1.0, 16 | 'interpolation': 'bilinear', 17 | 'fixed_input_size': True, 18 | 'mean': IMAGENET_DEFAULT_MEAN, 19 | 'std': IMAGENET_DEFAULT_STD, 20 | 'first_conv': None, 21 | 'classifier': 'classifier', 22 | **kwargs 23 | } 24 | 25 | 26 | default_cfgs = { 27 | 'alexnet_imagenet_corruptions': 28 | _cfg(url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth") 29 | } 30 | 31 | 32 | class TimmAlexNet(AlexNet): 33 | def __init__(self, 34 | num_classes: int = 1000, 35 | dropout: float = 0.5, 36 | in_chans: Optional[int] = None, 37 | img_size: Optional[Tuple[int]] 
class BoostingWideResNet(WideResNet):
    """WideResNet backbone with a cosine-similarity-style classifier head.

    Defaults to a WideResNet-34-20 whose final linear layer has no bias;
    input normalization is folded into the forward pass via the registered
    ``mu``/``sigma`` buffers (the usual CIFAR-10 per-channel mean/std).
    """

    def __init__(self, depth=34, widen_factor=20):
        # sub_block1=True and bias_last=False -- presumably required to
        # match the released checkpoint's parameter layout and the
        # bias-free normalized head below; confirm before changing.
        super(BoostingWideResNet, self).__init__(depth=depth,
                                                 widen_factor=widen_factor,
                                                 sub_block1=True,
                                                 bias_last=False)
        # Normalization constants registered as buffers so they follow the
        # model across .to(device)/.cuda() calls and are saved in state_dict.
        self.register_buffer(
            'mu',
            torch.tensor([0.4914, 0.4822, 0.4465]).view(1, 3, 1, 1))
        self.register_buffer(
            'sigma',
            torch.tensor([0.2471, 0.2435, 0.2616]).view(1, 3, 1, 1))

    def forward(self, x):
        # Normalize raw inputs with the buffered statistics.
        x = (x - self.mu) / self.sigma
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        # L2-normalize the features so the logits below behave like cosine
        # similarities between features and class weight vectors.
        out = F.normalize(out, p=2, dim=1)
        # NOTE(review): this re-normalizes the fc weight rows IN-PLACE on
        # every forward pass (mutating weight.data). That keeps the rows
        # unit-norm at inference, but is a side effect on the parameters --
        # confirm intent before refactoring.
        for _, module in self.fc.named_modules():
            if isinstance(module, nn.Linear):
                module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.fc(out)
from timm.models import deit
from timm.models import register_model

from .utils_architectures import normalize_timm_model

# Pretrained-weight configs for the Tian et al. 2022 ("Deeper Insights into
# the Robustness of ViTs towards Common Corruptions") DeiT checkpoints
# hosted on the RobustBench GitHub releases.
default_cfgs = {
    'tian2022deeper_deit_s_imagenet_corruptions':
    deit._cfg(
        url=
        "https://github.com/RobustBench/robustbench/releases/download/v1.1.1-test/tian2022deeper-deit-s.pth"
    ),
    'tian2022deeper_deit_b_imagenet_corruptions':
    deit._cfg(
        url=
        "https://github.com/RobustBench/robustbench/releases/download/v1.1.1-test/tian2022deeper-deit-b.pth"
    ),
}


@register_model
def tian2022deeper_deit_s_imagenet_corruptions(pretrained=False,
                                               **kwargs) -> deit.VisionTransformer:
    # DeiT-Small (patch 16, dim 384, 12 heads... NOTE: 6 heads) registered
    # with timm under the name above; `pretrained=True` downloads the
    # checkpoint from default_cfgs.
    model_kwargs = dict(patch_size=16,
                        embed_dim=384,
                        depth=12,
                        num_heads=6,
                        **kwargs)
    model = deit._create_deit('tian2022deeper_deit_s_imagenet_corruptions',
                              pretrained=pretrained,
                              **model_kwargs)
    assert isinstance(model, deit.VisionTransformer)
    # Fold the timm mean/std input normalization into the model itself so
    # callers can feed raw [0, 1] images.
    model = normalize_timm_model(model)
    return model


@register_model
def tian2022deeper_deit_b_imagenet_corruptions(pretrained=False,
                                               **kwargs) -> deit.VisionTransformer:
    # DeiT-Base (patch 16, dim 768, 12 layers, 12 heads); otherwise
    # identical in structure to the DeiT-Small factory above.
    model_kwargs = dict(patch_size=16,
                        embed_dim=768,
                        depth=12,
                        num_heads=12,
                        **kwargs)
    model = deit._create_deit('tian2022deeper_deit_b_imagenet_corruptions',
                              pretrained=pretrained,
                              **model_kwargs)
    assert isinstance(model, deit.VisionTransformer)
    # Same normalization wrapping as the small variant.
    model = normalize_timm_model(model)
    return model
class RobustWideResNet(nn.Module):
    """WideResNet with per-stage channel/depth/stride/dropout configuration.

    Unlike the fixed-layout ``WideResNet``, every stage is described by the
    four parallel config lists, so arbitrary widths and depths can be built.
    Defaults correspond to a WRN-28-10-like layout (16/160/320/640 channels,
    three stages of 5 blocks).
    """
    # NOTE(review): mutable default arguments (lists) are shared across
    # calls; safe here only because they are never mutated.
    def __init__(self, num_classes=10, channel_configs=[16, 160, 320, 640],
                 depth_configs=[5, 5, 5], stride_config=[1, 2, 2],
                 drop_rate_config=[0.0, 0.0, 0.0]):
        super(RobustWideResNet, self).__init__()
        # channel_configs has one extra entry: the stem's output channels.
        assert len(channel_configs) - 1 == len(depth_configs) == len(stride_config) == len(drop_rate_config)
        self.channel_configs = channel_configs
        self.depth_configs = depth_configs
        self.stride_config = stride_config

        self.stem_conv = nn.Conv2d(3, channel_configs[0], kernel_size=3,
                                   stride=1, padding=1, bias=False)
        # One NetworkBlock per stage; stage i maps channel_configs[i] ->
        # channel_configs[i+1] with the configured stride and dropout.
        self.blocks = nn.ModuleList([])
        for i, stride in enumerate(stride_config):
            self.blocks.append(NetworkBlock(block=BasicBlock,
                                            nb_layers=depth_configs[i],
                                            in_planes=channel_configs[i],
                                            out_planes=channel_configs[i+1],
                                            stride=stride,
                                            dropRate=drop_rate_config[i],))

        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(channel_configs[-1])
        self.relu = nn.ReLU(inplace=True)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(channel_configs[-1], num_classes)
        self.fc_size = channel_configs[-1]

        # He-style init for convs; affine BN/GN params to identity; zero
        # linear biases (linear weights keep PyTorch's default init).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.GroupNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        # Stem -> stages -> BN/ReLU -> global pool -> flatten -> classifier.
        out = self.stem_conv(x)
        for i, block in enumerate(self.blocks):
            out = block(out)
        out = self.relu(self.bn1(out))
        out = self.global_pooling(out)
        out = out.view(-1, self.fc_size)
        out = self.fc(out)
        return out
class BasicBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv, twice).

    When the channel count or stride changes, a 1x1 convolution projects the
    shortcut branch; otherwise the input is added back unchanged.
    """
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # Projection shortcut only when the shapes differ.
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes,
                                          kernel_size=1, stride=stride,
                                          padding=0, bias=False)

    def forward(self, x):
        pre = self.relu1(self.bn1(x))
        # Identity skip when shapes match; otherwise project the
        # pre-activation (the pre-activation feeds both branches then).
        residual = x if self.equalInOut else self.convShortcut(pre)
        out = self.relu2(self.bn2(self.conv1(pre)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return residual + out


class NetworkBlock(nn.Module):
    """A stage: ``nb_layers`` BasicBlocks; only the first may change
    channels/stride, the rest keep shape."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes,
                                      nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = [
            block(in_planes if idx == 0 else out_planes,
                  out_planes,
                  stride if idx == 0 else 1,
                  dropRate)
            for idx in range(int(nb_layers))
        ]
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)


class WideResNet(nn.Module):
    """ Based on code from https://github.com/yaodongyu/TRADES """
    def __init__(self, depth=28, num_classes=10, widen_factor=10,
                 sub_block1=False, dropRate=0.0, bias_last=True):
        super(WideResNet, self).__init__()
        # depth = 6n + 4: three stages of n blocks plus stem/head layers.
        assert (depth - 4) % 6 == 0
        n = (depth - 4) / 6
        widths = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        block = BasicBlock
        # Stem conv before any residual stage.
        self.conv1 = nn.Conv2d(3, widths[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.block1 = NetworkBlock(n, widths[0], widths[1], block, 1, dropRate)
        if sub_block1:
            # Extra (unused-in-forward) copy of stage 1, kept so some
            # released checkpoints' parameter names line up.
            self.sub_block1 = NetworkBlock(n, widths[0], widths[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, widths[1], widths[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, widths[2], widths[3], block, 2, dropRate)
        # Head: BN/ReLU, global 8x8 average pool, linear classifier.
        self.bn1 = nn.BatchNorm2d(widths[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(widths[3], num_classes, bias=bias_last)
        self.nChannels = widths[3]

        # He-style init for convs, identity affine for BN, zero fc bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) and m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        feats = self.conv1(x)
        feats = self.block1(feats)
        feats = self.block2(feats)
        feats = self.block3(feats)
        feats = self.relu(self.bn1(feats))
        feats = F.avg_pool2d(feats, 8)
        feats = feats.view(-1, self.nChannels)
        return self.fc(feats)
import hashlib
import shutil
from pathlib import Path
from typing import Set

ZENODO_ENTRY_POINT = "https://zenodo.org/api"
# NOTE: already ends with a trailing slash -- do not add another one when
# appending a record id.
RECORDS_ENTRY_POINT = f"{ZENODO_ENTRY_POINT}/records/"

CHUNK_SIZE = 65536


class DownloadError(Exception):
    """Raised when a downloaded file does not match its expected checksum."""
    pass


def download_file(url: str, save_dir: Path, total_bytes: int) -> Path:
    """Downloads large files from the given URL.

    From: https://stackoverflow.com/a/16696317

    :param url: The URL of the file.
    :param save_dir: The directory where the file should be saved.
    :param total_bytes: The total bytes of the file (used for the progress bar).
    :return: The path to the downloaded file.
    """
    # Imported lazily so pure utilities (e.g. ``file_md5``) stay usable even
    # when the networking dependencies are unavailable.
    import requests
    from tqdm import tqdm

    local_filename = save_dir / url.split('/')[-1]
    print(f"Starting download from {url}")
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            # Round UP so the progress bar can reach 100% on the last,
            # possibly partial, chunk (the old floor division fell short).
            iters = (total_bytes + CHUNK_SIZE - 1) // CHUNK_SIZE
            for chunk in tqdm(r.iter_content(chunk_size=CHUNK_SIZE),
                              total=iters):
                f.write(chunk)

    return local_filename


def file_md5(filename: Path) -> str:
    """Computes the MD5 hash of a given file, reading it in chunks."""
    hash_md5 = hashlib.md5()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(32768), b""):
            hash_md5.update(chunk)

    return hash_md5.hexdigest()


def zenodo_download(record_id: str, filenames_to_download: Set[str],
                    save_dir: Path) -> None:
    """Downloads the given files from the given Zenodo record.

    :param record_id: The ID of the record.
    :param filenames_to_download: The files to download from the record.
    :param save_dir: The directory where the files should be saved.
    :raises DownloadError: If a downloaded file's MD5 does not match the
        checksum reported by Zenodo.
    """
    import requests  # lazy import; see note in download_file

    # exist_ok makes the prior .exists() guard redundant.
    save_dir.mkdir(parents=True, exist_ok=True)

    # Bug fix: RECORDS_ENTRY_POINT already ends with '/', so the previous
    # f"{RECORDS_ENTRY_POINT}/{record_id}" produced ".../records//<id>".
    url = f"{RECORDS_ENTRY_POINT}{record_id}"
    res = requests.get(url)
    res.raise_for_status()  # fail loudly on a bad record id / API error
    files = res.json()["files"]
    files_to_download = list(
        filter(lambda file: file["key"] in filenames_to_download, files))

    for file in files_to_download:
        # Skip files already present on disk.
        if (save_dir / file["key"]).exists():
            continue
        file_url = file["links"]["self"]
        # Checksums come as "md5:<hex>"; keep only the hex digest.
        file_checksum = file["checksum"].split(":")[-1]
        filename = download_file(file_url, save_dir, file["size"])
        if file_md5(filename) != file_checksum:
            raise DownloadError(
                "The hash of the downloaded file does not match"
                " the expected one.")
        print("Download finished, extracting...")
        # Older records may lack "type"; fall back to the file extension.
        format = file["type"] if "type" in file.keys() else file["key"].split('.')[-1]
        shutil.unpack_archive(filename,
                              extract_dir=save_dir,
                              format=format)
        print("Downloaded and extracted.")
def get_test_config():
    """Return the shared configuration dictionary used by the test suite.

    Keys cover the evaluation batch size, the datasets and threat models to
    exercise, data/model directory locations, and the torch device string.
    """
    # ImageNet is read from a dedicated local path; CIFAR data and model
    # checkpoints go into relative directories next to the test runner.
    return {
        'batch_size': 128,
        'datasets': ['cifar10', 'cifar100', 'imagenet'],
        'threat_models': ['Linf', 'L2', 'corruptions'],
        'data_dir': 'data',
        'imagenet_data_dir': '/tmldata1/andriush/imagenet',
        'model_dir': 'models',
        'device': 'cpu',  # switch to 'cuda:0' for GPU runs
    }
def _accuracy_computation(success_criterion: Callable[[str, float, str, str], bool], n_ex: int = 200) -> None:
    """Evaluate clean accuracy for every model-zoo entry selected by the test
    config and report, per model, whether ``success_criterion`` accepts it.

    :param success_criterion: callback ``(model_name, acc, dataset, threat_model) -> bool``
        deciding whether a model's measured accuracy passes the test.
    :param n_ex: number of clean test examples to evaluate on.
    """
    config = get_test_config()
    device = torch.device(config["device"])

    total_models = 0
    passed_count = 0
    for dataset, dataset_dict in model_dicts.items():
        if dataset.value not in config['datasets']:
            continue
        print(f"Test models trained on {dataset.value}")
        # ImageNet is served from its dedicated directory; other datasets use
        # the generic data dir.
        if dataset.value == 'imagenet':
            data_dir = config['imagenet_data_dir']
        else:
            data_dir = config['data_dir']

        # Cache the loaded dataset across models: it is reloaded only when a
        # model declares a different preprocessing (needed for ImageNet).
        last_preprocessing = ''
        for threat_model, threat_model_dict in dataset_dict.items():
            if threat_model.value not in config['threat_models']:
                continue
            print(f"Test models robust wrt {threat_model.value}")
            model_names = list(threat_model_dict)
            total_models += len(model_names)

            for model_name in model_names:
                curr_preprocessing = threat_model_dict[model_name].get('preprocessing', 'none')
                if curr_preprocessing != last_preprocessing:
                    x_test, y_test = load_clean_dataset(dataset, n_ex, data_dir, curr_preprocessing)
                    last_preprocessing = curr_preprocessing

                model = load_model(model_name, config["model_dir"], dataset, threat_model).to(device)
                model.eval()

                acc = clean_accuracy(model, x_test, y_test,
                                     batch_size=config["batch_size"], device=device)

                success = success_criterion(model_name, acc, dataset.value, threat_model.value)
                passed_count += int(success)
                print(f"{model_name}: clean accuracy {acc:.2%} (on {n_ex} examples),"
                      f" test passed: {success}")

    print(f"Test is passed for {passed_count}/{total_models} models.")
class CleanAccTester(TestCase):
    """Checks that benchmarking a model left in training mode is flagged."""

    def test_benchmark_train(self):
        # A model still in train() mode must trigger a warning when
        # benchmarked: batch-norm/dropout behavior would corrupt evaluation.
        dummy = DummyModel()
        dummy.train()
        self.assertWarns(Warning, benchmark, dummy,
                         n_examples=1,
                         threat_model=ThreatModel.Linf,
                         eps=8 / 255,
                         batch_size=100)
"""
Partially borrowed from https://github.com/huggingface/transformers/blob/master/src/transformers/testing_utils.py
"""


def _strtobool(value: str) -> bool:
    """Convert a truthy/falsy string to a bool.

    Local replacement for ``distutils.util.strtobool``: ``distutils`` was
    deprecated by PEP 632 and removed in Python 3.12, so importing it breaks
    the test suite on modern interpreters. Accepts the same values as the
    original (case-insensitive).

    :raises ValueError: if *value* is not a recognized truth value.
    """
    normalized = value.strip().lower()
    if normalized in ("y", "yes", "t", "true", "on", "1"):
        return True
    if normalized in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")


def parse_flag_from_env(key, default=False):
    """Read the boolean flag *key* from the environment.

    :param key: environment variable name.
    :param default: value returned when the variable is not set.
    :returns: the parsed boolean (``default`` if unset).
    :raises ValueError: if the variable is set to an unrecognized value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    try:
        return _strtobool(value)
    except ValueError:
        # More values are supported, but let's keep the message simple.
        raise ValueError("If set, {} must be yes or no.".format(key))


# Evaluated once at import time: slow tests run only when RUN_SLOW is truthy.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def slow(test_case):
    """
    Decorator marking a test as slow.
    Slow tests are skipped by default. Set the RUN_SLOW environment variable
    to a truthy value to run them.
    """
    if not _run_slow_tests:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case