├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── Reproduce_FeatureSqueezing.md ├── attacks ├── README.md ├── __init__.py ├── adaptive │ ├── __init__.py │ └── adaptive_adversary.py ├── carlini_wrapper.py ├── cleverhans_wrapper.py ├── deepfool_wrapper.py ├── pgd │ ├── LICENSE │ ├── __init__.py │ ├── pgd_attack.py │ └── pgd_wrapper.py └── tohinz_wrapper.py ├── datasets ├── __init__.py ├── cifar10.py ├── datasets_utils.py ├── imagenet.py ├── imagenet_dataset │ ├── ILSVRC2014_clsloc_validation_ground_truth.txt │ ├── caffe_clsloc_validation_ground_truth.txt │ └── label_as_filename.py ├── mnist.py ├── svhn.py ├── svhn_dataset │ └── download_svhn_data.py └── visualization.py ├── detections ├── __init__.py ├── base.py ├── feature_squeezing.py ├── magnet_cifar.py └── magnet_mnist.py ├── externals └── __init__.py ├── main.py ├── models ├── README.md ├── __init__.py ├── carlini_models.py ├── cleverhans_models.py ├── densenet_models.py ├── keras_models │ ├── __init__.py │ ├── inceptionv3_model.py │ ├── keras_models.py │ ├── resnet50_model.py │ └── vgg19_model.py ├── mobilenets_model │ ├── __init__.py │ └── mobilenets_model.py ├── pgdtrained_models.py └── tohinz_models.py ├── requirements_cpu.txt ├── requirements_gpu.txt ├── robustness ├── __init__.py ├── base.py ├── feature_squeezing.py └── magnet.py └── utils ├── __init__.py ├── load_externals.py ├── median.py ├── output.py ├── parameter_parser.py ├── squeeze.py └── visualization.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | results* 104 | 105 | # MacOS 106 | .DS_Store 107 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "externals/cleverhans"] 2 | path = externals/cleverhans 3 | url = https://github.com/openai/cleverhans.git 4 | [submodule "externals/carlini/nn_robust_attacks"] 5 | path = externals/carlini/nn_robust_attacks 6 | url = https://github.com/mzweilin/nn_robust_attacks.git 7 | ignore = dirty 8 | [submodule "externals/keras_models"] 9 | path = externals/keras_models 10 | url = https://github.com/fchollet/deep-learning-models.git 11 | ignore = dirty 12 | [submodule "externals/MobileNetworks"] 13 | path = externals/MobileNetworks 14 | url = https://github.com/titu1994/MobileNetworks.git 15 | ignore = dirty 16 | [submodule "externals/universal"] 17 | path = externals/universal 18 | url = https://github.com/LTS4/universal.git 19 | ignore = dirty 20 | [submodule "externals/titu1994/DenseNet"] 21 | path = externals/titu1994/DenseNet 22 | url = https://github.com/titu1994/DenseNet.git 23 | ignore = dirty 24 | [submodule "externals/MagNet"] 25 | path = externals/MagNet 26 | url = https://github.com/mzweilin/MagNet.git 27 | ignore = dirty 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Weilin Xu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EvadeML-Zoo 2 | 3 | The goals of this project: 4 | * Several datasets ready to use: MNIST, CIFAR-10, ImageNet-ILSVRC and more. 5 | * Pre-trained state-of-the-art models to attack. [[See details]](models/README.md). 6 | * Existing attack methods: FGSM, BIM, JSMA, DeepFool, Universal Perturbations, Carlini/Wagner-L2/Li/L0 and more. [[See details]](attacks/README.md). 7 | * Visualization of adversarial examples. 8 | * Existing defense methods as baselines. 9 | 10 | The code was developed on Python 2, but should be runnable on Python 3 with minor modifications. 11 | 12 | > Please follow [the instructions](Reproduce_FeatureSqueezing.md) to reproduce the _**Feature Squeezing**_ results. 13 | 14 | ## 1. Install dependencies. 15 | 16 | ```bash 17 | pip install -r requirements_cpu.txt 18 | ``` 19 | 20 | If you are going to run the code on GPU, install this list instead: 21 | ```bash 22 | pip install -r requirements_gpu.txt 23 | ``` 24 | 25 | ## 2. Fetch submodules. 26 | ```bash 27 | git submodule update --init --recursive 28 | ``` 29 | 30 | ## 3. Download pre-trained models. 31 | ```bash 32 | mkdir downloads; curl -sL https://github.com/mzweilin/EvadeML-Zoo/releases/download/v0.1/downloads.tar.gz | tar xzv -C downloads 33 | ``` 34 | 35 | ## 4. (Optional) Download the SVHN dataset and pre-trained model. 36 | ```bash 37 | python datasets/svhn_dataset/download_svhn_data.py 38 | curl -sL https://github.com/mzweilin/EvadeML-Zoo/releases/download/v0.1/svhn_model_weights.tar.gz | tar xzv 39 | ``` 40 | 41 | ## 5. Usage of `python main.py` 42 | ``` 43 | usage: python main.py [-h] [--dataset_name DATASET_NAME] [--model_name MODEL_NAME] 44 | [--select [SELECT]] [--noselect] [--nb_examples NB_EXAMPLES] 45 | [--balance_sampling [BALANCE_SAMPLING]] [--nobalance_sampling] 46 | [--test_mode [TEST_MODE]] [--notest_mode] [--attacks ATTACKS] 47 | [--clip CLIP] [--visualize [VISUALIZE]] [--novisualize] 48 | [--robustness ROBUSTNESS] [--detection DETECTION] 49 | [--detection_train_test_mode [DETECTION_TRAIN_TEST_MODE]] 50 | [--nodetection_train_test_mode] [--result_folder RESULT_FOLDER] 51 | [--verbose [VERBOSE]] [--noverbose] 52 | 53 | optional arguments: 54 | -h, --help show this help message and exit 55 | --dataset_name DATASET_NAME 56 | Supported: MNIST, CIFAR-10, ImageNet, SVHN. 57 | --model_name MODEL_NAME 58 | Supported: cleverhans, cleverhans_adv_trained and 59 | carlini for MNIST; carlini and DenseNet for CIFAR-10; 60 | ResNet50, VGG19, Inceptionv3 and MobileNet for 61 | ImageNet; tohinz for SVHN. 62 | --select [SELECT] Select correctly classified examples for the 63 | experiment. 64 | --noselect 65 | --nb_examples NB_EXAMPLES 66 | The number of examples selected for attacks. 67 | --balance_sampling [BALANCE_SAMPLING] 68 | Select the same number of examples for each class. 69 | --nobalance_sampling 70 | --test_mode [TEST_MODE] 71 | Only select one sample for each class. 72 | --notest_mode 73 | --attacks ATTACKS Attack name and parameters in URL style, separated by 74 | semicolon. 75 | --clip CLIP L-infinity clip on the adversarial perturbations. 
76 | --visualize [VISUALIZE] 77 | Output the image examples for each attack, enabled by 78 | default. 79 | --novisualize 80 | --robustness ROBUSTNESS 81 | Supported: FeatureSqueezing. 82 | --detection DETECTION 83 | Supported: feature_squeezing. 84 | --detection_train_test_mode [DETECTION_TRAIN_TEST_MODE] 85 | Split into train/test datasets. 86 | --nodetection_train_test_mode 87 | --result_folder RESULT_FOLDER 88 | The output folder for results. 89 | --verbose [VERBOSE] Stdout level. Content hidden from stdout is still 90 | saved to the log files. 91 | --noverbose 92 | ``` 93 | 94 | ### Example. 95 | ```bash 96 | python main.py --dataset_name MNIST --model_name carlini \ 97 | --nb_examples 2000 --balance_sampling \ 98 | --attacks "FGSM?eps=0.1;" \ 99 | --robustness "none;FeatureSqueezing?squeezer=bit_depth_1;" \ 100 | --detection "FeatureSqueezing?squeezers=bit_depth_1,median_filter_2_2&distance_measure=l1&fpr=0.05;" 101 | ``` 102 | 103 | ## Cite this work 104 | 105 | You are encouraged to cite the following paper if you use `EvadeML-Zoo` for academic research. 106 | 107 | ``` 108 | @inproceedings{xu2018feature, 109 | title={{Feature Squeezing: Detecting Adversarial Examples in Deep Neural Networks}}, 110 | author={Xu, Weilin and Evans, David and Qi, Yanjun}, 111 | booktitle={Proceedings of the 2018 Network and Distributed Systems Security Symposium (NDSS)}, 112 | year={2018} 113 | } 114 | ``` 115 | -------------------------------------------------------------------------------- /Reproduce_FeatureSqueezing.md: -------------------------------------------------------------------------------- 1 | # Feature Squeezing: Detecting Adversarial Examples in Deep Neural Networks 2 | 3 | ![Feature Squeezing Detection Framework](https://xuweilin.org/publications/detection_framework.png) 4 | 5 | ``` 6 | @inproceedings{xu2018feature, 7 | title={{Feature Squeezing: Detecting Adversarial Examples in Deep Neural Networks}}, 8 | author={Xu, Weilin and Evans, David and Qi, Yanjun}, 9 | booktitle={Proceedings of the 2018 Network and Distributed Systems Security Symposium (NDSS)}, 10 | year={2018} 11 | } 12 | ``` 13 | 14 | This note was created to help reproduce the results of the [Feature Squeezing paper](https://arxiv.org/pdf/1704.01155.pdf). The code was developed on Python 2, but should be runnable on Python 3 with minor modifications. 15 | 16 | ## 1. Install dependencies. 17 | 18 | ```bash 19 | pip install -r requirements_cpu.txt 20 | ``` 21 | 22 | If you are going to run the code on GPU, install this list instead: 23 | ```bash 24 | pip install -r requirements_gpu.txt 25 | ``` 26 | 27 | ## 2. Fetch submodules. 28 | ```bash 29 | git submodule update --init --recursive 30 | ``` 31 | 32 | ## 3. Download the pre-trained target models. 33 | ```bash 34 | mkdir downloads; curl -sL https://github.com/mzweilin/EvadeML-Zoo/releases/download/v0.1/downloads.tar.gz | tar xzv -C downloads 35 | ``` 36 | 37 | 38 | ## 4. Download the pre-generated adversarial examples. You may skip this step and generate adversarial examples on your own machine. 39 | ```bash 40 | mkdir results 41 | curl -sL https://github.com/mzweilin/EvadeML-Zoo/releases/download/v0.1/results_MNIST_100_317f6_carlini.tar.gz | tar xzv -C results 42 | curl -sL https://github.com/mzweilin/EvadeML-Zoo/releases/download/v0.1/results_CIFAR-10_100_de671_densenet.tar.gz | tar xzv -C results 43 | curl -sL https://github.com/mzweilin/EvadeML-Zoo/releases/download/v0.1/results_ImageNet_100_a2749_mobilenet.tar.gz | tar xzv -C results 44 | ``` 45 | 46 | ## 5. 
Evaluate with the MNIST dataset. 47 | ```bash 48 | # Evaluate the robust classification accuracy and the detection performance. 49 | python main.py --dataset_name MNIST --model_name carlini \ 50 | --attacks "\ 51 | fgsm?eps=0.3;\ 52 | bim?eps=0.3&eps_iter=0.06;\ 53 | carlinili?targeted=next&batch_size=1&max_iterations=1000&confidence=10;\ 54 | carlinili?targeted=ll&batch_size=1&max_iterations=1000&confidence=10;\ 55 | carlinil2?targeted=next&batch_size=100&max_iterations=1000&confidence=10;\ 56 | carlinil2?targeted=ll&batch_size=100&max_iterations=1000&confidence=10;\ 57 | carlinil0?targeted=next&batch_size=1&max_iterations=1000&confidence=10;\ 58 | carlinil0?targeted=ll&batch_size=1&max_iterations=1000&confidence=10;\ 59 | jsma?targeted=next;\ 60 | jsma?targeted=ll;" \ 61 | --robustness "none;\ 62 | FeatureSqueezing?squeezer=bit_depth_1;\ 63 | FeatureSqueezing?squeezer=median_filter_2_2;\ 64 | FeatureSqueezing?squeezer=median_filter_3_3;" \ 65 | --detection "FeatureSqueezing?squeezers=bit_depth_1&distance_measure=l1&fpr=0.05;\ 66 | FeatureSqueezing?squeezers=bit_depth_2&distance_measure=l1&fpr=0.05;\ 67 | FeatureSqueezing?squeezers=bit_depth_1,median_filter_2_2&distance_measure=l1&fpr=0.05;" 68 | ``` 69 | 70 | 71 | ## 6. Evaluate with the CIFAR-10 dataset. 72 | ```bash 73 | python main.py --dataset_name CIFAR-10 --model_name DenseNet \ 74 | --attacks "fgsm?eps=0.0156;bim?eps=0.008&eps_iter=0.0012;carlinili?targeted=next&confidence=5;carlinili?targeted=ll&confidence=5;deepfool?overshoot=10;carlinil2?targeted=next&batch_size=100&max_iterations=1000&confidence=5;carlinil2?targeted=ll&batch_size=100&max_iterations=1000&confidence=5;carlinil0?targeted=next&confidence=5;carlinil0?targeted=ll&confidence=5;jsma?targeted=next;jsma?targeted=ll;" \ 75 | --robustness "none;\ 76 | FeatureSqueezing?squeezer=bit_depth_5;\ 77 | FeatureSqueezing?squeezer=bit_depth_4;\ 78 | FeatureSqueezing?squeezer=median_filter_2_2;\ 79 | FeatureSqueezing?squeezer=non_local_means_color_13_3_4;" \ 80 | --detection "\ 81 | FeatureSqueezing?squeezers=bit_depth_1&distance_measure=l1&fpr=0.05;\ 82 | FeatureSqueezing?squeezers=bit_depth_2&distance_measure=l1&fpr=0.05;\ 83 | FeatureSqueezing?squeezers=bit_depth_3&distance_measure=l1&fpr=0.05;\ 84 | FeatureSqueezing?squeezers=bit_depth_4&distance_measure=l1&fpr=0.05;\ 85 | FeatureSqueezing?squeezers=bit_depth_5&distance_measure=l1&fpr=0.05;\ 86 | FeatureSqueezing?squeezers=median_filter_2_2&distance_measure=l1&fpr=0.05;\ 87 | FeatureSqueezing?squeezers=median_filter_3_3&distance_measure=l1&fpr=0.05;\ 88 | FeatureSqueezing?squeezers=non_local_means_color_11_3_2&distance_measure=l1&fpr=0.05;\ 89 | FeatureSqueezing?squeezers=non_local_means_color_11_3_4&distance_measure=l1&fpr=0.05;\ 90 | FeatureSqueezing?squeezers=non_local_means_color_13_3_2&distance_measure=l1&fpr=0.05;\ 91 | FeatureSqueezing?squeezers=non_local_means_color_13_3_4&distance_measure=l1&fpr=0.05;\ 92 | FeatureSqueezing?squeezers=bit_depth_5,median_filter_2_2,non_local_means_color_13_3_2&distance_measure=l1&fpr=0.05;" 93 | ``` 94 | 95 | 96 | ## 7. Evaluate with the ImageNet dataset. 
97 | ```bash 98 | python main.py --dataset_name ImageNet --model_name MobileNet --nb_examples 100 \ 99 | --attacks "fgsm?eps=0.0078;bim?eps=0.0040&eps_iter=0.0020;carlinili?batch_size=1&targeted=next&confidence=5;carlinili?batch_size=1&targeted=ll&confidence=5;deepfool?overshoot=35;carlinil2?max_iterations=1000&batch_size=10&targeted=next&confidence=5;carlinil2?max_iterations=1000&batch_size=50&targeted=ll&confidence=5;carlinil0?batch_size=1&targeted=next&confidence=5;carlinil0?batch_size=1&targeted=ll&confidence=5;" \ 100 | --detection "FeatureSqueezing?squeezers=bit_depth_5,median_filter_2_2,non_local_means_color_11_3_4&distance_measure=l1&fpr=0.05" 101 | ``` 102 | 103 | ## 8. Combine with Adversarial Training. 104 | ```bash 105 | # Compare with FGSM-based Adversarial Training. 106 | python main.py --dataset_name MNIST --model_name carlini --noselect --nb_examples 10000 \ 107 | --attacks "fgsm?eps=0.1;fgsm?eps=0.2;fgsm?eps=0.3;fgsm?eps=0.4;" \ 108 | --robustness "none;FeatureSqueezing?squeezer=bit_depth_1;" 109 | 110 | python main.py --dataset_name MNIST --model_name cleverhans_adv_trained --noselect --nb_examples 10000 \ 111 | --attacks "fgsm?eps=0.1;fgsm?eps=0.2;fgsm?eps=0.3;fgsm?eps=0.4;" \ 112 | --robustness "none;FeatureSqueezing?squeezer=bit_depth_1;" 113 | ``` 114 | 115 | ```bash 116 | # Compare with PGD-based Adversarial Training. 117 | python main.py --dataset_name MNIST --model_name pgdbase --noselect --nb_examples 10000 \ 118 | --attacks "pgdli?epsilon=0.1;pgdli?epsilon=0.2;pgdli?epsilon=0.3;pgdli?epsilon=0.4;" \ 119 | --robustness "none;FeatureSqueezing?squeezer=bit_depth_1;" 120 | 121 | python main.py --dataset_name MNIST --model_name pgdtrained --noselect --nb_examples 10000 \ 122 | --attacks "pgdli?epsilon=0.1;pgdli?epsilon=0.2;pgdli?epsilon=0.3;pgdli?epsilon=0.4;" \ 123 | --robustness "none;FeatureSqueezing?squeezer=bit_depth_1;" 124 | ``` 125 | 126 | ## 9. Compare with MagNet. 127 | ```bash 128 | # Evaluate with MNIST. 129 | python main.py --dataset_name MNIST --model_name carlini \ 130 | --attacks "fgsm?eps=0.3;bim?eps=0.3&eps_iter=0.06;carlinili?targeted=next&batch_size=1&max_iterations=1000&confidence=10;carlinili?targeted=ll&batch_size=1&max_iterations=1000&confidence=10;carlinil2?targeted=next&batch_size=100&max_iterations=1000&confidence=10;carlinil2?targeted=ll&batch_size=100&max_iterations=1000&confidence=10;carlinil0?targeted=next&batch_size=1&max_iterations=1000&confidence=10;carlinil0?targeted=ll&batch_size=1&max_iterations=1000&confidence=10;jsma?targeted=next;jsma?targeted=ll;" \ 131 | --detection "FeatureSqueezing?squeezers=bit_depth_1,median_filter_2_2&distance_measure=l1&threshold=1.2358;MagNet" 132 | ``` 133 | 134 | ```bash 135 | # Evaluate with CIFAR-10 136 | python main.py --dataset_name CIFAR-10 --model_name DenseNet \ 137 | --attacks "fgsm?eps=0.0156;bim?eps=0.008&eps_iter=0.0012;carlinili?targeted=next&confidence=5;carlinili?targeted=ll&confidence=5;deepfool?overshoot=10;carlinil2?targeted=next&batch_size=100&max_iterations=1000&confidence=5;carlinil2?targeted=ll&batch_size=100&max_iterations=1000&confidence=5;carlinil0?targeted=next&confidence=5;carlinil0?targeted=ll&confidence=5;jsma?targeted=next;jsma?targeted=ll;" \ 138 | --detection "FeatureSqueezing?squeezers=bit_depth_5,median_filter_2_2,non_local_means_color_13_3_2&distance_measure=l1&threshold=1.7547;MagNet" 139 | ``` 140 | 141 | 142 | ## 10. Evaluate Adaptive Adversary with MNIST. 143 | ```bash 144 | # Adaptive adversary by He et al. 
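# (This refers to Warren He et al., "Adversarial Example Defenses: Ensembles of Weak Defenses are not Strong", WOOT 2017.)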
145 | python main.py --dataset_name MNIST --model_name carlini \ 146 | --attacks "adaptive_carlini_l2?targeted=false&tf_squeezers=median_filter_2_2,binary_filter_0.5&distance_measure=l1&detector_threshold=0.002915;\ 147 | adaptive_carlini_l2?targeted=next&tf_squeezers=median_filter_2_2,binary_filter_0.5&distance_measure=l1&detector_threshold=0.002915;\ 148 | adaptive_carlini_l2?targeted=ll&tf_squeezers=median_filter_2_2,binary_filter_0.5&distance_measure=l1&detector_threshold=0.002915;" \ 149 | --detection "FeatureSqueezing?squeezers=binary_filter_0.5,median_filter_2_2&distance_measure=l1&threshold=0.002915;" \ 150 | --nodetection_train_test_mode 151 | ``` 152 | 153 | ```bash 154 | # Clip the adaptive adversarial examples by epsilon 0.3. 155 | python main.py --dataset_name MNIST --model_name carlini \ 156 | --attacks "adaptive_carlini_l2?targeted=false&tf_squeezers=median_filter_2_2,binary_filter_0.5&distance_measure=l1&detector_threshold=0.002915;adaptive_carlini_l2?targeted=next&tf_squeezers=median_filter_2_2,binary_filter_0.5&distance_measure=l1&detector_threshold=0.002915;adaptive_carlini_l2?targeted=ll&tf_squeezers=median_filter_2_2,binary_filter_0.5&distance_measure=l1&detector_threshold=0.002915;" \ 157 | --detection "FeatureSqueezing?squeezers=bit_depth_1,median_filter_2_2&distance_measure=l1&threshold=0.002915;" \ 158 | --nodetection_train_test_mode --clip 0.3 159 | ``` 160 | -------------------------------------------------------------------------------- /attacks/README.md: -------------------------------------------------------------------------------- 1 | # Attack Algorithms 2 | 3 | ## Default Parameters 4 | 5 | ### Cleverhans Attacks 6 | 7 | * eps: (required float) maximum distortion of adversarial example compared to original input 8 | * eps_iter: (required float) step size for each attack iteration 9 | * nb_iter: (required int) Number of attack iterations. 10 | * theta: (optional float) Perturbation introduced to modified components (can be positive or negative) 11 | * gamma: (optional float) Maximum percentage of perturbed features 12 | 13 | | Parameter | FGSM | BIM | JSMA | 14 | |------------|---------|--------|------| 15 | | eps | 0.1 | 0.1 | - | 16 | | eps_iter | - | 0.05 | - | 17 | | nb_iter | - | 10 | - | 18 | | theta | - | - | 1 | 19 | | gamma | - | - | 0.1 | 20 | 21 | 22 | ### C/W Attacks 23 | | Parameter | C/W L2 | C/W Li | C/W L0 | 24 | |-----------------------|----------|-----------|--------| 25 | | batch_size | 1 | - | - | 26 | | confidence | 0 | - | - | 27 | | learning_rate | 0.01 | 0.005 | 0.01 | 28 | | binary_search_steps | 9 | - | - | 29 | | max_iterations | 10000 | 1000 | 1000 | 30 | | abort_early | true | true | true | 31 | | initial_const | 0.001 | 0.00001 | 0.001 | 32 | | largest_const | - | 2e1 | 2e6 | 33 | | reduce_const | - | false | false | 34 | | decrease_factor | - | 0.9 | - | 35 | | const_factor | - | 2.0 | 2.0 | 36 | | independent_channels | - | - | false | 37 | 38 | *Note*: C/W Li uses an additional variable tau to control the perturbation, so largest_const can be smaller while still producing successful adversarial examples, which significantly reduces the computational cost. 39 | 40 | ### DeepFool 41 | 42 | * num_classes: limits the number of classes to test against. 
43 | * overshoot: used as a termination criterion to prevent vanishing updates 44 | * max_iter: maximum number of iterations for DeepFool 45 | 46 | Example: `python main.py --attacks "deepfool?overshoot=9&max_iter=50"` 47 | 48 | ### Universal Adversarial Perturbations 49 | 50 | * delta: controls the desired fooling rate (default = 80% fooling rate when delta == 0.2) 51 | * max_iter_uni: optional other termination criterion (maximum number of iterations, default = np.inf) 52 | * xi: controls the l_p magnitude of the perturbation (default = 10) 53 | * p: norm to be used (FOR NOW, ONLY p = 2, and p = np.inf ARE ACCEPTED!) (default = np.inf) 54 | * num_classes: limits the number of classes to test against (default = 10) 55 | * overshoot: used as a termination criterion to prevent vanishing updates (default = 0.02). 56 | * max_iter_df: maximum number of iterations for DeepFool (default = 10) 57 | 58 | Example: `python main.py --attacks "unipert?overshoot=9&max_iter_df=50"` 59 | 60 | | Parameter | DeepFool | Universal Adversarial Perturbations | 61 | |-----------------------|-------------|--------------------------------------| 62 | | num_classes | 10 | 10 | 63 | | overshoot | 0.02 | 0.02 | 64 | | max_iter | 50 | - | 65 | | max_iter_df | - | 10 | 66 | | max_iter_uni | - | np.inf | 67 | | delta | - | 0.2 | 68 | | xi | - | 10 | 69 | | p | - | np.inf | 70 | 71 | 72 | -------------------------------------------------------------------------------- /attacks/__init__.py: -------------------------------------------------------------------------------- 1 | from future.standard_library import install_aliases 2 | install_aliases() 3 | from urllib import parse as urlparse 4 | 5 | import pickle 6 | import numpy as np 7 | import os 8 | import time 9 | 10 | from .cleverhans_wrapper import generate_fgsm_examples, generate_jsma_examples, generate_bim_examples 11 | from .carlini_wrapper import generate_carlini_l2_examples, generate_carlini_li_examples, generate_carlini_l0_examples 12 | from .deepfool_wrapper import generate_deepfool_examples, generate_universal_perturbation_examples 13 | from .adaptive.adaptive_adversary import generate_adaptive_carlini_l2_examples 14 | from .pgd.pgd_wrapper import generate_pgdli_examples 15 | 16 | 17 | # TODO: replace pickle with .h5 to avoid Python 2/3 compatibility issues. 18 | def maybe_generate_adv_examples(sess, model, x, y, X, Y, attack_name, attack_params, use_cache=False, verbose=True, attack_log_fpath=None): 19 | x_adv_fpath = use_cache 20 | if use_cache and os.path.isfile(x_adv_fpath): 21 | print ("Loading adversarial examples from [%s]." 
% os.path.basename(x_adv_fpath)) 22 | X_adv, aux_info = pickle.load(open(x_adv_fpath, "rb")); duration = aux_info['duration']  # The cache stores (X_adv, aux_info), not (X_adv, duration). 23 | else: 24 | time_start = time.time() 25 | X_adv = generate_adv_examples(sess, model, x, y, X, Y, attack_name, attack_params, verbose, attack_log_fpath) 26 | duration = time.time() - time_start 27 | 28 | if not isinstance(X_adv, np.ndarray): 29 | X_adv, aux_info = X_adv 30 | else: 31 | aux_info = {} 32 | 33 | aux_info['duration'] = duration 34 | 35 | if use_cache: 36 | pickle.dump((X_adv, aux_info), open(x_adv_fpath, 'wb')) 37 | return X_adv, duration 38 | 39 | 40 | def generate_adv_examples(sess, model, x, y, X, Y, attack_name, attack_params, verbose, attack_log_fpath): 41 | if attack_name == 'none': 42 | return X 43 | elif attack_name == 'fgsm': 44 | generate_adv_examples_func = generate_fgsm_examples 45 | elif attack_name == 'jsma': 46 | generate_adv_examples_func = generate_jsma_examples 47 | elif attack_name == 'bim': 48 | generate_adv_examples_func = generate_bim_examples 49 | elif attack_name == 'carlinil2': 50 | generate_adv_examples_func = generate_carlini_l2_examples 51 | elif attack_name == 'carlinili': 52 | generate_adv_examples_func = generate_carlini_li_examples 53 | elif attack_name == 'carlinil0': 54 | generate_adv_examples_func = generate_carlini_l0_examples 55 | elif attack_name == 'deepfool': 56 | generate_adv_examples_func = generate_deepfool_examples 57 | elif attack_name == 'unipert': 58 | generate_adv_examples_func = generate_universal_perturbation_examples 59 | elif attack_name == 'adaptive_carlini_l2': 60 | generate_adv_examples_func = generate_adaptive_carlini_l2_examples 61 | elif attack_name == 'pgdli': 62 | generate_adv_examples_func = generate_pgdli_examples 63 | else: 64 | raise NotImplementedError("Unsupported attack [%s]." % attack_name) 65 | 66 | X_adv = generate_adv_examples_func(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath) 67 | 68 | return X_adv 69 | 70 | -------------------------------------------------------------------------------- /attacks/adaptive/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mzweilin/EvadeML-Zoo/4dbeee04874a836f79782802b2bf8a8612a2a86f/attacks/adaptive/__init__.py -------------------------------------------------------------------------------- /attacks/adaptive/adaptive_adversary.py: -------------------------------------------------------------------------------- 1 | """ 2 | Demo of whether an adaptive adversary works against feature squeezing. 3 | 4 | Embed the differentiable filter layers in a model. 5 | Pass in the (average) gradient (part of the loss) to an attack algorithm. 6 | Implement the Gaussian-noise iterative method for non-differentiable filter layers (bit depth reduction). 7 | Introduce randomized feature squeezing (needs to be verified on legitimate examples; it should not harm accuracy). 8 | 9 | 10 | """ 11 | 12 | import os 13 | import tensorflow as tf 14 | import numpy as np 15 | import math 16 | 17 | # Core: Get the gradient of models for the attack algorithms. 18 | # We will combine the gradient of several models. 19 | 20 | 21 | from keras.models import Model 22 | from keras.layers import Lambda, Input 23 | 24 | def insert_pre_processing_layer_to_model(model, input_shape, func): 25 | # Output model: apply `func` to the input before the original model, and output logits instead of softmax. 26 | # The output model will have three layers in abstract: Input, Lambda, TrainingModel. 
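    # A minimal usage sketch (hypothetical names, not executed here): assuming `model` is a Keras softmax classifier on 28x28x1 inputs and `median_filter_tf` is a TensorFlow-differentiable squeezer from utils.squeeze, calling insert_pre_processing_layer_to_model(model, (28, 28, 1), median_filter_tf) yields a logits model that a gradient-based attack can differentiate end-to-end through the squeezer.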
27 | model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output) 28 | 29 | input_tensor = Input(shape=input_shape) 30 | 31 | scaler_layer = Lambda(func, input_shape=input_shape)(input_tensor) 32 | output_tensor = model_logits(scaler_layer) 33 | 34 | model_new = Model(inputs=input_tensor, outputs=output_tensor) 35 | return model_new 36 | 37 | 38 | # maybe_generate_adv_examples(sess, model, x, y, X_test, Y_test_target, attack_name, attack_params, use_cache = x_adv_fpath, verbose=FLAGS.verbose, attack_log_fpath=attack_log_fpath) 39 | def adaptive_attack(sess, model, squeezers, x, y, X_test, Y_test_target, attack_name, attack_params): 40 | for squeeze_func in squeezers: 41 | predictions = model(squeeze_func(x)) 42 | 43 | 44 | # tf.contrib.distributions.kl(dist_a, dist_b, allow_nan=False, name=None) 45 | 46 | # from .median import median_filter as median_filter_tf 47 | # from .median import median_random_filter as median_random_filter_tf 48 | 49 | 50 | import sys 51 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 52 | 53 | from utils.squeeze import get_squeezer_by_name, reduce_precision_tf 54 | 55 | # if FLAGS.dataset_name == "MNIST": 56 | # # squeezers_name = ['median_smoothing_2', 'median_smoothing_3', 'binary_filter'] 57 | # squeezers_name = ['median_smoothing_2', 'binary_filter'] 58 | # elif FLAGS.dataset_name == "CIFAR-10": 59 | # squeezers_name = ["bit_depth_5", "bit_depth_4", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2', 'median_smoothing_1_3'] 60 | # elif FLAGS.dataset_name == "ImageNet": 61 | # squeezers_name = ["bit_depth_5", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2'] 62 | 63 | def get_tf_squeezer_by_name(name): 64 | return get_squeezer_by_name(name, 'tensorflow') 65 | 66 | tf_squeezers_name_mnist = ['median_filter_2_2', 'bit_depth_1'] 67 | tf_squeezers_name_cifar10 = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5', 'bit_depth_4'] 68 | tf_squeezers_name_imagenet = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5'] 69 | 70 | # tf_squeezers = map(get_tf_squeezer_by_name, tf_squeezers_name) 71 | 72 | def get_tf_squeezers_by_str(tf_squeezers_str): 73 | tf_squeezers_name = tf_squeezers_str.split(',') 74 | return list(map(get_tf_squeezer_by_name, tf_squeezers_name))  # a list, so it can be printed and iterated more than once on Python 3 75 | 76 | def kl_tf(x1, x2, eps = 0.000000001): 77 | x1 = tf.clip_by_value(x1, eps, 1) 78 | x2 = tf.clip_by_value(x2, eps, 1) 79 | return tf.reduce_sum(x1 * tf.log(x1/x2), reduction_indices=[1]) 80 | 81 | def generate_adaptive_carlini_l2_examples(sess, model, x, y, X, Y_target, attack_params, verbose, attack_log_fpath): 82 | # (model, x, y, X, Y_target, tf_squeezers=tf_squeezers, detector_threshold = 0.2): 83 | # tf_squeezers=tf_squeezers 84 | eval_dir = os.path.dirname(attack_log_fpath) 85 | 86 | default_params = { 87 | 'batch_size': 100, 88 | 'confidence': 0, 89 | 'targeted': False, 90 | 'learning_rate': 9e-2, 91 | 'binary_search_steps': 9, 92 | 'max_iterations': 5000, 93 | 'abort_early': False, # TODO: not supported. 
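        # Note on the detector-related defaults below: `detector_threshold` mirrors the Feature Squeezing detector, and the attack only pays a penalty while the prediction distance after squeezing exceeds this value (see detector_penalty = max(0, score - detector_threshold) further down).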
94 | 'initial_const': 0.0, 95 | 'detector_threshold': 0.3, 96 | 'uint8_optimized': False, 97 | 'tf_squeezers': [], 98 | 'distance_measure': 'l1', 99 | 'between_squeezers': False, 100 | } 101 | 102 | if 'tf_squeezers' in attack_params: 103 | tf_squeezers_str = attack_params['tf_squeezers'] 104 | tf_squeezers = get_tf_squeezers_by_str(tf_squeezers_str) 105 | attack_params['tf_squeezers'] = tf_squeezers 106 | 107 | accepted_params = default_params.keys() 108 | for k in attack_params: 109 | if k not in accepted_params: 110 | raise NotImplementedError("Unsupported params in Carlini L2: %s" % k) 111 | else: 112 | default_params[k] = attack_params[k] 113 | 114 | # assert batch_size <= len(X) 115 | if 'batch_size' in default_params and default_params['batch_size'] > len(X): 116 | default_params['batch_size'] = len(X) 117 | 118 | return adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, **default_params) 119 | 120 | 121 | def adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, batch_size, confidence, targeted, learning_rate, binary_search_steps, max_iterations, abort_early, initial_const, detector_threshold, uint8_optimized, tf_squeezers, distance_measure, between_squeezers): 122 | model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output) 123 | 124 | # Need a determined batch size for coefficient vectors. 125 | x = tf.placeholder(shape=X.shape, dtype=tf.float32) 126 | y = tf.placeholder(shape=Y_target.shape, dtype=tf.float32) 127 | 128 | # Adapted from Warren and Carlini's code 129 | N0, H0, W0, C0 = X.shape 130 | # Range [0, 1], initialize as the original images. 131 | batch_images = X 132 | # Get the arctanh of the original images. 133 | batch_images_tanh = np.arctanh((batch_images - 0.5) / 0.501) 134 | batch_labels = Y_target 135 | 136 | x_star_tanh = tf.Variable(batch_images_tanh, dtype=tf.float32) 137 | # Range [0, 1], initialize as the original images. 138 | x_star = tf.tanh(x_star_tanh) / 2. + 0.5 139 | 140 | # The result is optimized for uint8. 141 | x_star_uint8 = reduce_precision_tf(x_star, 256) 142 | 143 | # Gradient required. 144 | y_pred_logits = model_logits(x_star) 145 | y_pred = model(x_star) 146 | print ("tf_squeezers: %s" % tf_squeezers) 147 | y_squeezed_pred_list = [ model(func(x_star)) for func in tf_squeezers ] 148 | 149 | coeff = tf.placeholder(shape=(N0,), dtype=tf.float32) 150 | l2dist = tf.reduce_sum(tf.square(x_star - x), [1, 2, 3]) 151 | ground_truth_logits = tf.reduce_sum(y * y_pred_logits, 1) 152 | top_other_logits = tf.reduce_max((1 - y) * y_pred_logits - (y * 10000), 1) 153 | 154 | # Untargeted attack, minimize the ground_truth_logits. 155 | # target_penalty = tf.maximum(0., ground_truth_logits - top_other_logits) 156 | 157 | if targeted is False: 158 | # if untargeted, optimize for making this class least likely. 159 | target_penalty = tf.maximum(0.0, ground_truth_logits-top_other_logits+confidence) 160 | else: 161 | # if targeted, optimize for making the other class most likely. 162 | target_penalty = tf.maximum(0.0, top_other_logits-ground_truth_logits+confidence) 163 | 164 | 165 | 166 | 167 | # Minimize the sum of L1 score. 168 | detector_penalty = None 169 | 170 | # Include the between-squeezers distances if requested. 
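    # With `between_squeezers` enabled, the detector penalty below covers every pair of predictions (unsqueezed and squeezed alike); otherwise only each squeezed prediction is compared against the unsqueezed one.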
171 | all_pred_list = [y_pred] + y_squeezed_pred_list 172 | 173 | if between_squeezers: 174 | print ("#Between squeezers") 175 | for i, pred_base in enumerate(all_pred_list): 176 | for j in range(i+1, len(all_pred_list)): 177 | pred_target = all_pred_list[j] 178 | if distance_measure == "l1": 179 | score = tf.reduce_sum(tf.abs(pred_base - pred_target), 1) 180 | elif distance_measure == 'kl_f': 181 | score = kl_tf(pred_base, pred_target) 182 | elif distance_measure == 'kl_b': 183 | score = kl_tf(pred_target, pred_base) 184 | detector_penalty_sub = tf.maximum(0., score - detector_threshold) 185 | 186 | if detector_penalty is None: 187 | detector_penalty = detector_penalty_sub 188 | else: 189 | detector_penalty += detector_penalty_sub 190 | else: 191 | for y_squeezed_pred in y_squeezed_pred_list: 192 | if distance_measure == "l1": 193 | score = tf.reduce_sum(tf.abs(y_pred - y_squeezed_pred), 1) 194 | elif distance_measure == 'kl_f': 195 | score = kl_tf(y_pred, y_squeezed_pred) 196 | elif distance_measure == 'kl_b': 197 | score = kl_tf(y_squeezed_pred, y_pred) 198 | detector_penalty_sub = tf.maximum(0., score - detector_threshold) 199 | 200 | if detector_penalty is None: 201 | detector_penalty = detector_penalty_sub 202 | else: 203 | detector_penalty += detector_penalty_sub 204 | 205 | 206 | 207 | # There could be different design choices, e.g. adding a separate coefficient for the detector penalty. 208 | loss = tf.add((target_penalty + detector_penalty) * coeff, l2dist) 209 | # Minimize loss by updating variables in var_list. 210 | train_adv_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, var_list=[x_star_tanh]) 211 | # The Adam optimizer appends four new global variables to the graph: 212 | # the two moment slots (m, v) for x_star_tanh, plus beta1_power and 213 | # beta2_power. The last four global variables are therefore exactly the 214 | # optimizer state that must be re-initialized for each binary search step. 215 | # 216 | optimizer_variables = tf.global_variables()[-4:] 217 | 218 | # The result is optimized for uint8. Added by Weilin. 219 | if uint8_optimized: 220 | predictions = tf.argmax(model_logits(x_star_uint8), 1) 221 | else: 222 | predictions = tf.argmax(model_logits(x_star), 1) 223 | 224 | if targeted is False: 225 | correct_prediction = tf.equal(predictions, tf.argmax(y, 1)) 226 | else: 227 | correct_prediction = tf.not_equal(predictions, tf.argmax(y, 1)) 228 | 229 | # Initialize loss coefficients 230 | coeff_block_log = np.tile([[initial_const], [float('nan')], [float('nan')]], (1, N0)) 231 | coeff_curr_log = coeff_block_log[0] 232 | coeff_high_log = coeff_block_log[1] 233 | coeff_low_log = coeff_block_log[2] 234 | 235 | # Collect best adversarial images 236 | best_l2 = np.zeros((N0,)) + float('nan') 237 | best_coeff_log = np.zeros((N0,)) + float('nan') 238 | best_iter = np.zeros((N0,)) + float('nan') 239 | best_images = np.copy(batch_images) 240 | 241 | # Note: there is no random initialization of the perturbation; optimization starts from the original images. 242 | for _ in range(binary_search_steps): 243 | # Reset x_star_tanh and optimizer 244 | sess.run(tf.variables_initializer([x_star_tanh] + optimizer_variables)) 245 | tf.assert_variables_initialized() 246 | 247 | print (coeff_curr_log) # debug output 248 | curr_coeff = np.exp(coeff_curr_log) 249 | # Initially, all are failed adversarial examples. 250 | all_fail = np.ones((N0,), dtype=np.bool) 251 | 252 | # Training loop 253 | improve_count = 0 254 | # 5000 iterations by default. 255 | for j in range(max_iterations): 256 | # A correct prediction means the (untargeted) attack failed on that example. 
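            # Each step fetches the current images (x_star), per-example failure flags (correct_prediction), the detector penalty, and the L2 distance, and applies one Adam update.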
257 | xst, adv_fail, l1o, l2d, _ = sess.run([x_star, correct_prediction, detector_penalty, l2dist, train_adv_step], feed_dict={ 258 | x: batch_images, 259 | y: batch_labels, 260 | coeff: curr_coeff, 261 | }) 262 | all_fail = np.logical_and(all_fail, adv_fail) 263 | for i in range(N0): 264 | if adv_fail[i] or l1o[i] > 0: 265 | continue 266 | # Save the best successful adversarial examples, with the lowest L2. 267 | if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]: 268 | best_l2[i] = l2d[i] 269 | best_coeff_log[i] = coeff_curr_log[i] 270 | best_iter[i] = j 271 | best_images[i] = xst[i] 272 | improve_count += 1 273 | if j % 100 == 0: 274 | print("Adv. training iter. {}/{} improved {}".format(j, max_iterations, improve_count)) 275 | improve_count = 0 276 | 277 | xst, adv_fail, l1o, l2d = sess.run([x_star, correct_prediction, detector_penalty, l2dist], feed_dict={ 278 | x: batch_images, 279 | y: batch_labels, 280 | }) 281 | # Run it once more, because the last iteration of the for loop doesn't get evaluated. 282 | for i in range(N0): 283 | if adv_fail[i] or l1o[i] > 0: 284 | continue 285 | if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]: 286 | best_l2[i] = l2d[i] 287 | best_coeff_log[i] = coeff_curr_log[i] 288 | best_iter[i] = max_iterations 289 | best_images[i] = xst[i] 290 | improve_count += 1 291 | print("Finished training {}/{} improved {}".format(max_iterations, max_iterations, improve_count)) 292 | 293 | # Save generated examples and their coefficients 294 | np.save(eval_dir + '/combined_adv_imgs.npy', best_images) 295 | np.save(eval_dir + '/combined_adv_coeff_log.npy', best_coeff_log) 296 | 297 | # Update coeff 298 | for i, (fail, curr, high, low) in enumerate(zip(adv_fail, coeff_curr_log, coeff_high_log, coeff_low_log)): 299 | if fail: 300 | # increase to allow more distortion 301 | coeff_low_log[i] = low = curr 302 | if math.isnan(high): 303 | coeff_curr_log[i] = curr + 2.3 304 | else: 305 | coeff_curr_log[i] = (high + low) / 2 306 | else: 307 | # decrease to penalize distortion 308 | coeff_high_log[i] = high = curr 309 | if math.isnan(low): 310 | coeff_curr_log[i] = curr - 0.69 311 | else: 312 | coeff_curr_log[i] = (high + low) / 2 313 | np.save(eval_dir + '/combined_coeff_log.npy', coeff_block_log) 314 | 315 | return best_images 316 | 317 | 318 | 319 | 320 | -------------------------------------------------------------------------------- /attacks/carlini_wrapper.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | import click 3 | import pdb 4 | import numpy as np 5 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 6 | 7 | from utils import load_externals 8 | 9 | from utils.output import disablePrint, enablePrint 10 | 11 | 12 | class CarliniModelWrapper: 13 | def __init__(self, logits, image_size, num_channels, num_labels): 14 | """ 15 | :image_size: (e.g., 28 for MNIST, 32 for CIFAR) 16 | :num_channels: 1 for greyscale, 3 for color images 17 | :num_labels: total number of valid labels (e.g., 10 for MNIST/CIFAR) 18 | """ 19 | self.logits = logits 20 | self.image_size = image_size 21 | self.num_channels = num_channels 22 | self.num_labels = num_labels 23 | 24 | # self.model = model_mnist_logits(img_rows=image_size, img_cols=image_size, nb_filters=64, nb_classes=num_labels) 25 | self.model = logits 26 | 27 | def predict(self, X): 28 | """ 29 | Run the prediction network *without softmax*. 
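        The Carlini attack code computes its loss on these raw logits directly, so no softmax layer may be applied here.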
30 | """ 31 | return self.model(X) 32 | 33 | from keras.models import Model 34 | from keras.layers import Lambda, Input 35 | 36 | def convert_model(model, input_shape): 37 | # Output model: accept [-0.5, 0.5] input range instead of [0,1], output logits instead of softmax. 38 | # The output model will have three layers in abstract: Input, Lambda, TrainingModel. 39 | model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output) 40 | 41 | input_tensor = Input(shape=input_shape) 42 | 43 | scaler = lambda x: x+0.5 44 | scaler_layer = Lambda(scaler, input_shape=input_shape)(input_tensor) 45 | output_tensor = model_logits(scaler_layer) 46 | 47 | model_new = Model(inputs=input_tensor, outputs=output_tensor) 48 | return model_new 49 | 50 | 51 | def wrap_to_carlini_model(model, X, Y): 52 | image_size, num_channels = X.shape[1], X.shape[3] 53 | num_labels = Y.shape[1] 54 | model_logits = convert_model(model, input_shape=X.shape[1:]) 55 | model_wrapper = CarliniModelWrapper(model_logits, image_size=image_size, num_channels=num_channels, num_labels=num_labels) 56 | return model_wrapper 57 | 58 | 59 | from nn_robust_attacks.l2_attack import CarliniL2 60 | def generate_carlini_l2_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 61 | model_wrapper = wrap_to_carlini_model(model, X, Y) 62 | 63 | accepted_params = ['batch_size', 'confidence', 'targeted', 'learning_rate', 'binary_search_steps', 'max_iterations', 'abort_early', 'initial_const'] 64 | for k in attack_params: 65 | if k not in accepted_params: 66 | raise NotImplementedError("Unsuporrted params in Carlini L2: %s" % k) 67 | 68 | # assert batch_size <= len(X) 69 | if 'batch_size' in attack_params and attack_params['batch_size'] > len(X): 70 | attack_params['batch_size'] = len(X) 71 | 72 | if 'binary_search_steps' in attack_params: 73 | attack_params['binary_search_steps'] = int(attack_params['binary_search_steps']) 74 | 75 | attack = CarliniL2(sess, model_wrapper, **attack_params) 76 | 77 | if not verbose: 78 | disablePrint(attack_log_fpath) 79 | # The input range is [0, 1], convert to [-0.5, 0.5] by subtracting 0.5. 80 | # The return range is [-0.5, 0.5]. Convert back to [0,1] by adding 0.5. 
81 | X_adv = attack.attack(X - 0.5, Y) + 0.5 82 | if not verbose: 83 | enablePrint() 84 | 85 | return X_adv 86 | 87 | 88 | from nn_robust_attacks.li_attack import CarliniLi 89 | def generate_carlini_li_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 90 | model_wrapper = wrap_to_carlini_model(model, X, Y) 91 | 92 | if 'batch_size' in attack_params: 93 | batch_size = attack_params['batch_size'] 94 | del attack_params['batch_size'] 95 | else: 96 | batch_size = 10 97 | 98 | accepted_params = ['targeted', 'learning_rate', 'max_iterations', 'abort_early', 'initial_const', 'largest_const', 'reduce_const', 'decrease_factor', 'const_factor', 'confidence'] 99 | for k in attack_params: 100 | if k not in accepted_params: 101 | raise NotImplementedError("Unsupported params in Carlini Li: %s" % k) 102 | 103 | attack = CarliniLi(sess, model_wrapper, **attack_params) 104 | 105 | X_adv_list = [] 106 | 107 | with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 108 | width=40, bar_template=' [%(bar)s] Carlini Li Attacking %(info)s', 109 | fill_char='>', empty_char='-') as bar: 110 | for i in bar: 111 | if i % batch_size == 0: 112 | X_sub = X[i:min(i+batch_size, len(X)),:] 113 | Y_sub = Y[i:min(i+batch_size, len(X)),:] 114 | if not verbose: 115 | disablePrint(attack_log_fpath) 116 | X_adv_sub = attack.attack(X_sub - 0.5, Y_sub) + 0.5 117 | if not verbose: 118 | enablePrint() 119 | X_adv_list.append(X_adv_sub) 120 | 121 | X_adv = np.vstack(X_adv_list) 122 | return X_adv 123 | 124 | 125 | from nn_robust_attacks.l0_attack import CarliniL0 126 | def generate_carlini_l0_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 127 | model_wrapper = wrap_to_carlini_model(model, X, Y) 128 | 129 | if 'batch_size' in attack_params: 130 | batch_size = attack_params['batch_size'] 131 | del attack_params['batch_size'] 132 | else: 133 | batch_size = 10 134 | 135 | accepted_params = ['targeted', 'learning_rate', 'max_iterations', 'abort_early', 'initial_const', 'largest_const', 'reduce_const', 'decrease_factor', 'const_factor', 'independent_channels', 'confidence'] 136 | for k in attack_params: 137 | if k not in accepted_params: 138 | raise NotImplementedError("Unsupported params in Carlini L0: %s" % k) 139 | 140 | attack = CarliniL0(sess, model_wrapper, **attack_params) 141 | 142 | X_adv_list = [] 143 | 144 | with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 145 | width=40, bar_template=' [%(bar)s] Carlini L0 Attacking %(info)s', 146 | fill_char='>', empty_char='-') as bar: 147 | for i in bar: 148 | if i % batch_size == 0: 149 | X_sub = X[i:min(i+batch_size, len(X)),:] 150 | Y_sub = Y[i:min(i+batch_size, len(X)),:] 151 | if not verbose: 152 | disablePrint(attack_log_fpath) 153 | X_adv_sub = attack.attack(X_sub - 0.5, Y_sub) + 0.5 154 | if not verbose: 155 | enablePrint() 156 | X_adv_list.append(X_adv_sub) 157 | 158 | X_adv = np.vstack(X_adv_list) 159 | return X_adv -------------------------------------------------------------------------------- /attacks/cleverhans_wrapper.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import click 4 | 5 | import pdb 6 | import sys, os 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | 9 | from utils import load_externals 10 | from cleverhans.utils_tf import model_loss, batch_eval 11 | 12 | import warnings 13 | 14 | def override_params(default, update): 15 | for key in default: 
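        # Only keys already present in `default` are honored; 'ord' strings ('li', 'l2', 'l1') are mapped to their numeric norms, and any leftover keys in `update` trigger a warning below.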
16 | if key in update: 17 | val = update[key] 18 | if key == 'ord': 19 | if val == 'li': 20 | val = np.inf 21 | elif val == 'l2': 22 | val = 2 23 | elif val == 'l1': 24 | val = 1 25 | else: 26 | raise ValueError("Unsupported ord: %s" % val) 27 | default[key] = val 28 | del update[key] 29 | 30 | if len(update) > 0: 31 | warnings.warn("Ignored arguments: %s" % update.keys()) 32 | return default 33 | 34 | 35 | from cleverhans.attacks import FastGradientMethod 36 | def generate_fgsm_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 37 | """ 38 | Untargeted attack. Y is not needed. 39 | """ 40 | fgsm = FastGradientMethod(model, back='tf', sess=sess) 41 | fgsm_params = {'eps': 0.1, 'ord': np.inf, 'y': None, 'clip_min': 0, 'clip_max': 1} 42 | fgsm_params = override_params(fgsm_params, attack_params) 43 | 44 | X_adv = fgsm.generate_np(X, **fgsm_params) 45 | return X_adv 46 | 47 | 48 | from cleverhans.attacks import BasicIterativeMethod 49 | def generate_bim_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 50 | """ 51 | Untargeted attack. Y is not needed. 52 | """ 53 | bim = BasicIterativeMethod(model, back='tf', sess=sess) 54 | bim_params = {'eps': 0.1, 'eps_iter':0.05, 'nb_iter':10, 'y':y, 55 | 'ord':np.inf, 'clip_min':0, 'clip_max':1 } 56 | bim_params = override_params(bim_params, attack_params) 57 | 58 | X_adv = bim.generate_np(X, **bim_params) 59 | return X_adv 60 | 61 | 62 | from cleverhans.attacks import SaliencyMapMethod 63 | def generate_jsma_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 64 | """ 65 | Targeted attack, with target classes in Y. 66 | """ 67 | Y_target = Y 68 | 69 | nb_classes = Y.shape[1] 70 | 71 | jsma = SaliencyMapMethod(model, back='tf', sess=sess) 72 | jsma_params = {'theta': 1., 'gamma': 0.1, 73 | 'nb_classes': nb_classes, 'clip_min': 0., 74 | 'clip_max': 1., 'targets': y, 75 | 'y_val': None} 76 | jsma_params = override_params(jsma_params, attack_params) 77 | 78 | adv_x_list = [] 79 | 80 | with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 81 | width=40, bar_template=' [%(bar)s] JSMA Attacking %(info)s', 82 | fill_char='>', empty_char='-') as bar: 83 | # Loop over the samples we want to perturb into adversarial examples 84 | for sample_ind in bar: 85 | sample = X[sample_ind:(sample_ind+1)] 86 | 87 | jsma_params['y_val'] = Y_target[[sample_ind],] 88 | adv_x = jsma.generate_np(sample, **jsma_params) 89 | adv_x_list.append(adv_x) 90 | 91 | return np.vstack(adv_x_list) 92 | 93 | -------------------------------------------------------------------------------- /attacks/deepfool_wrapper.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from keras.models import Model 4 | 5 | import sys, os 6 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 7 | from utils.output import disablePrint, enablePrint 8 | from utils import load_externals 9 | from deepfool import deepfool 10 | from universal_pert import universal_perturbation 11 | 12 | import warnings 13 | import click 14 | 15 | def override_params(default, update): 16 | for key in default: 17 | if key in update: 18 | val = update[key] 19 | default[key] = val 20 | del update[key] 21 | 22 | if len(update) > 0: 23 | warnings.warn("Ignored arguments: %s" % update.keys()) 24 | return default 25 | 26 | 27 | def prepare_attack(sess, model, x, y, X, Y): 28 | nb_classes = Y.shape[1] 29 | 30 | f = model.predict 31 | 32 | 
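    # DeepFool and the universal perturbation need per-class gradients of the logits, so re-wire the model to expose its penultimate (pre-softmax) layer.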
model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output) 33 | 34 | persisted_input = x 35 | persisted_output = model_logits(x) 36 | 37 | print('>> Compiling the gradient tensorflow functions. This might take some time...') 38 | scalar_out = [tf.slice(persisted_output, [0, i], [1, 1]) for i in range(0, nb_classes)] 39 | dydx = [tf.gradients(scalar_out[i], [persisted_input])[0] for i in range(0, nb_classes)] 40 | 41 | print('>> Computing gradient function...') 42 | def grad_fs(image_inp, inds): return [sess.run(dydx[i], feed_dict={persisted_input: image_inp}) for i in inds] 43 | 44 | return f, grad_fs 45 | 46 | def generate_deepfool_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 47 | """ 48 | Untargeted attack. Y is not needed. 49 | """ 50 | 51 | # TODO: insert a uint8 filter to f. 52 | f, grad_fs = prepare_attack(sess, model, x, y, X, Y) 53 | 54 | params = {'num_classes': 10, 'overshoot': 0.02, 'max_iter': 50} 55 | params = override_params(params, attack_params) 56 | 57 | adv_x_list = [] 58 | aux_info = {} 59 | aux_info['r_tot'] = [] 60 | aux_info['loop_i'] = [] 61 | aux_info['k_i'] = [] 62 | 63 | with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 64 | width=40, bar_template=' [%(bar)s] DeepFool Attacking %(info)s', 65 | fill_char='>', empty_char='-') as bar: 66 | # Loop over the samples we want to perturb into adversarial examples 67 | for i in bar: 68 | image = X[i:i+1,:,:,:] 69 | 70 | if not verbose: 71 | disablePrint(attack_log_fpath) 72 | 73 | r_tot, loop_i, k_i, pert_image = deepfool(image, f, grad_fs, **params) 74 | 75 | if not verbose: 76 | enablePrint() 77 | 78 | adv_x_list.append(pert_image) 79 | 80 | aux_info['r_tot'].append(r_tot) 81 | aux_info['loop_i'].append(loop_i) 82 | aux_info['k_i'].append(k_i) 83 | 84 | return np.vstack(adv_x_list), aux_info 85 | 86 | 87 | def generate_universal_perturbation_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 88 | """ 89 | Untargeted attack. Y is not needed. 90 | """ 91 | 92 | # TODO: insert a uint8 filter to f. 93 | f, grad_fs = prepare_attack(sess, model, x, y, X, Y) 94 | 95 | params = {'delta': 0.2, 96 | 'max_iter_uni': np.inf, 97 | 'xi': 10, 98 | 'p': np.inf, 99 | 'num_classes': 10, 100 | 'overshoot': 0.02, 101 | 'max_iter_df': 10, 102 | } 103 | 104 | params = override_params(params, attack_params) 105 | 106 | if not verbose: 107 | disablePrint(attack_log_fpath) 108 | 109 | # X is randomly shuffled in unipert. 
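    # Work on a copy so the caller's X keeps its original ordering.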
110 | X_copy = X.copy() 111 | v = universal_perturbation(X_copy, f, grad_fs, **params) 112 | del X_copy 113 | 114 | if not verbose: 115 | enablePrint() 116 | 117 | return X + v 118 | -------------------------------------------------------------------------------- /attacks/pgd/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /attacks/pgd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mzweilin/EvadeML-Zoo/4dbeee04874a836f79782802b2bf8a8612a2a86f/attacks/pgd/__init__.py -------------------------------------------------------------------------------- /attacks/pgd/pgd_attack.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementation of attack methods. Running this file as a program will 3 | apply the attack to the model specified by the config file and store 4 | the examples in an .npy file. 5 | """ 6 | from __future__ import absolute_import 7 | from __future__ import division 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | import numpy as np 12 | 13 | 14 | class LinfPGDAttack: 15 | def __init__(self, model, epsilon, k, a, random_start, loss_func): 16 | """Attack parameter initialization. The attack performs k steps of 17 | size a, while always staying within epsilon from the initial 18 | point.""" 19 | self.model = model 20 | self.epsilon = epsilon 21 | self.k = k 22 | self.a = a 23 | self.rand = random_start 24 | 25 | if loss_func == 'xent': 26 | loss = model.xent 27 | elif loss_func == 'cw': 28 | label_mask = tf.one_hot(model.y_input, 29 | 10, 30 | on_value=1.0, 31 | off_value=0.0, 32 | dtype=tf.float32) 33 | correct_logit = tf.reduce_sum(label_mask * model.pre_softmax, axis=1) 34 | wrong_logit = tf.reduce_max((1-label_mask) * model.pre_softmax, axis=1) 35 | loss = -tf.nn.relu(correct_logit - wrong_logit + 50) 36 | else: 37 | print('Unknown loss function. 
Defaulting to cross-entropy') 38 | loss = model.xent 39 | 40 | self.grad = tf.gradients(loss, model.x_input)[0] 41 | 42 | def perturb(self, x_nat, y, sess): 43 | """Given a set of examples (x_nat, y), returns a set of adversarial 44 | examples within epsilon of x_nat in l_infinity norm.""" 45 | if self.rand: 46 | x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape) 47 | else: 48 | x = np.copy(x_nat) 49 | 50 | for i in range(self.k): 51 | grad = sess.run(self.grad, feed_dict={self.model.x_input: x, 52 | self.model.y_input: y}) 53 | 54 | x += self.a * np.sign(grad) 55 | 56 | x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon) 57 | x = np.clip(x, 0, 1) # ensure valid pixel range 58 | 59 | return x 60 | 61 | 62 | if __name__ == '__main__': 63 | import json 64 | import sys 65 | import math 66 | 67 | from tensorflow.examples.tutorials.mnist import input_data 68 | 69 | from model import Model 70 | 71 | with open('config.json') as config_file: 72 | config = json.load(config_file) 73 | 74 | model_file = tf.train.latest_checkpoint(config['model_dir']) 75 | if model_file is None: 76 | print('No model found') 77 | sys.exit() 78 | 79 | model = Model() 80 | attack = LinfPGDAttack(model, 81 | config['epsilon'], 82 | config['k'], 83 | config['a'], 84 | config['random_start'], 85 | config['loss_func']) 86 | saver = tf.train.Saver() 87 | 88 | mnist = input_data.read_data_sets('MNIST_data', one_hot=False) 89 | 90 | with tf.Session() as sess: 91 | # Restore the checkpoint 92 | saver.restore(sess, model_file) 93 | 94 | # Iterate over the samples batch-by-batch 95 | num_eval_examples = config['num_eval_examples'] 96 | eval_batch_size = config['eval_batch_size'] 97 | num_batches = int(math.ceil(num_eval_examples / eval_batch_size)) 98 | 99 | x_adv = [] # adv accumulator 100 | 101 | print('Iterating over {} batches'.format(num_batches)) 102 | 103 | for ibatch in range(num_batches): 104 | bstart = ibatch * eval_batch_size 105 | bend = min(bstart + eval_batch_size, num_eval_examples) 106 | print('batch size: {}'.format(bend - bstart)) 107 | 108 | x_batch = mnist.test.images[bstart:bend, :] 109 | y_batch = mnist.test.labels[bstart:bend] 110 | 111 | x_batch_adv = attack.perturb(x_batch, y_batch, sess) 112 | 113 | x_adv.append(x_batch_adv) 114 | 115 | print('Storing examples') 116 | path = config['store_adv_path'] 117 | x_adv = np.concatenate(x_adv, axis=0) 118 | np.save(path, x_adv) 119 | print('Examples stored in {}'.format(path)) -------------------------------------------------------------------------------- /attacks/pgd/pgd_wrapper.py: -------------------------------------------------------------------------------- 1 | 2 | import warnings 3 | from .pgd_attack import LinfPGDAttack 4 | 5 | from keras.models import Model 6 | import tensorflow as tf 7 | import numpy as np 8 | 9 | def override_params(default, update): 10 | for key in default: 11 | if key in update: 12 | val = update[key] 13 | if key == 'k': 14 | val = int(val) 15 | default[key] = val 16 | del update[key] 17 | 18 | if len(update) > 0: 19 | warnings.warn("Ignored arguments: %s" % update.keys()) 20 | return default 21 | 22 | 23 | class PGDModelWrapper: 24 | def __init__(self, keras_model, x, y): 25 | model_logits = Model(inputs=keras_model.layers[0].input, outputs=keras_model.layers[-2].output) 26 | 27 | self.x_input = x 28 | self.y_input = tf.argmax(y, 1) 29 | self.pre_softmax = model_logits(x) 30 | 31 | y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits( 32 | labels=self.y_input, logits=self.pre_softmax) 33 | self.xent = 
tf.reduce_sum(y_xent) 34 | 35 | self.y_pred = tf.argmax(self.pre_softmax, 1) 36 | 37 | 38 | def generate_pgdli_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 39 | model_for_pgd = PGDModelWrapper(model, x, y) 40 | params = {'model': model_for_pgd, 'epsilon': 0.3, 'k':40, 'a':0.01, 'random_start':True, 41 | 'loss_func':'xent' } 42 | params = override_params(params, attack_params) 43 | attack = LinfPGDAttack(**params) 44 | 45 | Y_class = np.argmax(Y, 1) 46 | X_adv = attack.perturb(X, Y_class, sess) 47 | return X_adv 48 | -------------------------------------------------------------------------------- /attacks/tohinz_wrapper.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | import click 3 | import pdb 4 | import numpy as np 5 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 6 | 7 | from utils import load_externals 8 | 9 | from utils.output import disablePrint, enablePrint 10 | 11 | class tohinzModelWrapper: 12 | def __init__(self, logits, image_size, num_channels, num_labels): 13 | self.logits = logits 14 | self.image_size = image_size 15 | self.num_channels = num_channels 16 | self.num_labels = num_labels 17 | 18 | def predict(self, X): 19 | return self.logits(X) 20 | 21 | from keras.models import Model 22 | from keras.layers import Lambda, Input 23 | 24 | def convert_model(model, input_shape): 25 | model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output) 26 | 27 | input_tensor = Input(shape=input_shape) 28 | 29 | scaler = lambda x: x 30 | 31 | scaler_layer = Lambda(scaler, input_shape=input_shape)(input_tensor) 32 | output_tensor = model_logits(scaler_layer) 33 | 34 | model_new = Model(inputs=input_tensor, outputs=output_tensor) 35 | return model_new 36 | 37 | def wrap_to_tohinz_model(model, X, Y): 38 | image_size, num_channels = X.shape[1], X.shape[3] 39 | num_labels = Y.shape[1] 40 | model_logits = convert_model(model, input_shape=X.shape[1:]) 41 | model_wrapper = tohinzModelWrapper(model_logits, image_size=image_size, num_channels=num_channels, 42 | num_labels=num_labels) 43 | return model_wrapper 44 | 45 | from nn_robust_attacks.l2_attack import CarliniL2 46 | def generate_carlini_l2_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 47 | model_wrapper = wrap_to_tohinz_model(model, X, Y) 48 | 49 | accepted_params = ['batch_size', 'confidence', 'targeted', 'learning_rate', 'binary_search_steps', 'max_iterations', 'abort_early', 'initial_const'] 50 | for k in attack_params: 51 | if k not in accepted_params: 52 | raise NotImplementedError("Unsupported params in Carlini L2: %s" % k) 53 | 54 | # assert batch_size <= len(X) 55 | if 'batch_size' in attack_params and attack_params['batch_size'] > len(X): 56 | attack_params['batch_size'] = len(X) 57 | 58 | if 'binary_search_steps' in attack_params: 59 | attack_params['binary_search_steps'] = int(attack_params['binary_search_steps']) 60 | 61 | attack = CarliniL2(sess, model_wrapper, **attack_params) 62 | 63 | if not verbose: 64 | disablePrint(attack_log_fpath) 65 | # The input range is [0, 1], convert to [-0.5, 0.5] by subtracting 0.5. 66 | # The return range is [-0.5, 0.5]. Convert back to [0,1] by adding 0.5.
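# (nn_robust_attacks' CarliniL2 optimizes in tanh space over a box centered
# at zero, [-0.5, 0.5] by default, which is why the inputs are shifted here;
# the Li and L0 wrappers below apply the same shift.)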
67 | X_adv = attack.attack(X - 0.5, Y) + 0.5 68 | if not verbose: 69 | enablePrint() 70 | 71 | return X_adv 72 | 73 | 74 | from nn_robust_attacks.li_attack import CarliniLi 75 | def generate_carlini_li_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 76 | model_wrapper = wrap_to_tohinz_model(model, X, Y) 77 | 78 | if 'batch_size' in attack_params: 79 | batch_size = attack_params['batch_size'] 80 | del attack_params['batch_size'] 81 | else: 82 | batch_size = 10 83 | 84 | accepted_params = ['targeted', 'learning_rate', 'max_iterations', 'abort_early', 'initial_const', 'largest_const', 'reduce_const', 'decrease_factor', 'const_factor', 'confidence'] 85 | for k in attack_params: 86 | if k not in accepted_params: 87 | raise NotImplementedError("Unsupported params in Carlini Li: %s" % k) 88 | 89 | attack = CarliniLi(sess, model_wrapper, **attack_params) 90 | 91 | X_adv_list = [] 92 | 93 | with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 94 | width=40, bar_template=' [%(bar)s] Carlini Li Attacking %(info)s', 95 | fill_char='>', empty_char='-') as bar: 96 | for i in bar: 97 | if i % batch_size == 0: 98 | X_sub = X[i:min(i+batch_size, len(X)),:] 99 | Y_sub = Y[i:min(i+batch_size, len(X)),:] 100 | if not verbose: 101 | disablePrint(attack_log_fpath) 102 | X_adv_sub = attack.attack(X_sub - 0.5, Y_sub) + 0.5 103 | if not verbose: 104 | enablePrint() 105 | X_adv_list.append(X_adv_sub) 106 | 107 | X_adv = np.vstack(X_adv_list) 108 | return X_adv 109 | 110 | 111 | from nn_robust_attacks.l0_attack import CarliniL0 112 | def generate_carlini_l0_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath): 113 | model_wrapper = wrap_to_tohinz_model(model, X, Y) 114 | 115 | if 'batch_size' in attack_params: 116 | batch_size = attack_params['batch_size'] 117 | del attack_params['batch_size'] 118 | else: 119 | batch_size = 10 120 | 121 | accepted_params = ['targeted', 'learning_rate', 'max_iterations', 'abort_early', 'initial_const', 'largest_const', 'reduce_const', 'decrease_factor', 'const_factor', 'independent_channels', 'confidence'] 122 | for k in attack_params: 123 | if k not in accepted_params: 124 | raise NotImplementedError("Unsupported params in Carlini L0: %s" % k) 125 | 126 | attack = CarliniL0(sess, model_wrapper, **attack_params) 127 | 128 | X_adv_list = [] 129 | 130 | with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 131 | width=40, bar_template=' [%(bar)s] Carlini L0 Attacking %(info)s', 132 | fill_char='>', empty_char='-') as bar: 133 | for i in bar: 134 | if i % batch_size == 0: 135 | X_sub = X[i:min(i+batch_size, len(X)),:] 136 | Y_sub = Y[i:min(i+batch_size, len(X)),:] 137 | if not verbose: 138 | disablePrint(attack_log_fpath) 139 | X_adv_sub = attack.attack(X_sub - 0.5, Y_sub) + 0.5 140 | if not verbose: 141 | enablePrint() 142 | X_adv_list.append(X_adv_sub) 143 | 144 | X_adv = np.vstack(X_adv_list) 145 | return X_adv 146 | -------------------------------------------------------------------------------- /datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .datasets_utils import * 2 | from .mnist import MNISTDataset 3 | from .cifar10 import CIFAR10Dataset 4 | from .imagenet import ImageNetDataset 5 | from .svhn import SVHNDataset 6 | -------------------------------------------------------------------------------- /datasets/cifar10.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 |
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 3 | 4 | from utils import load_externals 5 | 6 | from models.carlini_models import carlini_cifar10_model 7 | from models.cleverhans_models import cleverhans_cifar10_model 8 | from models.densenet_models import densenet_cifar10_model, get_densenet_weights_path 9 | 10 | from keras.datasets import cifar10 11 | from keras.utils import np_utils 12 | 13 | 14 | class CIFAR10Dataset: 15 | def __init__(self): 16 | self.dataset_name = "CIFAR-10" 17 | self.image_size = 32 18 | self.num_channels = 3 19 | self.num_classes = 10 20 | 21 | def get_test_dataset(self): 22 | (X_train, y_train), (X_test, y_test) = cifar10.load_data() 23 | X_test = X_test.reshape(X_test.shape[0], self.image_size, self.image_size, self.num_channels) 24 | X_test = X_test.astype('float32') 25 | X_test /= 255 26 | Y_test = np_utils.to_categorical(y_test, self.num_classes) 27 | del X_train, y_train 28 | return X_test, Y_test 29 | 30 | def get_val_dataset(self): 31 | (X_train, y_train), (X_test, y_test) = cifar10.load_data() 32 | val_size = 5000 33 | X_val = X_train[:val_size] 34 | X_val = X_val.reshape(X_val.shape[0], self.image_size, self.image_size, self.num_channels) 35 | X_val = X_val.astype('float32') / 255 36 | y_val = y_train[:val_size] 37 | Y_val = np_utils.to_categorical(y_val, self.num_classes) 38 | del X_train, y_train, X_test, y_test 39 | 40 | return X_val, Y_val 41 | 42 | 43 | def load_model_by_name(self, model_name, logits=False, input_range_type=1, pre_filter=lambda x:x): 44 | """ 45 | :params logits: return logits(input of softmax layer) if True; return softmax output otherwise. 46 | :params input_range_type: {1: [0,1], 2:[-0.5, 0.5], 3:[-1, 1]...} 47 | """ 48 | if model_name not in ["cleverhans", 'cleverhans_adv_trained', 'carlini', 'densenet']: 49 | raise NotImplementedError("Undefined model [%s] for %s." 
% (model_name, self.dataset_name)) 50 | self.model_name = model_name 51 | 52 | model_weights_fpath = "%s_%s.keras_weights.h5" % (self.dataset_name, model_name) 53 | model_weights_fpath = os.path.join('downloads/trained_models', model_weights_fpath) 54 | 55 | if model_name in ["cleverhans", 'cleverhans_adv_trained']: 56 | model = cleverhans_cifar10_model(logits=logits, input_range_type=input_range_type, pre_filter=pre_filter) 57 | elif model_name == "carlini": 58 | model = carlini_cifar10_model(logits=logits, input_range_type=input_range_type, pre_filter=pre_filter) 59 | elif model_name == "densenet": 60 | model = densenet_cifar10_model(logits=logits, input_range_type=input_range_type, pre_filter=pre_filter) 61 | model_weights_fpath = get_densenet_weights_path(self.dataset_name) 62 | print("\n===Defined TensorFlow model graph.") 63 | model.load_weights(model_weights_fpath) 64 | print ("---Loaded CIFAR-10-%s model.\n" % model_name) 65 | return model 66 | 67 | if __name__ == '__main__': 68 | dataset = CIFAR10Dataset() 69 | X_test, Y_test = dataset.get_test_dataset() 70 | print (X_test.shape) 71 | print (Y_test.shape) -------------------------------------------------------------------------------- /datasets/datasets_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from functools import reduce 3 | import pdb 4 | 5 | from .visualization import show_imgs_in_rows 6 | 7 | 8 | def get_next_class(Y_test): 9 | num_classes = Y_test.shape[1] 10 | Y_test_labels = np.argmax(Y_test, axis=1) 11 | Y_test_labels = (Y_test_labels + 1) % num_classes 12 | return np.eye(num_classes)[Y_test_labels] 13 | 14 | def get_least_likely_class(Y_pred): 15 | num_classes = Y_pred.shape[1] 16 | Y_target_labels = np.argmin(Y_pred, axis=1) 17 | return np.eye(num_classes)[Y_target_labels] 18 | 19 | def get_first_n_examples_id_each_class(Y_test, n=1): 20 | """ 21 | Only returns indices for the classes that have at least one sample. 22 | """ 23 | num_classes = Y_test.shape[1] 24 | Y_test_labels = np.argmax(Y_test, axis=1) 25 | 26 | selected_idx = [] 27 | for i in range(num_classes): 28 | loc = np.where(Y_test_labels==i)[0] 29 | if len(loc) > 0: 30 | selected_idx.append(list(loc[:n])) 31 | 32 | selected_idx = reduce(lambda x,y:x+y, zip(*selected_idx)) 33 | 34 | return np.array(selected_idx) 35 | 36 | def get_first_example_id_each_class(Y_test): 37 | return get_first_n_examples_id_each_class(Y_test, n=1) 38 | 39 | def get_correct_prediction_idx(Y_pred, Y_label): 40 | """ 41 | Get the indices of the correctly predicted samples. 42 | :param Y_pred: softmax output, probability matrix. 43 | :param Y_label: ground-truth classes in shape (#samples, #classes) 44 | :return: the indices of the correctly predicted samples. 45 | """ 46 | pred_classes = np.argmax(Y_pred, axis = 1) 47 | labels_classes = np.argmax(Y_label, axis = 1) 48 | 49 | return np.where(pred_classes == labels_classes)[0] 50 | 51 | 52 | def calculate_mean_confidence(Y_pred, Y_target): 53 | """ 54 | Calculate the mean confidence on target classes. 55 | :param Y_pred: softmax output 56 | :param Y_target: target classes in shape (#samples, #classes) 57 | :return: the mean confidence.
58 | """ 59 | assert len(Y_pred) == len(Y_target) 60 | confidence = np.multiply(Y_pred, Y_target) 61 | confidence = np.max(confidence, axis=1) 62 | 63 | mean_confidence = np.mean(confidence) 64 | 65 | return mean_confidence 66 | 67 | def get_match_pred_vec(Y_pred, Y_label): 68 | assert len(Y_pred) == len(Y_label) 69 | Y_pred_class = np.argmax(Y_pred, axis = 1) 70 | Y_label_class = np.argmax(Y_label, axis = 1) 71 | return Y_pred_class == Y_label_class 72 | 73 | 74 | def calculate_accuracy(Y_pred, Y_label): 75 | match_pred_vec = get_match_pred_vec(Y_pred, Y_label) 76 | 77 | accuracy = np.sum(match_pred_vec) / float(len(Y_label)) 78 | # pdb.set_trace() 79 | return accuracy 80 | 81 | 82 | def calculate_mean_distance(X1, X2): 83 | img_size = X1.shape[1] * X1.shape[2] 84 | nb_channels = X1.shape[3] 85 | 86 | mean_l2_dist = np.mean([ np.sum((X1[i]-X2[i])**2)**.5 for i in range(len(X1))]) 87 | mean_li_dist = np.mean([ np.max(np.abs(X1[i]-X2[i])) for i in range(len(X1))]) 88 | mean_l0_dist_value = np.mean([ np.sum(X1[i]-X2[i] != 0) for i in range(len(X1))]) 89 | mean_l0_dist_value = mean_l0_dist_value / (img_size*nb_channels) 90 | 91 | diff_channel_list = np.split(X1-X2 != 0, nb_channels, axis=3) 92 | l0_channel_dependent_list = np.sum(reduce(lambda x,y: x|y, diff_channel_list), axis = (1,2,3)) 93 | mean_l0_dist_pixel = np.mean(l0_channel_dependent_list) / img_size 94 | 95 | return mean_l2_dist, mean_li_dist, mean_l0_dist_value, mean_l0_dist_pixel 96 | 97 | 98 | def evaluate_adversarial_examples(X_test, Y_test, X_test_adv, Y_test_target, targeted, Y_test_adv_pred): 99 | success_rate = calculate_accuracy(Y_test_adv_pred, Y_test_target) 100 | success_idx = get_match_pred_vec(Y_test_adv_pred, Y_test_target) 101 | 102 | if targeted is False: 103 | success_rate = 1 - success_rate 104 | success_idx = np.logical_not(success_idx) 105 | 106 | # Calculate the mean confidence of the successful adversarial examples. 
107 | mean_conf = calculate_mean_confidence(Y_test_adv_pred[success_idx], Y_test_target[success_idx]) 108 | if targeted is False: 109 | mean_conf = 1 - mean_conf 110 | 111 | mean_l2_dist, mean_li_dist, mean_l0_dist_value, mean_l0_dist_pixel = calculate_mean_distance(X_test[success_idx], X_test_adv[success_idx]) 112 | # print ("\n---Attack: %s" % attack_string) 113 | print ("Success rate: %.2f%%, Mean confidence of SAEs: %.2f%%" % (success_rate*100, mean_conf*100)) 114 | print ("### Statistics of the SAEs:") 115 | print ("L2 dist: %.4f, Li dist: %.4f, L0 dist_value: %.1f%%, L0 dist_pixel: %.1f%%" % (mean_l2_dist, mean_li_dist, mean_l0_dist_value*100, mean_l0_dist_pixel*100)) 116 | 117 | rec = {} 118 | rec['success_rate'] = success_rate 119 | rec['mean_confidence'] = mean_conf 120 | rec['mean_l2_dist'] = mean_l2_dist 121 | rec['mean_li_dist'] = mean_li_dist 122 | rec['mean_l0_dist_value'] = mean_l0_dist_value 123 | rec['mean_l0_dist_pixel'] = mean_l0_dist_pixel 124 | 125 | return rec 126 | -------------------------------------------------------------------------------- /datasets/imagenet.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 3 | 4 | import numpy as np 5 | import os 6 | # from multiprocessing import Pool 7 | from keras.preprocessing import image 8 | 9 | from models.keras_models import keras_resnet50_imagenet_model 10 | from models.keras_models import keras_vgg19_imagenet_model 11 | from models.keras_models import keras_inceptionv3_imagenet_model 12 | from models.mobilenets_model import mobilenet_imagenet_model 13 | 14 | # pool = Pool() 15 | 16 | def load_single_image(img_path, img_size=224): 17 | size = (img_size,img_size) 18 | img = image.load_img(img_path, target_size=size) 19 | x = image.img_to_array(img) 20 | x = np.expand_dims(x, axis=0) 21 | # Embedded preprocessing in the model.
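# (The model wrappers in models/ embed their own input preprocessing in the
# graph, so this loader keeps raw [0, 255] RGB values; get_test_dataset()
# below only rescales them to [0, 1].)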
22 | # x = preprocess_input(x) 23 | return x 24 | 25 | 26 | def _load_single_image(args): 27 | img_path, img_size = args 28 | return load_single_image(img_path, img_size) 29 | 30 | 31 | def data_imagenet(img_folder, img_size, label_style = 'caffe', label_size = 1000, selected_idx = None): 32 | fnames = os.listdir(img_folder) 33 | fnames = sorted(fnames, key = lambda x: int(x.split('.')[1])) 34 | 35 | if isinstance(selected_idx, list): 36 | selected_fnames = [fnames[i] for i in selected_idx] 37 | elif isinstance(selected_idx, int): 38 | selected_fnames = fnames[:selected_idx] 39 | else: 40 | selected_fnames = fnames 41 | 42 | labels = map(lambda x: int(x.split('.')[0]), selected_fnames) 43 | img_path_list = map(lambda x: [os.path.join(img_folder, x), img_size], selected_fnames) 44 | X = map(_load_single_image, img_path_list) 45 | X = np.concatenate(X, axis=0) 46 | Y = np.eye(1000)[labels] 47 | return X, Y 48 | 49 | 50 | class ImageNetDataset: 51 | def __init__(self): 52 | self.dataset_name = "ImageNet" 53 | # self.image_size = 224 54 | self.num_channels = 3 55 | self.num_classes = 1000 56 | self.img_folder = "/tmp/ILSVRC2012_img_val_labeled_caffe" 57 | 58 | if not os.path.isdir(self.img_folder): 59 | raise Exception("Please prepare the ImageNet dataset first: EvadeML-Zoo/datasets/imagenet_dataset/label_as_filename.py.") 60 | 61 | def get_test_dataset(self, img_size=224, num_images=100): 62 | self.image_size = img_size 63 | X, Y = data_imagenet(self.img_folder, self.image_size, selected_idx=num_images) 64 | X /= 255 65 | return X, Y 66 | 67 | def get_test_data(self, img_size, idx_begin, idx_end): 68 | # Return part of the dataset. 69 | self.image_size = img_size 70 | X, Y = data_imagenet(self.img_folder, self.image_size, selected_idx=range(idx_begin, idx_end)) 71 | X /= 255 72 | return X, Y 73 | 74 | def load_model_by_name(self, model_name, logits=False, input_range_type=1, input_tensor=None, pre_filter=lambda x:x): 75 | """ 76 | :params logits: no softmax layer if True.
:params input_range_type: {1: [0,1], 2: [-0.5, 0.5], 3: [-1, 1]...} 78 | """ 79 | if model_name == 'resnet50': 80 | model = keras_resnet50_imagenet_model(logits=logits, input_range_type=input_range_type) 81 | elif model_name == 'vgg19': 82 | model = keras_vgg19_imagenet_model(logits=logits, input_range_type=input_range_type) 83 | elif model_name == 'inceptionv3': 84 | model = keras_inceptionv3_imagenet_model(logits=logits, input_range_type=input_range_type) 85 | elif model_name == 'mobilenet': 86 | model = mobilenet_imagenet_model(logits=logits, input_range_type=input_range_type, pre_filter=pre_filter) 87 | else: 88 | raise Exception("Unsupported model: [%s]" % model_name) 89 | 90 | return model 91 | 92 | if __name__ == '__main__': 93 | # label_style = 'caffe' 94 | # # img_folder = "/mnt/nfs/taichi/imagenet_data/data_val_labeled_%s" % label_style 95 | # img_folder = "/tmp/ILSVRC2012_img_val_labeled_caffe" 96 | # X, Y = data_imagenet(img_folder, selected_idx=10) 97 | # print (X.shape) 98 | # print (np.argmax(Y, axis=1)) 99 | 100 | dataset = ImageNetDataset() 101 | 102 | X, Y = dataset.get_test_dataset() 103 | model = dataset.load_model_by_name('resnet50') 104 | 105 | 106 | 107 | 108 | -------------------------------------------------------------------------------- /datasets/imagenet_dataset/label_as_filename.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import pdb 4 | 5 | src_folder, style = sys.argv[1:] 6 | 7 | """ 8 | https://gist.github.com/ksimonyan/fd8800eeb36e276cd6f9#note 9 | 10 | mkdir data_val 11 | tar xf ILSVRC2012_img_val.tar -C data_val 12 | 13 | mkdir /tmp/ILSVRC2012_img_val/ 14 | tar -xf ~/Downloads/ILSVRC2012_img_val.tar -C /tmp/ILSVRC2012_img_val/ 15 | python label_as_filename.py /tmp/ILSVRC2012_img_val caffe 16 | 17 | mkdir /tmp/ILSVRC2012_img_val/ 18 | tar -xf /mnt/nfs/seedcake/imagenet-data/ILSVRC2012_img_val.tar -C /tmp/ILSVRC2012_img_val/ 19 | python label_as_filename.py /tmp/ILSVRC2012_img_val caffe 20 | """ 21 | 22 | ground_truth_file = {'official': "ILSVRC2014_clsloc_validation_ground_truth.txt", 23 | 'caffe': "caffe_clsloc_validation_ground_truth.txt"} 24 | 25 | if style == 'official': 26 | get_class_id_func = lambda x: int(x)-1 27 | elif style == 'caffe': 28 | get_class_id_func = lambda x: int(x.split()[1]) 29 | 30 | labels_text = open(ground_truth_file[style]).readlines() 31 | labels = map(get_class_id_func, labels_text) 32 | 33 | tgt_folder = "ILSVRC2012_img_val_labeled_%s" % style 34 | tgt_folder = os.path.join(os.path.dirname(src_folder), tgt_folder) 35 | if not os.path.isdir(tgt_folder): 36 | os.makedirs(tgt_folder) 37 | 38 | for i in range(1, 50001): 39 | src_fname = "ILSVRC2012_val_%08d.JPEG" % i 40 | tgt_fname = "%d.%d.JPEG" % (labels[i-1], i) 41 | os.symlink(os.path.abspath(os.path.join(src_folder, src_fname)), os.path.join(tgt_folder, tgt_fname)) 42 | -------------------------------------------------------------------------------- /datasets/mnist.py: -------------------------------------------------------------------------------- 1 | from keras.datasets import mnist 2 | from keras.utils import np_utils 3 | 4 | import sys, os 5 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 6 | 7 | from models.carlini_models import carlini_mnist_model 8 | from models.cleverhans_models import cleverhans_mnist_model 9 | from models.pgdtrained_models import pgdtrained_mnist_model 10 | 11 | class MNISTDataset: 12 | def __init__(self): 13 | self.dataset_name =
"MNIST" 14 | self.image_size = 28 15 | self.num_channels = 1 16 | self.num_classes = 10 17 | 18 | def get_test_dataset(self): 19 | (X_train, y_train), (X_test, y_test) = mnist.load_data() 20 | 21 | X_test = X_test.reshape(X_test.shape[0], self.image_size, self.image_size, self.num_channels) 22 | X_test = X_test.astype('float32') 23 | X_test /= 255 24 | Y_test = np_utils.to_categorical(y_test, self.num_classes) 25 | del X_train, y_train 26 | return X_test, Y_test 27 | 28 | def get_val_dataset(self): 29 | (X_train, y_train), (X_test, y_test) = mnist.load_data() 30 | val_size = 5000 31 | X_val = X_train[:val_size] 32 | X_val = X_val.reshape(X_val.shape[0], self.image_size, self.image_size, self.num_channels) 33 | X_val = X_val.astype('float32') / 255 34 | y_val = y_train[:val_size] 35 | Y_val = np_utils.to_categorical(y_val, self.num_classes) 36 | del X_train, y_train, X_test, y_test 37 | 38 | return X_val, Y_val 39 | 40 | def load_model_by_name(self, model_name, logits=False, input_range_type=1, pre_filter=lambda x:x): 41 | """ 42 | :params logits: return logits(input of softmax layer) if True; return softmax output otherwise. 43 | :params input_range_type: {1: [0,1], 2:[-0.5, 0.5], 3:[-1, 1]...} 44 | """ 45 | if model_name not in ["cleverhans", 'cleverhans_adv_trained', 'carlini', 'pgdtrained', 'pgdbase']: 46 | raise NotImplementedError("Undefined model [%s] for %s." % (model_name, self.dataset_name)) 47 | self.model_name = model_name 48 | 49 | model_weights_fpath = "%s_%s.keras_weights.h5" % (self.dataset_name, model_name) 50 | model_weights_fpath = os.path.join('downloads/trained_models', model_weights_fpath) 51 | 52 | # self.maybe_download_model() 53 | if model_name in ["cleverhans", 'cleverhans_adv_trained']: 54 | model = cleverhans_mnist_model(logits=logits, input_range_type=input_range_type, pre_filter=pre_filter) 55 | elif model_name in ['carlini']: 56 | model = carlini_mnist_model(logits=logits, input_range_type = input_range_type, pre_filter=pre_filter) 57 | elif model_name in ['pgdtrained', 'pgdbase']: 58 | model = pgdtrained_mnist_model(logits=logits, input_range_type = input_range_type, pre_filter=pre_filter) 59 | print("\n===Defined TensorFlow model graph.") 60 | model.load_weights(model_weights_fpath) 61 | print ("---Loaded MNIST-%s model.\n" % model_name) 62 | return model 63 | 64 | if __name__ == '__main__': 65 | # from datasets.mnist import * 66 | dataset = MNISTDataset() 67 | X_test, Y_test = dataset.get_test_dataset() 68 | print (X_test.shape) 69 | print (Y_test.shape) 70 | 71 | model_name = 'cleverhans' 72 | model = dataset.load_model_by_name(model_name) 73 | 74 | model.compile(loss='categorical_crossentropy',optimizer='sgd', metrics=['acc']) 75 | _,accuracy = model.evaluate(X_test, Y_test, batch_size=128) 76 | print ("\nTesting accuracy: %.4f" % accuracy) 77 | -------------------------------------------------------------------------------- /datasets/svhn.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 3 | sys.path.append('datasets/svhn_dataset/') 4 | import download_svhn_data 5 | 6 | import numpy as np 7 | import scipy.io as sio 8 | 9 | from keras.utils import np_utils 10 | 11 | from models.carlini_models import carlini_mnist_model 12 | from models.cleverhans_models import cleverhans_mnist_model 13 | from models.pgdtrained_models import pgdtrained_mnist_model 14 | from models.tohinz_models import tohinz_svhn_model 15 | 16 | class 
SVHNDataset: 17 | def __init__(self): 18 | self.dataset_name = "SVHN" 19 | self.image_size = 32 20 | self.num_channels = 3 21 | self.num_classes = 10 22 | 23 | def get_test_dataset(self): 24 | test_path = "test_32x32.mat" 25 | test_path = os.path.join('datasets/svhn_dataset', test_path) 26 | test = sio.loadmat(test_path) 27 | X_test = test['X'] 28 | y_test = test['y'] 29 | y_test[y_test == 10] = 0 30 | 31 | y_test = y_test.ravel() 32 | 33 | X_test = np.transpose(X_test,(3,0,1,2)) 34 | X_test = X_test.astype('float32') / 255 35 | Y_test = np_utils.to_categorical(y_test) 36 | 37 | del y_test 38 | return X_test, Y_test 39 | 40 | def get_val_dataset(self): 41 | train_path = "train_32x32.mat" 42 | train_path = os.path.join('datasets/svhn_dataset', train_path) 43 | train = sio.loadmat(train_path) 44 | X_train = train['X'] 45 | y_train = train['y'] 46 | y_train[y_train == 10] = 0 47 | 48 | y_train = y_train.ravel() 49 | val_size = 5000 50 | 51 | 52 | X_val = X_train[:val_size] 53 | X_val = np.transpose(X_val,(3,0,1,2)) 54 | X_val = X_val.astype('float32') / 255 55 | y_val = y_train[:val_size] 56 | Y_val = np_utils.to_categorical(y_val) 57 | del X_train, y_train 58 | return X_val, Y_val 59 | 60 | def load_model_by_name(self, model_name, logits=False, input_range_type=1, pre_filter=lambda x:x): 61 | 62 | if model_name not in ['tohinz']: 63 | raise NotImplementedError("Undefined model [%s] for %s." % (model_name, self.dataset_name)) 64 | self.model_name = model_name 65 | 66 | model_weights_fpath = "%s_%s.keras_weights.h5" % (self.dataset_name, model_name) 67 | model_weights_fpath = os.path.join('downloads/trained_models', model_weights_fpath) 68 | 69 | if model_name in ["tohinz"]: 70 | model = tohinz_svhn_model(logits=logits, input_range_type=input_range_type, pre_filter=pre_filter) 71 | print("\n===Defined TensorFlow model graph.") 72 | model.load_weights(model_weights_fpath) 73 | print ("---Loaded SVHN-%s model.\n" % model_name) 74 | return model 75 | 76 | if __name__ == '__main__': 77 | # from datasets.mnist import * 78 | dataset = SVHNDataset() 79 | X_test, Y_test = dataset.get_test_dataset() 80 | print (X_test.shape) 81 | print (Y_test.shape) 82 | 83 | model_name = 'tohinz' 84 | model = dataset.load_model_by_name(model_name) 85 | 86 | model.compile(loss='categorical_crossentropy',optimizer='sgd', metrics=['acc']) 87 | _,accuracy = model.evaluate(X_test, Y_test, batch_size=128) 88 | print ("\nTesting accuracy: %.4f" % accuracy) 89 | -------------------------------------------------------------------------------- /datasets/svhn_dataset/download_svhn_data.py: -------------------------------------------------------------------------------- 1 | import urllib2 2 | import os, sys 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | sys.path.append('datasets/svhn_dataset/') 6 | 7 | filename = ['test_32x32.mat','train_32x32.mat'] 8 | addr = ['http://ufldl.stanford.edu/housenumbers/test_32x32.mat','http://ufldl.stanford.edu/housenumbers/train_32x32.mat'] 9 | 10 | for i in range(2): 11 | f = os.path.join('datasets/svhn_dataset',filename[i]) 12 | if not os.path.exists(f): 13 | output = open(f,'w') 14 | output.write(urllib2.urlopen(addr[i]).read()) 15 | output.close() 16 | -------------------------------------------------------------------------------- /datasets/visualization.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | import pdb 4 | 5 | def show_img(pixel_array, mode=None): 6 | 
img = Image.fromarray(pixel_array*255, mode=mode) 7 | img.show() 8 | 9 | 10 | def show_imgs_in_rows(rows, fpath=None): 11 | # TODO: get the maximum. 12 | width_num = len(rows[0]) 13 | height_num = len(rows) 14 | image_size = rows[0][0].shape[:2] 15 | img_width, img_height = image_size 16 | 17 | x_margin = 2 18 | y_margin = 2 19 | 20 | # pdb.set_trace() 21 | 22 | total_width = width_num * img_width + (width_num-1)*x_margin 23 | total_height = height_num * img_height + (height_num-1)*y_margin 24 | 25 | new_im = Image.new('RGB', (total_width, total_height), (255,255,255)) 26 | 27 | x_offset = 0 28 | y_offset = 0 29 | 30 | for imgs in rows: 31 | imgs_row = list(imgs) 32 | for img_array in imgs_row: 33 | # pdb.set_trace() 34 | img = Image.fromarray((np.squeeze(img_array)*255).astype(np.uint8)) 35 | new_im.paste(img, (x_offset,y_offset)) 36 | x_offset += img_width + x_margin 37 | 38 | x_offset = 0 39 | y_offset += img_height + y_margin 40 | 41 | if fpath is not None: 42 | new_im.save(fpath) 43 | new_im.show() 44 | -------------------------------------------------------------------------------- /detections/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mzweilin/EvadeML-Zoo/4dbeee04874a836f79782802b2bf8a8612a2a86f/detections/__init__.py -------------------------------------------------------------------------------- /detections/base.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import numpy as np 7 | import random 8 | import pdb 9 | import sklearn 10 | import os 11 | 12 | from sklearn.metrics import roc_curve, auc 13 | from .feature_squeezing import FeatureSqueezingDetector 14 | from .magnet_mnist import MagNetDetector as MagNetDetectorMNIST 15 | from .magnet_cifar import MagNetDetector as MagNetDetectorCIFAR 16 | 17 | from tensorflow.python.platform import flags 18 | FLAGS = flags.FLAGS 19 | from utils.output import write_to_csv 20 | 21 | def get_tpr_fpr(true_labels, pred_labels): 22 | TP = np.sum(np.logical_and(pred_labels == 1, true_labels == 1)) 23 | FP = np.sum(np.logical_and(pred_labels == 1, true_labels == 0)) 24 | 25 | AP = np.sum(true_labels) 26 | AN = np.sum(1-true_labels) 27 | 28 | tpr = TP/AP if AP>0 else np.nan 29 | fpr = FP/AN if AN>0 else np.nan 30 | 31 | return tpr, fpr, TP, AP 32 | 33 | 34 | def evalulate_detection_test(Y_detect_test, Y_detect_pred): 35 | accuracy = sklearn.metrics.accuracy_score(Y_detect_test, Y_detect_pred, normalize=True, sample_weight=None) 36 | tpr, fpr, tp, ap = get_tpr_fpr(Y_detect_test, Y_detect_pred) 37 | return accuracy, tpr, fpr, tp, ap 38 | 39 | 40 | from tinydb import TinyDB, Query 41 | 42 | class DetectionEvaluator: 43 | """ 44 | Get a dataset; 45 | Failed adv as benign / Failed adv as adversarial. 46 | For each detector: 47 | Train 48 | Test 49 | Report performance 50 | Detection rate on each attack. 51 | Detection on SAEs / FAEs. 52 | ROC-AUC. 
53 | 54 | A detector should have this simplified interface: 55 | Y_pred = detector(X) 56 | """ 57 | def __init__(self, model, result_folder, csv_fname, dataset_name): 58 | pass 59 | # set_base_model() 60 | self.model = model 61 | self.task_dir = result_folder 62 | self.csv_fpath = os.path.join(result_folder, csv_fname) 63 | self.dataset_name = dataset_name 64 | 65 | if not os.path.isdir(self.task_dir): 66 | os.makedirs(self.task_dir) 67 | 68 | def get_attack_id(self, attack_name): 69 | return self.attack_name_id[attack_name] 70 | 71 | def build_detection_dataset(self, X, Y_label, Y_pred, selected_idx, X_adv_list, Y_adv_pred_list, attack_names, attack_string_hash, clip, Y_test_target_next, Y_test_target_ll): 72 | # X_train, Y_train, X_test, Y_test, test_idx, failed_adv_idx = \ 73 | # get_detection_train_test_set(X, Y_label, X_adv_list, Y_adv_pred_list, attack_names) 74 | 75 | """ 76 | Data Model: 77 | index, attack_id, misclassified, train 78 | 14, 0, 0, 1 79 | """ 80 | 81 | self.attack_names = attack_names 82 | self.attack_name_id = {} 83 | self.attack_name_id['legitimate'] = 0 84 | for i,attack_name in enumerate(attack_names): 85 | self.attack_name_id[attack_name] = i+1 86 | 87 | X_adv_all = np.concatenate(X_adv_list) 88 | X_leg_all = X[:len(X_adv_all)] 89 | 90 | self.X_detect = X_detect = np.concatenate([X_leg_all, X_adv_all]) 91 | # TODO: this could be wrong in non-default data selection mode. 92 | Y_label_adv = Y_label[selected_idx] 93 | 94 | detection_db_path = os.path.join(self.task_dir, "detection_db_%s_clip_%s.json" % (attack_string_hash, clip)) 95 | 96 | if os.path.isfile(detection_db_path): 97 | self.db = TinyDB(detection_db_path) 98 | self.query = Query() 99 | print ("Loaded an existing detection dataset.") 100 | return 101 | else: 102 | print ("Preparing the detection dataset...") 103 | 104 | # 1. Split Train and Test 105 | random.seed(1234) 106 | length = len(X_detect) 107 | train_ratio = 0.5 108 | train_idx = random.sample(range(length), int(train_ratio*length)) 109 | train_test_seq = [1 if idx in train_idx else 0 for idx in range(length) ] 110 | 111 | # 2. Tag the misclassified examples, both legitimate and adversarial. 112 | # TODO: Differentiate the successful examples between targeted and non-targeted. 113 | misclassified_seq = list(np.argmax(Y_label[:len(X_leg_all)], axis=1) != np.argmax(Y_pred[:len(X_leg_all)], axis=1)) 114 | for Y_adv_pred in Y_adv_pred_list: 115 | misclassified_seq_adv = list(np.argmax(Y_adv_pred, axis=1) != np.argmax(Y_label_adv, axis=1)) 116 | misclassified_seq += misclassified_seq_adv 117 | 118 | success_adv_seq = [False] * len(X_leg_all) 119 | for i, Y_adv_pred in enumerate(Y_adv_pred_list): 120 | attack_name = attack_names[i] 121 | if 'targeted=ll' in attack_name: 122 | success_adv_seq_attack = list(np.argmax(Y_adv_pred, axis=1) == np.argmax(Y_test_target_ll, axis=1)) 123 | elif 'targeted=next' in attack_name: 124 | success_adv_seq_attack = list(np.argmax(Y_adv_pred, axis=1) == np.argmax(Y_test_target_next, axis=1)) 125 | else: 126 | # The same as misclassified. 127 | success_adv_seq_attack = list(np.argmax(Y_adv_pred, axis=1) != np.argmax(Y_label_adv, axis=1)) 128 | success_adv_seq += success_adv_seq_attack 129 | 130 | 131 | 132 | # 3. Tag the attack ID, 0 as legitimate. 
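# (Assumption in the loop below: every attack produced the same number of
# examples, since len(X_adv_list[0]) is reused for each attack_id; this holds
# here because all attacks run on the same selected_idx.)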
133 | attack_id_seq = [0]*len(X_leg_all) 134 | for i,attack_name in enumerate(attack_names): 135 | attack_id_seq += [i+1]*len(X_adv_list[0]) 136 | 137 | assert len(X_detect) == len(train_test_seq) == len(misclassified_seq) == len(attack_id_seq) 138 | 139 | self.db = TinyDB(detection_db_path) 140 | self.query = Query() 141 | 142 | for i in range(len(X_detect)): 143 | attack_id = attack_id_seq[i] 144 | misclassified = 1 if misclassified_seq[i] == True else 0 145 | success = 1 if success_adv_seq[i] == True else 0 146 | train = train_test_seq[i] 147 | rec = {'index': i, 'attack_id': attack_id, 'misclassified': misclassified, 'success': success, 'train': train} 148 | self.db.insert(rec) 149 | 150 | def get_data_from_db_records(self, recs): 151 | if len(recs) == 0: 152 | return None, None 153 | X_idx = [rec['index'] for rec in recs] 154 | X = self.X_detect[np.array(X_idx)] 155 | Y = np.array([1 if rec['attack_id']>0 else 0 for rec in recs]) 156 | return X, Y 157 | 158 | def get_training_testing_data(self, train = True): 159 | db = self.db 160 | query = self.query 161 | 162 | recs = db.search(query.train == 1) 163 | X_train, Y_train = self.get_data_from_db_records(recs) 164 | 165 | recs = db.search(query.train == 0) 166 | X_test, Y_test = self.get_data_from_db_records(recs) 167 | 168 | return X_train, Y_train, X_test, Y_test 169 | 170 | def get_adversarial_data(self, only_testing, success, attack_name=None, include_legitimate=False): 171 | db = self.db 172 | query = self.query 173 | 174 | conditions_and = [] 175 | if only_testing: 176 | conditions_and.append(query.train == 0) 177 | 178 | if attack_name is None: 179 | conditions_and.append(query.attack_id > 0) 180 | else: 181 | attack_id = self.get_attack_id(attack_name) 182 | conditions_and.append(query.attack_id == attack_id) 183 | 184 | if success: 185 | conditions_and.append(query.success == 1) 186 | else: 187 | conditions_and.append(query.success == 0) 188 | 189 | conditions = reduce(lambda a,b:a&b, conditions_and) 190 | # print ("conditions: %s " % conditions) 191 | 192 | recs = db.search(conditions) 193 | 194 | if include_legitimate: 195 | if only_testing: 196 | conditions = (query.attack_id == 0) & (query.train == 0) 197 | else: 198 | conditions = query.attack_id == 0 199 | # print ("additional conditions: %s " % conditions) 200 | recs += db.search(conditions) 201 | 202 | return self.get_data_from_db_records(recs) 203 | 204 | def get_sae_testing_data(self, attack_name=None): 205 | return self.get_adversarial_data(only_testing=True, success=True, attack_name=attack_name) 206 | 207 | def get_sae_data(self, attack_name=None): 208 | return self.get_adversarial_data(only_testing=False, success=True, attack_name=attack_name) 209 | 210 | def get_fae_testing_data(self, attack_name=None): 211 | return self.get_adversarial_data(only_testing=True, success=False, attack_name=attack_name) 212 | 213 | def get_fae_data(self, attack_name=None): 214 | return self.get_adversarial_data(only_testing=False, success=False, attack_name=attack_name) 215 | 216 | def get_all_non_fae_testing_data(self, attack_name=None): 217 | return self.get_adversarial_data(only_testing=True, success=True, attack_name=attack_name, include_legitimate=True) 218 | 219 | def get_all_non_fae_data(self, attack_name=None): 220 | return self.get_adversarial_data(only_testing=False, success=True, attack_name=attack_name, include_legitimate=True) 221 | 222 | def get_detector_by_name(self, detector_name): 223 | model = self.model 224 | detector = None 225 | 226 | if 
detector_name.startswith('FeatureSqueezing'): 227 | detector = FeatureSqueezingDetector(model, detector_name) 228 | elif detector_name.startswith('MagNet'): 229 | if self.dataset_name == 'MNIST': 230 | detector = MagNetDetectorMNIST(model, detector_name) 231 | elif self.dataset_name == "CIFAR-10": 232 | detector = MagNetDetectorCIFAR(model, detector_name) 233 | 234 | return detector 235 | 236 | def evaluate_detections(self, params_str): 237 | X_train, Y_train, X_test, Y_test = self.get_training_testing_data() 238 | 239 | # Example: --detection "FeatureSqueezing?distance_measure=l1&squeezers=median_smoothing_2,bit_depth_4;" 240 | detector_names = [ele.strip() for ele in params_str.split(';') if ele.strip()!= ''] 241 | 242 | dataset_name = self.dataset_name 243 | csv_fpath = "./detection_%s_saes.csv" % dataset_name 244 | fieldnames = ['detector', 'threshold', 'fpr'] + self.attack_names + ['overall'] 245 | to_csv = [] 246 | 247 | for detector_name in detector_names: 248 | detector = self.get_detector_by_name(detector_name) 249 | if detector is None: 250 | print ("Skipped an unknown detector [%s]" % detector_name.split('?')[0]) 251 | continue 252 | detector.train(X_train, Y_train) 253 | Y_test_pred, Y_test_pred_score = detector.test(X_test) 254 | 255 | accuracy, tpr, fpr, tp, ap = evalulate_detection_test(Y_test, Y_test_pred) 256 | fprs, tprs, thresholds = roc_curve(Y_test, Y_test_pred_score) 257 | roc_auc = auc(fprs, tprs) 258 | 259 | print ("Detector: %s" % detector_name) 260 | print ("Accuracy: %f\tTPR: %f\tFPR: %f\tROC-AUC: %f" % (accuracy, tpr, fpr, roc_auc)) 261 | 262 | rec = {} 263 | rec['detector'] = detector_name 264 | if hasattr(detector, 'threshold'): 265 | rec['threshold'] = detector.threshold 266 | else: 267 | rec['threshold'] = None 268 | rec['fpr'] = fpr 269 | overall_detection_rate_saes = 0 270 | nb_saes = 0 271 | for attack_name in self.attack_names: 272 | # No adversarial examples for training for the current detection methods. 273 | # X_sae, Y_sae = self.get_sae_testing_data(attack_name) 274 | if FLAGS.detection_train_test_mode: 275 | X_sae, Y_sae = self.get_sae_testing_data(attack_name) 276 | else: 277 | X_sae, Y_sae = self.get_sae_data(attack_name) 278 | Y_test_pred, Y_test_pred_score = detector.test(X_sae) 279 | _, tpr, _, tp, ap = evalulate_detection_test(Y_sae, Y_test_pred) 280 | print ("Detection rate on SAEs: %.4f \t %3d/%3d \t %s" % (tpr, tp, ap, attack_name)) 281 | overall_detection_rate_saes += tpr * len(Y_sae) 282 | nb_saes += len(Y_sae) 283 | rec[attack_name] = tpr 284 | # print ("overall_detection_rate_saes/nb_saes: %d/%d" % (overall_detection_rate_saes, nb_saes)) 285 | 286 | print ("Overall detection rate on SAEs: %f (%d/%d)" % (overall_detection_rate_saes/nb_saes, overall_detection_rate_saes, nb_saes)) 287 | rec['overall'] = float(overall_detection_rate_saes/nb_saes) 288 | to_csv.append(rec) 289 | 290 | # No adversarial examples for training for the current detection methods. 
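# (Terminology in the reports below: SAEs are successful adversarial examples
# that still fool the target model, FAEs are failed ones that do not; FAEs
# are reported separately and excluded from the overall TPR/ROC-AUC.)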
291 | # X_sae_all, Y_sae_all = self.get_sae_testing_data() 292 | print ("### Excluding FAEs:") 293 | if FLAGS.detection_train_test_mode: 294 | X_nfae_all, Y_nfae_all = self.get_all_non_fae_testing_data() 295 | else: 296 | X_nfae_all, Y_nfae_all = self.get_all_non_fae_data() 297 | Y_pred, Y_pred_score = detector.test(X_nfae_all) 298 | _, tpr, _, tp, ap = evalulate_detection_test(Y_nfae_all, Y_pred) 299 | fprs, tprs, thresholds = roc_curve(Y_nfae_all, Y_pred_score) 300 | 301 | # print ("threshold\tfpr\ttpr") 302 | # for i, threshold in enumerate(thresholds): 303 | # print ("%.4f\t%.4f\t%.4f" % (threshold, fprs[i], tprs[i])) 304 | 305 | roc_auc = auc(fprs, tprs) 306 | print ("Overall TPR: %f\tROC-AUC: %f" % (tpr, roc_auc)) 307 | 308 | # FAEs 309 | if FLAGS.detection_train_test_mode: 310 | X_fae, Y_fae = self.get_fae_testing_data() 311 | else: 312 | X_fae, Y_fae = self.get_fae_data() 313 | Y_test_pred, Y_test_pred_score = detector.test(X_fae) 314 | _, tpr, _, tp, ap = evalulate_detection_test(Y_fae, Y_test_pred) 315 | print ("Overall detection rate on FAEs: %.4f \t %3d/%3d" % (tpr, tp, ap)) 316 | 317 | write_to_csv(to_csv, csv_fpath, fieldnames) -------------------------------------------------------------------------------- /detections/feature_squeezing.py: -------------------------------------------------------------------------------- 1 | import sklearn 2 | from sklearn.metrics import roc_curve, auc 3 | import numpy as np 4 | from scipy.stats import entropy 5 | from keras.models import Model 6 | 7 | import operator 8 | import functools 9 | import pdb 10 | import random 11 | import sys, os 12 | 13 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 14 | # from utils.visualization import draw_plot 15 | from utils.squeeze import get_squeezer_by_name 16 | from utils.parameter_parser import parse_params 17 | 18 | def reshape_2d(x): 19 | if len(x.shape) > 2: 20 | # Reshape to [#num_examples, ?] 21 | batch_size = x.shape[0] 22 | num_dim = functools.reduce(operator.mul, x.shape, 1) 23 | x = x.reshape((batch_size, num_dim // batch_size)) 24 | return x 25 | 26 | 27 | # Normalization. 28 | # Two approaches: 1. softmax; 2. unit-length vector (unit norm). 29 | 30 | # Code Source: ? 31 | def softmax(z): 32 | assert len(z.shape) == 2 33 | s = np.max(z, axis=1) 34 | s = s[:, np.newaxis] # necessary step to do broadcasting 35 | e_x = np.exp(z - s) 36 | div = np.sum(e_x, axis=1) 37 | div = div[:, np.newaxis] # ditto 38 | return e_x / div 39 | 40 | 41 | from sklearn.preprocessing import normalize 42 | def unit_norm(x): 43 | """ 44 | x: a 2D array: (batch_size, vector_length) 45 | """ 46 | return normalize(x, axis=1) 47 | 48 | 49 | l1_dist = lambda x1,x2: np.sum(np.abs(x1 - x2), axis=tuple(range(len(x1.shape))[1:])) 50 | l2_dist = lambda x1,x2: np.sum((x1-x2)**2, axis=tuple(range(len(x1.shape))[1:]))**.5 51 | 52 | 53 | # Note: KL-divergence is not symmetric. 54 | # Designed for probability distributions (e.g. softmax output).
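# (scipy.stats.entropy(p, q) computes the KL divergence column-wise and
# returns inf wherever q has zeros that p does not; kl() below caps those
# entries at an arbitrary finite value of 2 so max/mean stay finite.)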
55 | def kl(x1, x2): 56 | assert x1.shape == x2.shape 57 | # x1_2d, x2_2d = reshape_2d(x1), reshape_2d(x2) 58 | 59 | # Transpose to [?, #num_examples] 60 | x1_2d_t = x1.transpose() 61 | x2_2d_t = x2.transpose() 62 | 63 | # pdb.set_trace() 64 | e = entropy(x1_2d_t, x2_2d_t) 65 | e[np.where(e==np.inf)] = 2 66 | return e 67 | 68 | 69 | class FeatureSqueezingDetector: 70 | def __init__(self, model, param_str): 71 | self.model = model 72 | subject, params = parse_params(param_str) 73 | 74 | layer_id = len(model.layers)-1 75 | normalizer = 'none' 76 | metric = params['distance_measure'] 77 | squeezers_name = params['squeezers'].split(',') 78 | self.set_config(layer_id, normalizer, metric, squeezers_name) 79 | 80 | if 'threshold' in params: 81 | self.threshold = float(params['threshold']) 82 | else: 83 | self.threshold = None 84 | self.train_fpr = float(params['fpr']) 85 | 86 | def get_squeezer_by_name(self, name): 87 | return get_squeezer_by_name(name, 'python') 88 | 89 | def get_normalizer_by_name(self, name): 90 | d = {'unit_norm': unit_norm, 'softmax': softmax, 'none': lambda x:x} 91 | return d[name] 92 | 93 | def get_metric_by_name(self, name): 94 | d = {'kl_f': lambda x1,x2: kl(x1, x2), 'kl_b': lambda x1,x2: kl(x2, x1), 'l1': l1_dist, 'l2': l2_dist} 95 | return d[name] 96 | 97 | def set_config(self, layer_id, normalizer_name, metric_name, squeezers_name): 98 | self.layer_id = layer_id 99 | self.normalizer_name = normalizer_name 100 | self.metric_name = metric_name 101 | self.squeezers_name = squeezers_name 102 | 103 | def get_config(self): 104 | return self.layer_id, self.normalizer_name, self.metric_name, self.squeezers_name 105 | 106 | # Visualize the propagation of perturbations. 107 | # Scenario 1: Assume we have a perfect squeezer that always recovers an adversarial example to its legitimate version. The distance for legitimate examples is zero. 108 | # Scenario 2: Use one (or several) feature squeezer(s) that barely affect legitimate examples. The distance for legitimate examples may be positive. 109 | def view_adv_propagation(self, X, X_adv, squeezers_name): 110 | """ 111 | Assume we have a perfect feature squeezer that always recovers a given adversarial example to its legitimate version. 112 | The distance for legitimate examples is then zero. 113 | We want to find out which layer has the most different output between the adversarial and legitimate example pairs, 114 | under several measurements. 115 | """ 116 | model = self.model 117 | 118 | for layer in model.layers: 119 | shape_size = functools.reduce(operator.mul, layer.output_shape[1:]) 120 | print (layer.name, shape_size) 121 | 122 | xs = np.arange(len(model.layers)) 123 | 124 | ret = [] 125 | 126 | for normalizer in ['unit_norm', 'softmax', 'none']: 127 | normalize_func = self.get_normalizer_by_name(normalizer) 128 | label_list = [] 129 | series_list = [] 130 | 131 | if normalizer == "softmax": 132 | metric_list = ['kl_f', 'kl_b', 'l1', 'l2'] 133 | else: 134 | metric_list = ['l1', 'l2'] 135 | for distance_metric_name in metric_list: 136 | distance_func = self.get_metric_by_name(distance_metric_name) 137 | 138 | series = [] 139 | 140 | for layer_id in range(len(model.layers)): 141 | self.set_config(layer_id, normalizer, distance_metric_name, squeezers_name) 142 | 143 | if len(squeezers_name) > 0: 144 | # With feature squeezers: Scenario 2. 145 | distance = self.get_distance(X_adv) - self.get_distance(X) 146 | else: 147 | # Assume a perfect feature squeezer: Scenario 1.
148 | distance = self.get_distance(X, X_adv) 149 | mean_dist = np.mean(distance) 150 | series.append(mean_dist) 151 | 152 | series = np.array(series).astype(np.double) 153 | series = series/np.max(series) 154 | series_list.append(series) 155 | label_list.append("%s_%s" % (normalizer, distance_metric_name)) 156 | 157 | layer_id = np.argmax(series) 158 | print ("Best: Metric-%s at Layer-%d, normalized by %s" % (distance_metric_name, layer_id, normalizer)) 159 | ret.append([layer_id, normalizer, distance_metric_name]) 160 | 161 | draw_plot(xs, series_list, label_list, "./%s_%s.png" % (self.name_prefix, normalizer)) 162 | 163 | return ret 164 | 165 | def calculate_distance_max(self, val_orig, vals_squeezed, metric_name): 166 | distance_func = self.get_metric_by_name(metric_name) 167 | 168 | dist_array = [] 169 | for val_squeezed in vals_squeezed: 170 | dist = distance_func(val_orig, val_squeezed) 171 | dist_array.append(dist) 172 | 173 | dist_array = np.array(dist_array) 174 | return np.max(dist_array, axis=0) 175 | 176 | def get_distance(self, X1, X2=None): 177 | layer_id, normalizer_name, metric_name, squeezers_name = self.get_config() 178 | 179 | normalize_func = self.get_normalizer_by_name(normalizer_name) 180 | input_to_normalized_output = lambda x: normalize_func(reshape_2d(self.eval_layer_output(x, layer_id))) 181 | 182 | val_orig_norm = input_to_normalized_output(X1) 183 | 184 | if X2 is None: 185 | vals_squeezed = [] 186 | for squeezer_name in squeezers_name: 187 | squeeze_func = self.get_squeezer_by_name(squeezer_name) 188 | val_squeezed_norm = input_to_normalized_output(squeeze_func(X1)) 189 | vals_squeezed.append(val_squeezed_norm) 190 | distance = self.calculate_distance_max(val_orig_norm, vals_squeezed, metric_name) 191 | else: 192 | val_1_norm = val_orig_norm 193 | val_2_norm = input_to_normalized_output(X2) 194 | distance_func = self.get_metric_by_name(metric_name) 195 | distance = distance_func(val_1_norm, val_2_norm) 196 | 197 | return distance 198 | 199 | def eval_layer_output(self, X, layer_id): 200 | layer_output = Model(inputs=self.model.layers[0].input, outputs=self.model.layers[layer_id].output) 201 | return layer_output.predict(X) 202 | 203 | 204 | def output_distance_csv(self, X_list, field_name_list, csv_fpath): 205 | from utils.output import write_to_csv 206 | distances_list = [] 207 | for X in X_list: 208 | distances = self.get_distance(X) 209 | distances_list.append(distances) 210 | 211 | to_csv = [] 212 | for i in range(len(X_list[0])): 213 | record = {} 214 | for j, field_name in enumerate(field_name_list): 215 | if len(distances_list[j]) > i: 216 | record[field_name] = distances_list[j][i] 217 | else: 218 | record[field_name] = None 219 | to_csv.append(record) 220 | 221 | write_to_csv(to_csv, csv_fpath, field_name_list) 222 | 223 | 224 | # Only examine the legitimate examples to get the threshold, ensure low False Positive rate. 
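# A minimal usage sketch (parameter values are illustrative, matching the
# format shown in detections/base.py):
#
#   detector = FeatureSqueezingDetector(model,
#       "FeatureSqueezing?squeezers=median_smoothing_2,bit_depth_4&distance_measure=l1&fpr=0.05")
#   detector.train(X_train, Y_train)        # Y==0 marks legitimate examples.
#   Y_pred, scores = detector.test(X_test)
#
# train() sorts the distances of the legitimate subset and keeps the value at
# the (1 - fpr) quantile as self.threshold.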
225 | def train(self, X, Y): 226 | """ 227 | Calculating distance depends on: 228 | layer_id 229 | normalizer 230 | distance metric 231 | feature squeezer(s) 232 | """ 233 | 234 | if self.threshold is not None: 235 | print ("Loaded a pre-defined threshold value %f" % self.threshold) 236 | else: 237 | layer_id, normalizer_name, metric_name, squeezers_name = self.get_config() 238 | 239 | neg_idx = np.where(Y == 0)[0] 240 | X_neg = X[neg_idx] 241 | distances = self.get_distance(X_neg) 242 | 243 | selected_distance_idx = int(np.ceil(len(X_neg) * (1-self.train_fpr))) 244 | threshold = sorted(distances)[selected_distance_idx-1] 245 | self.threshold = threshold 246 | print ("Selected %f as the threshold value." % self.threshold) 247 | return self.threshold 248 | 249 | def test(self, X): 250 | layer_id, normalizer_name, metric_name, squeezers_name = self.get_config() 251 | 252 | distances = self.get_distance(X) 253 | threshold = self.threshold 254 | Y_pred = distances > threshold 255 | 256 | return Y_pred, distances 257 | -------------------------------------------------------------------------------- /detections/magnet_cifar.py: -------------------------------------------------------------------------------- 1 | ## test_defense.py -- test defense 2 | ## 3 | ## Copyright (C) 2017, Dongyu Meng . 4 | ## 5 | ## This program is licenced under the BSD 2-Clause licence, 6 | ## contained in the LICENCE file in this directory. 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | from __future__ import unicode_literals 12 | 13 | # Load external module: MagNet 14 | import sys, os 15 | project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 16 | sys.path.append(project_dir) 17 | 18 | from externals.MagNet.setup_cifar import CIFAR 19 | from externals.MagNet.utils import prepare_data 20 | from externals.MagNet.worker import AEDetector, DBDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator 21 | 22 | import numpy as np 23 | import os 24 | 25 | from keras.models import Model, Sequential 26 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda 27 | from keras.activations import softmax 28 | 29 | class ClassifierWrapper: 30 | def __init__(self, model): 31 | """ 32 | Keras classifier wrapper. 33 | Note that the wrapped classifier should spit logits as output. 34 | """ 35 | layer_id = len(model.layers)-2 36 | self.model = Model(inputs=model.layers[0].input, outputs=model.layers[layer_id].output) 37 | self.softmax = Sequential() 38 | self.softmax.add(Lambda(lambda X: softmax(X, axis=1), input_shape=(10,))) 39 | 40 | def classify(self, X, option="logit", T=1): 41 | if option == "logit": 42 | return self.model.predict(X) 43 | if option == "prob": 44 | logits = self.model.predict(X)/T 45 | return self.softmax.predict(logits) 46 | 47 | def print(self): 48 | return "Classifier:"+self.path.split("/")[-1] 49 | 50 | class MagNetDetector: 51 | def __init__(self, model, detector_name): 52 | classifier = ClassifierWrapper(model) 53 | 54 | autoencoder_model_fpath = os.path.join(project_dir, "downloads/MagNet/defensive_models/CIFAR") 55 | 56 | reformer = SimpleReformer(autoencoder_model_fpath) 57 | id_reformer = IdReformer() 58 | 59 | # Note: we may swap the two. 
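# (As implemented in MagNet's worker module, a divergence-based detector
# scores an input by the divergence between the classifier's softened
# predictions on prober(X) and on reconstructor(X) at temperature T, so
# swapping the two roles changes which output serves as the reference.)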
60 |         reconstructor = id_reformer
61 |         prober = reformer
62 |         # reconstructor = reformer
63 |         # prober = id_reformer
64 | 
65 |         eb_detector = AEDetector(autoencoder_model_fpath, p=1)
66 |         db_detector_I = DBDetector(reconstructor, prober, classifier, T=10)
67 |         db_detector_II = DBDetector(reconstructor, prober, classifier, T=40)
68 | 
69 |         detector_dict = dict()
70 |         detector_dict["db-I"] = db_detector_I
71 |         detector_dict["db-II"] = db_detector_II
72 |         detector_dict['eb'] = eb_detector
73 | 
74 |         self.operator = Operator(CIFAR(), classifier, detector_dict, reformer)
75 | 
76 |     def train(self, X=None, Y=None, fpr=None):
77 |         # CIFAR-10
78 |         drop_rate={"db-I": 0.01, "db-II": 0.01, "eb": 0.005}
79 |         # drop_rate={"db-I": fpr, "db-II": fpr, "eb": fpr}
80 |         print("\n==========================================================")
81 |         print("Drop Rate:", drop_rate)
82 |         self.thrs = self.operator.get_thrs(drop_rate)
83 |         print("Thresholds:", self.thrs)
84 | 
85 | 
86 |     def test(self, X):
87 |         all_pass, detector_breakdown = self.operator.filter(X, self.thrs)
88 |         print ("detector_breakdown", detector_breakdown)
89 |         ret_detection = np.array([i not in all_pass for i in range(len(X))])
90 |         return ret_detection, ret_detection
91 | 
92 | if __name__ == '__main__':
93 |     magnet_detector = MagNetDetector(model, "magnet")  # fixed missing arguments; `model`: a trained Keras CIFAR-10 classifier (softmax last layer), assumed to be in scope
94 |     magnet_detector.train()
95 | 
96 |     X = magnet_detector.operator.data.test_data
97 |     Y_detected, _ = magnet_detector.test(X)
98 | 
99 |     print ("False positive rate: %f" % (np.sum(Y_detected)/float(len(X))))
100 | 
--------------------------------------------------------------------------------
/detections/magnet_mnist.py:
--------------------------------------------------------------------------------
1 | ## test_defense.py -- test defense
2 | ##
3 | ## Copyright (C) 2017, Dongyu Meng .
4 | ##
5 | ## This program is licenced under the BSD 2-Clause licence,
6 | ## contained in the LICENCE file in this directory.
7 | 
8 | from __future__ import absolute_import
9 | from __future__ import division
10 | from __future__ import print_function
11 | from __future__ import unicode_literals
12 | 
13 | # Load external module: MagNet
14 | import sys, os
15 | project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16 | sys.path.append(project_dir)
17 | 
18 | from externals.MagNet.setup_mnist import MNIST
19 | from externals.MagNet.utils import prepare_data
20 | from externals.MagNet.worker import AEDetector, SimpleReformer, IdReformer, AttackData, Classifier, Operator, Evaluator
21 | 
22 | import numpy as np
23 | import os
24 | 
25 | from keras.models import Model, Sequential
26 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda
27 | from keras.activations import softmax
28 | 
29 | class ClassifierWrapper:
30 |     def __init__(self, model):
31 |         """
32 |         Keras classifier wrapper.
33 |         Note that the wrapped classifier outputs logits.
34 | """ 35 | layer_id = len(model.layers)-2 36 | self.model = Model(inputs=model.layers[0].input, outputs=model.layers[layer_id].output) 37 | self.softmax = Sequential() 38 | self.softmax.add(Lambda(lambda X: softmax(X, axis=1), input_shape=(10,))) 39 | 40 | def classify(self, X, option="logit", T=1): 41 | if option == "logit": 42 | return self.model.predict(X) 43 | if option == "prob": 44 | logits = self.model.predict(X)/T 45 | return self.softmax.predict(logits) 46 | 47 | def print(self): 48 | return "Classifier:"+self.path.split("/")[-1] 49 | 50 | class MagNetDetector: 51 | def __init__(self, model, detector_name): 52 | cur_folder = os.path.dirname(os.path.abspath(__file__)) 53 | 54 | autoencoder_model_I_fpath = os.path.join(project_dir, "downloads/MagNet/defensive_models/MNIST_I") 55 | autoencoder_model_II_fpath = os.path.join(project_dir, "downloads/MagNet/defensive_models/MNIST_II") 56 | 57 | detector_I = AEDetector(autoencoder_model_I_fpath, p=2) 58 | detector_II = AEDetector(autoencoder_model_II_fpath, p=1) 59 | reformer = SimpleReformer(autoencoder_model_I_fpath) 60 | 61 | id_reformer = IdReformer() 62 | classifier = ClassifierWrapper(model) 63 | 64 | detector_dict = dict() 65 | detector_dict["I"] = detector_I 66 | detector_dict["II"] = detector_II 67 | 68 | self.operator = Operator(MNIST(), classifier, detector_dict, reformer) 69 | 70 | def train(self, X=None, Y=None, fpr=None): 71 | drop_rate={"I": 0.001, "II": 0.001} 72 | # drop_rate={"I": fpr*0.5, "II": fpr*0.5} 73 | print("\n==========================================================") 74 | print("Drop Rate:", drop_rate) 75 | self.thrs = self.operator.get_thrs(drop_rate) 76 | print("Thresholds:", self.thrs) 77 | 78 | def test(self, X): 79 | all_pass, detector_breakdown = self.operator.filter(X, self.thrs) 80 | print ("detector_breakdown", detector_breakdown) 81 | ret_detection = np.array([ False if i in all_pass else True for i in range(len(X)) ]) 82 | return ret_detection, ret_detection 83 | 84 | if __name__ == '__main__': 85 | magnet_detector = MagNetDetector() 86 | magnet_detector.train() 87 | 88 | X = magnet_detector.operator.data.test_data 89 | Y_detected, _ = magnet_detector.test(X) 90 | 91 | print ("False positive rate: %f" % (np.sum(Y_detected)/float(len(X)))) 92 | -------------------------------------------------------------------------------- /externals/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mzweilin/EvadeML-Zoo/4dbeee04874a836f79782802b2bf8a8612a2a86f/externals/__init__.py -------------------------------------------------------------------------------- /models/README.md: -------------------------------------------------------------------------------- 1 | # Pre-trained Models 2 | 3 | *Note*: Make sure the last layer in Keras model definition is the Softmax activation layer, *i.e.* model.layers[-2].output is the logits and model.layers[-1].output is the softmax activation, because some attack algorithms require the logits output. 
4 | 5 | ## Dataset: MNIST 6 | 7 | | Model Name | # Trainable Parameters | Testing Accuracy | Mean Confidence | 8 | |------------|-------------------------|------------------|------------------| 9 | | Cleverhans | 710,218 | 0.9919 | 0.8897 | 10 | | Carlini | 312,202 | 0.9943 | 0.9939 | 11 | | PGDbase | 3,274,634 | 0.9917 | 0.9915 | 12 | | PGDtrained | 3,274,634 | 0.9853 | 0.9777 | 13 | 14 | ## Dataset: CIFAR-10 15 | 16 | | Model Name | # Trainable Parameters | Testing Accuracy | Mean Confidence | 17 | |---------------------|--------------------------|------------------|------------------| 18 | | Carlini | 1,147,978 | 0.7796 | 0.7728 | 19 | | DenseNet(L=40,k=12) | 1,019,722 | 0.9484 | 0.9215 | 20 | 21 | 22 | ## Dataset: ImageNet (ILSVRC) 23 | 24 | | Model Name | # Trainable Parameters | Top-1 Accuracy | Top-5 Accuracy | 25 | |------------|-------------------------|------------------|-----------------| 26 | | MobileNet | 4,231,976 | 0.68360 | 0.88250 | 27 | |Inception v3| 23,817,352 | 0.76276 | 0.93032 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mzweilin/EvadeML-Zoo/4dbeee04874a836f79782802b2bf8a8612a2a86f/models/__init__.py -------------------------------------------------------------------------------- /models/carlini_models.py: -------------------------------------------------------------------------------- 1 | from keras.models import Sequential 2 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda 3 | from keras.layers import MaxPooling2D, Conv2D 4 | 5 | 6 | def carlini_mnist_model(logits=True, input_range_type=2, pre_filter=lambda x:x): 7 | input_shape=(28, 28, 1) 8 | nb_filters = 32 9 | nb_denses = [200,200,10] 10 | return carlini_model(input_shape, nb_filters, nb_denses, logits, input_range_type, pre_filter) 11 | 12 | 13 | def carlini_cifar10_model(logits=True, input_range_type=2, pre_filter=lambda x:x): 14 | input_shape=(32, 32, 3) 15 | nb_filters = 64 16 | nb_denses = [256,256,10] 17 | return carlini_model(input_shape, nb_filters, nb_denses, logits, input_range_type, pre_filter) 18 | 19 | 20 | def carlini_model(input_shape, nb_filters, nb_denses, logits, input_range_type, pre_filter): 21 | """ 22 | :params logits: return logits(input of softmax layer) if True; return softmax output otherwise. 23 | :params input_range_type: the expected input range, {1: [0,1], 2:[-0.5, 0.5], 3:[-1, 1]...} 24 | """ 25 | 26 | model = Sequential() 27 | 28 | if input_range_type == 1: 29 | # The input data range is [0, 1]. 30 | # Convert to [-0.5,0.5] by x-0.5. 31 | scaler = lambda x: x-0.5 32 | elif input_range_type == 2: 33 | # The input data range is [-0.5, 0.5]. 34 | # Don't need to do scaling for carlini models, as it is the input range by default. 35 | scaler = lambda x: x 36 | elif input_range_type == 3: 37 | # The input data range is [-1, 1]. Convert to [-0.5,0.5] by x/2. 
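# (Added sanity check, not in the original file.) Each branch of this scaler
# selection maps its expected input range onto [-0.5, 0.5]:
#   type 1:  0.0 -> -0.5 and 1.0 -> 0.5   (x - 0.5)
#   type 2:  already [-0.5, 0.5]          (identity)
#   type 3: -1.0 -> -0.5 and 1.0 -> 0.5   (x / 2)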
38 | scaler = lambda x: x/2 39 | 40 | model.add(Lambda(scaler, input_shape=input_shape)) 41 | model.add(Lambda(pre_filter, output_shape=input_shape)) 42 | 43 | model.add(Conv2D(nb_filters, (3, 3))) 44 | model.add(Activation('relu')) 45 | model.add(Conv2D(nb_filters, (3, 3))) 46 | model.add(Activation('relu')) 47 | model.add(MaxPooling2D(pool_size=(2, 2))) 48 | 49 | model.add(Conv2D(nb_filters*2, (3, 3))) 50 | model.add(Activation('relu')) 51 | model.add(Conv2D(nb_filters*2, (3, 3))) 52 | model.add(Activation('relu')) 53 | model.add(MaxPooling2D(pool_size=(2, 2))) 54 | 55 | model.add(Flatten()) 56 | model.add(Dense(nb_denses[0])) 57 | model.add(Activation('relu')) 58 | model.add(Dense(nb_denses[1])) 59 | model.add(Activation('relu')) 60 | model.add(Dense(nb_denses[2])) 61 | 62 | if not logits: 63 | model.add(Activation('softmax')) 64 | 65 | return model -------------------------------------------------------------------------------- /models/cleverhans_models.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 3 | 4 | from utils import load_externals 5 | from cleverhans.utils import conv_2d 6 | 7 | from keras.models import Sequential 8 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda 9 | from keras.layers import MaxPooling2D, Conv2D 10 | 11 | def cleverhans_mnist_model(logits=False, input_range_type=1, pre_filter=lambda x:x): 12 | input_shape = (28, 28, 1) 13 | nb_filters = 64 14 | nb_classes = 10 15 | return cleverhans_model(input_shape, nb_filters, nb_classes, logits, input_range_type, pre_filter) 16 | 17 | 18 | def cleverhans_cifar10_model(logits=False, input_range_type=1, pre_filter=lambda x:x): 19 | input_shape = (32, 32, 3) 20 | nb_filters = 64 21 | nb_classes = 10 22 | return cleverhans_model(input_shape, nb_filters, nb_classes, logits, input_range_type, pre_filter) 23 | 24 | 25 | def cleverhans_model(input_shape, nb_filters, nb_classes, logits, input_range_type, pre_filter): 26 | """ 27 | Defines a CNN model using Keras sequential model 28 | :params logits: return logits(input of softmax layer) if True; return softmax output otherwise. 29 | :params input_range_type: the expected input range, {1: [0,1], 2:[-0.5, 0.5], 3:[-1, 1]...} 30 | :return: 31 | """ 32 | model = Sequential() 33 | 34 | if input_range_type == 1: 35 | # The input data range is [0, 1]. 36 | # Don't need to do scaling for cleverhans models, as it is the input range by default. 37 | scaler = lambda x: x 38 | elif input_range_type == 2: 39 | # The input data range is [-0.5, 0.5]. Convert to [0,1] by adding 0.5 element-wise. 40 | scaler = lambda x: x+0.5 41 | elif input_range_type == 3: 42 | # The input data range is [-1, 1]. Convert to [0,1] by x/2+0.5. 
43 | scaler = lambda x: x/2+0.5 44 | 45 | layers = [Lambda(scaler, input_shape=input_shape)] 46 | layers += [Lambda(pre_filter, output_shape=input_shape)] 47 | 48 | layers += [Dropout(0.2), 49 | conv_2d(nb_filters, (8, 8), (2, 2), "same"), 50 | Activation('relu'), 51 | conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"), 52 | Activation('relu'), 53 | conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"), 54 | Activation('relu'), 55 | Dropout(0.5), 56 | Flatten(), 57 | Dense(nb_classes)] 58 | 59 | for layer in layers: 60 | model.add(layer) 61 | 62 | if not logits: 63 | model.add(Activation('softmax')) 64 | 65 | return model -------------------------------------------------------------------------------- /models/densenet_models.py: -------------------------------------------------------------------------------- 1 | from keras.applications.imagenet_utils import _obtain_input_shape 2 | from keras.utils.data_utils import get_file 3 | import keras.backend as K 4 | from keras.layers import Input 5 | from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D 6 | from keras.regularizers import l2 7 | from keras.layers.normalization import BatchNormalization 8 | from keras.layers.core import Dense, Dropout, Activation, Reshape 9 | from keras.layers.pooling import GlobalAveragePooling2D 10 | from keras.models import Model 11 | 12 | import sys, os 13 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 14 | 15 | from utils import load_externals 16 | from densenet import __transition_block, __dense_block, TF_WEIGHTS_PATH, TF_WEIGHTS_PATH_NO_TOP 17 | 18 | 19 | 20 | def get_densenet_weights_path(dataset_name="CIFAR-10", include_top=True): 21 | assert dataset_name == "CIFAR-10" 22 | if include_top: 23 | weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels.h5', 24 | TF_WEIGHTS_PATH, 25 | cache_subdir='models') 26 | else: 27 | weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels_no_top.h5', 28 | TF_WEIGHTS_PATH_NO_TOP, 29 | cache_subdir='models') 30 | return weights_path 31 | 32 | 33 | def densenet_cifar10_model(logits=False, input_range_type=1, pre_filter=lambda x:x): 34 | assert input_range_type == 1 35 | 36 | batch_size = 64 37 | nb_classes = 10 38 | 39 | img_rows, img_cols = 32, 32 40 | img_channels = 3 41 | 42 | img_dim = (img_channels, img_rows, img_cols) if K.image_dim_ordering() == "th" else (img_rows, img_cols, img_channels) 43 | depth = 40 44 | nb_dense_block = 3 45 | growth_rate = 12 46 | nb_filter = 16 47 | dropout_rate = 0.0 # 0.0 for data augmentation 48 | input_tensor = None 49 | include_top=True 50 | 51 | if logits is True: 52 | activation = None 53 | else: 54 | activation = "softmax" 55 | 56 | # Determine proper input shape 57 | input_shape = _obtain_input_shape(img_dim, 58 | default_size=32, 59 | min_size=8, 60 | data_format=K.image_data_format(), 61 | include_top=include_top) 62 | 63 | if input_tensor is None: 64 | img_input = Input(shape=input_shape) 65 | else: 66 | if not K.is_keras_tensor(input_tensor): 67 | img_input = Input(tensor=input_tensor, shape=input_shape) 68 | else: 69 | img_input = input_tensor 70 | 71 | x = __create_dense_net(nb_classes, img_input, True, depth, nb_dense_block, 72 | growth_rate, nb_filter, -1, False, 0.0, 73 | dropout_rate, 1E-4, activation) 74 | 75 | # Ensure that the model takes into account 76 | # any potential predecessors of `input_tensor`. 77 | if input_tensor is not None: 78 | inputs = get_source_inputs(input_tensor) 79 | else: 80 | inputs = img_input 81 | # Create model. 
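# (Added note.) This is the DenseNet(L=40, k=12) entry from models/README.md:
# depth=40 yields (40 - 4) / 3 = 12 layers per dense block, growth_rate=12
# filters are added per layer, and the resulting model has roughly 1.0M
# trainable parameters.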
82 |     model = Model(inputs, x, name='densenet')
83 |     return model
84 | 
85 | 
86 | # Source: https://github.com/titu1994/DenseNet
87 | def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1,
88 |                        nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1E-4,
89 |                        activation='softmax'):
90 |     ''' Build the DenseNet model
91 |     Args:
92 |         nb_classes: number of classes
93 |         img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
94 |         include_top: flag to include the final Dense layer
95 |         depth: number of layers
96 |         nb_dense_block: number of dense blocks to add to end (generally = 3)
97 |         growth_rate: number of filters to add per dense block
98 |         nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
99 |         nb_layers_per_block: number of layers in each dense block.
100 |                 Can be -1, a positive integer, or a list.
101 |                 If -1, calculates nb_layer_per_block from the depth of the network.
102 |                 If positive integer, a set number of layers per dense block.
103 |                 If list, nb_layer is used as provided. Note that list size must
104 |                 be (nb_dense_block + 1)
105 |         bottleneck: add bottleneck blocks
106 |         reduction: reduction factor of transition blocks. Note: reduction value is inverted to compute compression
107 |         dropout_rate: dropout rate
108 |         weight_decay: weight decay
109 |         activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
110 |                 Note that if sigmoid is used, classes must be 1.
111 |     Returns: keras tensor with nb_layers of conv_block appended
112 |     '''
113 | 
114 |     concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
115 | 
116 |     assert (depth - 4) % 3 == 0, 'Depth must be 3 N + 4'
117 |     if reduction != 0.0:
118 |         assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'
119 | 
120 |     # layers in each dense block
121 |     if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
122 |         nb_layers = list(nb_layers_per_block)  # Convert tuple to list
123 | 
124 |         assert len(nb_layers) == (nb_dense_block + 1), 'If list, nb_layer is used as provided. 
' \ 125 | 'Note that list size must be (nb_dense_block + 1)' 126 | final_nb_layer = nb_layers[-1] 127 | nb_layers = nb_layers[:-1] 128 | else: 129 | if nb_layers_per_block == -1: 130 | count = int((depth - 4) / 3) 131 | nb_layers = [count for _ in range(nb_dense_block)] 132 | final_nb_layer = count 133 | else: 134 | final_nb_layer = nb_layers_per_block 135 | nb_layers = [nb_layers_per_block] * nb_dense_block 136 | 137 | if bottleneck: 138 | nb_layers = [int(layer // 2) for layer in nb_layers] 139 | 140 | # compute initial nb_filter if -1, else accept users initial nb_filter 141 | if nb_filter <= 0: 142 | nb_filter = 2 * growth_rate 143 | 144 | # compute compression factor 145 | compression = 1.0 - reduction 146 | 147 | # Initial convolution 148 | x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_uniform', padding='same', name='initial_conv2D', 149 | use_bias=False, kernel_regularizer=l2(weight_decay))(img_input) 150 | 151 | # Add dense blocks 152 | for block_idx in range(nb_dense_block - 1): 153 | x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, bottleneck=bottleneck, 154 | dropout_rate=dropout_rate, weight_decay=weight_decay) 155 | # add transition_block 156 | x = __transition_block(x, nb_filter, compression=compression, dropout_rate=dropout_rate, 157 | weight_decay=weight_decay) 158 | nb_filter = int(nb_filter * compression) 159 | 160 | # The last dense_block does not have a transition_block 161 | x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck, 162 | dropout_rate=dropout_rate, weight_decay=weight_decay) 163 | 164 | x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay), 165 | beta_regularizer=l2(weight_decay))(x) 166 | x = Activation('relu')(x) 167 | x = GlobalAveragePooling2D()(x) 168 | 169 | if include_top: 170 | x = Dense(nb_classes, kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x) 171 | if activation != None: 172 | x = Activation(activation)(x) 173 | 174 | return x -------------------------------------------------------------------------------- /models/keras_models/__init__.py: -------------------------------------------------------------------------------- 1 | from .keras_models import keras_resnet50_imagenet_model 2 | from .keras_models import keras_vgg19_imagenet_model 3 | from .keras_models import keras_inceptionv3_imagenet_model -------------------------------------------------------------------------------- /models/keras_models/inceptionv3_model.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 3 | 4 | import tensorflow as tf 5 | from utils import load_externals 6 | from inception_v3 import * 7 | 8 | from keras.applications.imagenet_utils import _obtain_input_shape 9 | from keras.layers import Lambda, Activation 10 | 11 | # from .keras_models import scaling_tf 12 | 13 | import pdb 14 | 15 | 16 | def scaling_tf(X, input_range_type): 17 | """ 18 | Convert to [-1, 1]. 19 | """ 20 | if input_range_type == 1: 21 | # The input data range is [0, 1]. Convert to [-1, 1] by 22 | X = X - 0.5 23 | X = X * 2. 24 | elif input_range_type == 2: 25 | # The input data range is [-0.5, 0.5]. Convert to [-1,1] by 26 | X = X * 2. 27 | elif input_range_type == 3: 28 | # The input data range is [-1, 1]. 
29 | X = X 30 | 31 | return X 32 | 33 | 34 | def InceptionV3(include_top=True, 35 | weights='imagenet', 36 | input_tensor=None, 37 | input_shape=None, 38 | pooling=None, 39 | classes=1000, 40 | logits=False, 41 | input_range_type=1): 42 | """Instantiates the Inception v3 architecture. 43 | Optionally loads weights pre-trained 44 | on ImageNet. Note that when using TensorFlow, 45 | for best performance you should set 46 | `image_data_format="channels_last"` in your Keras config 47 | at ~/.keras/keras.json. 48 | The model and the weights are compatible with both 49 | TensorFlow and Theano. The data format 50 | convention used by the model is the one 51 | specified in your Keras config file. 52 | Note that the default input image size for this model is 299x299. 53 | Arguments: 54 | include_top: whether to include the fully-connected 55 | layer at the top of the network. 56 | weights: one of `None` (random initialization) 57 | or "imagenet" (pre-training on ImageNet). 58 | input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) 59 | to use as image input for the model. 60 | input_shape: optional shape tuple, only to be specified 61 | if `include_top` is False (otherwise the input shape 62 | has to be `(299, 299, 3)` (with `channels_last` data format) 63 | or `(3, 299, 299)` (with `channels_first` data format). 64 | It should have exactly 3 inputs channels, 65 | and width and height should be no smaller than 139. 66 | E.g. `(150, 150, 3)` would be one valid value. 67 | pooling: Optional pooling mode for feature extraction 68 | when `include_top` is `False`. 69 | - `None` means that the output of the model will be 70 | the 4D tensor output of the 71 | last convolutional layer. 72 | - `avg` means that global average pooling 73 | will be applied to the output of the 74 | last convolutional layer, and thus 75 | the output of the model will be a 2D tensor. 76 | - `max` means that global max pooling will 77 | be applied. 78 | classes: optional number of classes to classify images 79 | into, only to be specified if `include_top` is True, and 80 | if no `weights` argument is specified. 81 | Returns: 82 | A Keras model instance. 83 | Raises: 84 | ValueError: in case of invalid argument for `weights`, 85 | or invalid input shape. 
86 | """ 87 | if weights not in {'imagenet', None}: 88 | raise ValueError('The `weights` argument should be either ' 89 | '`None` (random initialization) or `imagenet` ' 90 | '(pre-training on ImageNet).') 91 | 92 | if weights == 'imagenet' and include_top and classes != 1000: 93 | raise ValueError('If using `weights` as imagenet with `include_top`' 94 | ' as true, `classes` should be 1000') 95 | 96 | # Determine proper input shape 97 | input_shape = _obtain_input_shape( 98 | input_shape, 99 | default_size=299, 100 | min_size=139, 101 | data_format=K.image_data_format(), 102 | include_top=include_top) 103 | 104 | if input_tensor is None: 105 | img_input = Input(shape=input_shape) 106 | else: 107 | img_input = Input(tensor=input_tensor, shape=input_shape) 108 | 109 | if K.image_data_format() == 'channels_first': 110 | channel_axis = 1 111 | else: 112 | channel_axis = 3 113 | 114 | # Scaling 115 | x = Lambda(lambda x: scaling_tf(x, input_range_type))(img_input) 116 | x = conv2d_bn(x, 32, 3, 3, strides=(2, 2), padding='valid') 117 | x = conv2d_bn(x, 32, 3, 3, padding='valid') 118 | x = conv2d_bn(x, 64, 3, 3) 119 | x = MaxPooling2D((3, 3), strides=(2, 2))(x) 120 | 121 | x = conv2d_bn(x, 80, 1, 1, padding='valid') 122 | x = conv2d_bn(x, 192, 3, 3, padding='valid') 123 | x = MaxPooling2D((3, 3), strides=(2, 2))(x) 124 | 125 | # mixed 0, 1, 2: 35 x 35 x 256 126 | branch1x1 = conv2d_bn(x, 64, 1, 1) 127 | 128 | branch5x5 = conv2d_bn(x, 48, 1, 1) 129 | branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) 130 | 131 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 132 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 133 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 134 | 135 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 136 | branch_pool = conv2d_bn(branch_pool, 32, 1, 1) 137 | x = layers.concatenate( 138 | [branch1x1, branch5x5, branch3x3dbl, branch_pool], 139 | axis=channel_axis, 140 | name='mixed0') 141 | 142 | # mixed 1: 35 x 35 x 256 143 | branch1x1 = conv2d_bn(x, 64, 1, 1) 144 | 145 | branch5x5 = conv2d_bn(x, 48, 1, 1) 146 | branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) 147 | 148 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 149 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 150 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 151 | 152 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 153 | branch_pool = conv2d_bn(branch_pool, 64, 1, 1) 154 | x = layers.concatenate( 155 | [branch1x1, branch5x5, branch3x3dbl, branch_pool], 156 | axis=channel_axis, 157 | name='mixed1') 158 | 159 | # mixed 2: 35 x 35 x 256 160 | branch1x1 = conv2d_bn(x, 64, 1, 1) 161 | 162 | branch5x5 = conv2d_bn(x, 48, 1, 1) 163 | branch5x5 = conv2d_bn(branch5x5, 64, 5, 5) 164 | 165 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 166 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 167 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 168 | 169 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 170 | branch_pool = conv2d_bn(branch_pool, 64, 1, 1) 171 | x = layers.concatenate( 172 | [branch1x1, branch5x5, branch3x3dbl, branch_pool], 173 | axis=channel_axis, 174 | name='mixed2') 175 | 176 | # mixed 3: 17 x 17 x 768 177 | branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid') 178 | 179 | branch3x3dbl = conv2d_bn(x, 64, 1, 1) 180 | branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3) 181 | branch3x3dbl = conv2d_bn( 182 | branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid') 183 | 184 | branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x) 185 | x = 
layers.concatenate( 186 | [branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3') 187 | 188 | # mixed 4: 17 x 17 x 768 189 | branch1x1 = conv2d_bn(x, 192, 1, 1) 190 | 191 | branch7x7 = conv2d_bn(x, 128, 1, 1) 192 | branch7x7 = conv2d_bn(branch7x7, 128, 1, 7) 193 | branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) 194 | 195 | branch7x7dbl = conv2d_bn(x, 128, 1, 1) 196 | branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) 197 | branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7) 198 | branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1) 199 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 200 | 201 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 202 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 203 | x = layers.concatenate( 204 | [branch1x1, branch7x7, branch7x7dbl, branch_pool], 205 | axis=channel_axis, 206 | name='mixed4') 207 | 208 | # mixed 5, 6: 17 x 17 x 768 209 | for i in range(2): 210 | branch1x1 = conv2d_bn(x, 192, 1, 1) 211 | 212 | branch7x7 = conv2d_bn(x, 160, 1, 1) 213 | branch7x7 = conv2d_bn(branch7x7, 160, 1, 7) 214 | branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) 215 | 216 | branch7x7dbl = conv2d_bn(x, 160, 1, 1) 217 | branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) 218 | branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7) 219 | branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1) 220 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 221 | 222 | branch_pool = AveragePooling2D( 223 | (3, 3), strides=(1, 1), padding='same')(x) 224 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 225 | x = layers.concatenate( 226 | [branch1x1, branch7x7, branch7x7dbl, branch_pool], 227 | axis=channel_axis, 228 | name='mixed' + str(5 + i)) 229 | 230 | # mixed 7: 17 x 17 x 768 231 | branch1x1 = conv2d_bn(x, 192, 1, 1) 232 | 233 | branch7x7 = conv2d_bn(x, 192, 1, 1) 234 | branch7x7 = conv2d_bn(branch7x7, 192, 1, 7) 235 | branch7x7 = conv2d_bn(branch7x7, 192, 7, 1) 236 | 237 | branch7x7dbl = conv2d_bn(x, 192, 1, 1) 238 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) 239 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 240 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1) 241 | branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7) 242 | 243 | branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x) 244 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 245 | x = layers.concatenate( 246 | [branch1x1, branch7x7, branch7x7dbl, branch_pool], 247 | axis=channel_axis, 248 | name='mixed7') 249 | 250 | # mixed 8: 8 x 8 x 1280 251 | branch3x3 = conv2d_bn(x, 192, 1, 1) 252 | branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, 253 | strides=(2, 2), padding='valid') 254 | 255 | branch7x7x3 = conv2d_bn(x, 192, 1, 1) 256 | branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7) 257 | branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1) 258 | branch7x7x3 = conv2d_bn( 259 | branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid') 260 | 261 | branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x) 262 | x = layers.concatenate( 263 | [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8') 264 | 265 | # mixed 9: 8 x 8 x 2048 266 | for i in range(2): 267 | branch1x1 = conv2d_bn(x, 320, 1, 1) 268 | 269 | branch3x3 = conv2d_bn(x, 384, 1, 1) 270 | branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3) 271 | branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1) 272 | branch3x3 = layers.concatenate( 273 | [branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i)) 274 | 275 | branch3x3dbl = conv2d_bn(x, 448, 1, 1) 276 | branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3) 277 | 
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3) 278 | branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1) 279 | branch3x3dbl = layers.concatenate( 280 | [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis) 281 | 282 | branch_pool = AveragePooling2D( 283 | (3, 3), strides=(1, 1), padding='same')(x) 284 | branch_pool = conv2d_bn(branch_pool, 192, 1, 1) 285 | x = layers.concatenate( 286 | [branch1x1, branch3x3, branch3x3dbl, branch_pool], 287 | axis=channel_axis, 288 | name='mixed' + str(9 + i)) 289 | if include_top: 290 | # Classification block 291 | x = GlobalAveragePooling2D(name='avg_pool')(x) 292 | x = Dense(classes, name='predictions')(x) 293 | if not logits: 294 | x = Activation('softmax')(x) 295 | else: 296 | if pooling == 'avg': 297 | x = GlobalAveragePooling2D()(x) 298 | elif pooling == 'max': 299 | x = GlobalMaxPooling2D()(x) 300 | 301 | # Ensure that the model takes into account 302 | # any potential predecessors of `input_tensor`. 303 | if input_tensor is not None: 304 | inputs = get_source_inputs(input_tensor) 305 | else: 306 | inputs = img_input 307 | # Create model. 308 | model = Model(inputs, x, name='inception_v3') 309 | 310 | # load weights 311 | if weights == 'imagenet': 312 | if K.image_data_format() == 'channels_first': 313 | if K.backend() == 'tensorflow': 314 | warnings.warn('You are using the TensorFlow backend, yet you ' 315 | 'are using the Theano ' 316 | 'image data format convention ' 317 | '(`image_data_format="channels_first"`). ' 318 | 'For best performance, set ' 319 | '`image_data_format="channels_last"` in ' 320 | 'your Keras config ' 321 | 'at ~/.keras/keras.json.') 322 | if include_top: 323 | weights_path = get_file( 324 | 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5', 325 | WEIGHTS_PATH, 326 | cache_subdir='models', 327 | md5_hash='9a0d58056eeedaa3f26cb7ebd46da564') 328 | else: 329 | weights_path = get_file( 330 | 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', 331 | WEIGHTS_PATH_NO_TOP, 332 | cache_subdir='models', 333 | md5_hash='bcbd6486424b2319ff4ef7d526e38f63') 334 | model.load_weights(weights_path) 335 | if K.backend() == 'theano': 336 | convert_all_kernels_in_model(model) 337 | return model -------------------------------------------------------------------------------- /models/keras_models/keras_models.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 3 | 4 | import tensorflow as tf 5 | 6 | VGG_MEAN = [103.939, 116.779, 123.68] 7 | 8 | def scaling_tf(X, input_range_type): 9 | """ 10 | Convert to [0, 255], then subtracting means, convert to BGR. 11 | """ 12 | 13 | if input_range_type == 1: 14 | # The input data range is [0, 1]. 15 | # Convert to [0, 255] by multiplying 255 16 | X = X*255 17 | elif input_range_type == 2: 18 | # The input data range is [-0.5, 0.5]. Convert to [0,255] by adding 0.5 element-wise. 19 | X = (X+0.5)*255 20 | elif input_range_type == 3: 21 | # The input data range is [-1, 1]. Convert to [0,1] by x/2+0.5. 22 | X = (X/2+0.5)*255 23 | 24 | # Caution: Resulting in zero gradients. 25 | # X_uint8 = tf.clip_by_value(tf.rint(X), 0, 255) 26 | red, green, blue = tf.split(X, 3, 3) 27 | X_bgr = tf.concat([ 28 | blue - VGG_MEAN[0], 29 | green - VGG_MEAN[1], 30 | red - VGG_MEAN[2], 31 | # TODO: swap 0 and 2. should be 2,1,0 according to Keras' original code. 
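# (Added note, hedged.) VGG_MEAN above is already ordered [blue, green, red],
# so subtracting VGG_MEAN[0] from blue, VGG_MEAN[1] from green, and VGG_MEAN[2]
# from red matches Keras' caffe-style preprocessing (RGB -> BGR, then subtract
# the per-channel means); swapping the indices as the TODO suggests would pair
# each channel with the wrong mean.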
32 |     ], 3)
33 | 
34 |     # x[:, :, :, 0] -= 103.939
35 |     # x[:, :, :, 1] -= 116.779
36 |     # x[:, :, :, 2] -= 123.68
37 |     return X_bgr
38 | 
39 | # It looks like the Keras Lambda layer doesn't support numpy operations.
40 | import numpy as np  # fixed: scaling_np below uses np.clip and np.rint
41 | from keras.applications.imagenet_utils import preprocess_input
42 | def scaling_np(X, scaling=False):
43 |     if scaling:
44 |         X = X + 0.5
45 |     X_uint8 = np.clip(np.rint(X*255), 0, 255)
46 |     X_bgr = preprocess_input(X_uint8)
47 |     return X_bgr
48 | 
49 | from .resnet50_model import ResNet50
50 | def keras_resnet50_imagenet_model(logits=False, input_range_type=1):
51 |     """
52 |     Run the prediction network *without softmax*.
53 |     """
54 |     input_shape = (224, 224, 3)
55 |     # if scaling:
56 |     #     x = x + 0.5
57 |     # x_bgr = scaling_tf(x)
58 |     model = ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=input_shape, pooling=None, classes=1000, logits=logits, input_range_type=input_range_type)
59 |     # predictions = model.outputs[0]
60 |     # return predictions
61 |     return model
62 | 
63 | from .vgg19_model import VGG19
64 | def keras_vgg19_imagenet_model(logits=False, input_range_type=1):
65 |     """
66 |     Run the prediction network *without softmax*.
67 |     """
68 |     input_shape = (224, 224, 3)
69 |     model = VGG19(include_top=True, weights='imagenet', input_tensor=None, input_shape=input_shape, pooling=None, classes=1000, logits=logits, input_range_type=input_range_type)
70 |     return model
71 | 
72 | from .inceptionv3_model import InceptionV3
73 | def keras_inceptionv3_imagenet_model(logits=False, input_range_type=1):
74 |     input_shape = (299, 299, 3)
75 |     model = InceptionV3(include_top=True,
76 |                         weights='imagenet',
77 |                         input_tensor=None,
78 |                         input_shape=input_shape,
79 |                         pooling=None,
80 |                         classes=1000,
81 |                         logits=logits,
82 |                         input_range_type=input_range_type)
83 |     return model
84 | 
85 | if __name__ == '__main__':
86 |     # Fixed: the model wrappers no longer take an input tensor.
87 |     model = keras_resnet50_imagenet_model()
--------------------------------------------------------------------------------
/models/keras_models/resnet50_model.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
3 | 
4 | import tensorflow as tf
5 | from utils import load_externals
6 | from resnet50 import *
7 | 
8 | from keras.applications.imagenet_utils import _obtain_input_shape
9 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda
10 | 
11 | from .keras_models import scaling_tf
12 | 
13 | # Override the original definition
14 | def ResNet50(include_top=True, weights='imagenet',
15 |              input_tensor=None, input_shape=None,
16 |              pooling=None,
17 |              classes=1000,
18 |              logits=False,
19 |              input_range_type=1):
20 |     """Instantiates the ResNet50 architecture.
21 | 
22 |     Optionally loads weights pre-trained
23 |     on ImageNet. Note that when using TensorFlow,
24 |     for best performance you should set
25 |     `image_data_format="channels_last"` in your Keras config
26 |     at ~/.keras/keras.json.
27 | 
28 |     The model and the weights are compatible with both
29 |     TensorFlow and Theano. The data format
30 |     convention used by the model is the one
31 |     specified in your Keras config file.
32 | 
33 |     # Arguments
34 |         include_top: whether to include the fully-connected
35 |             layer at the top of the network.
36 |         weights: one of `None` (random initialization)
37 |             or "imagenet" (pre-training on ImageNet).
38 |         input_tensor: optional Keras tensor (i.e.
output of `layers.Input()`)
39 |             to use as image input for the model.
40 |         input_shape: optional shape tuple, only to be specified
41 |             if `include_top` is False (otherwise the input shape
42 |             has to be `(224, 224, 3)` (with `channels_last` data format)
43 |             or `(3, 224, 224)` (with `channels_first` data format).
44 |             It should have exactly 3 input channels,
45 |             and width and height should be no smaller than 197.
46 |             E.g. `(200, 200, 3)` would be one valid value.
47 |         pooling: Optional pooling mode for feature extraction
48 |             when `include_top` is `False`.
49 |             - `None` means that the output of the model will be
50 |                 the 4D tensor output of the
51 |                 last convolutional layer.
52 |             - `avg` means that global average pooling
53 |                 will be applied to the output of the
54 |                 last convolutional layer, and thus
55 |                 the output of the model will be a 2D tensor.
56 |             - `max` means that global max pooling will
57 |                 be applied.
58 |         classes: optional number of classes to classify images
59 |             into, only to be specified if `include_top` is True, and
60 |             if no `weights` argument is specified.
61 | 
62 |     # Returns
63 |         A Keras model instance.
64 | 
65 |     # Raises
66 |         ValueError: in case of invalid argument for `weights`,
67 |             or invalid input shape.
68 |     """
69 |     if weights not in {'imagenet', None}:
70 |         raise ValueError('The `weights` argument should be either '
71 |                          '`None` (random initialization) or `imagenet` '
72 |                          '(pre-training on ImageNet).')
73 | 
74 |     if weights == 'imagenet' and include_top and classes != 1000:
75 |         raise ValueError('If using `weights` as imagenet with `include_top`'
76 |                          ' as true, `classes` should be 1000')
77 | 
78 |     # Determine proper input shape
79 |     input_shape = _obtain_input_shape(input_shape,
80 |                                       default_size=224,
81 |                                       min_size=197,
82 |                                       data_format=K.image_data_format(),
83 |                                       include_top=include_top)
84 | 
85 |     if input_tensor is None:
86 |         img_input = Input(shape=input_shape)
87 |     else:
88 |         if not K.is_keras_tensor(input_tensor):
89 |             img_input = Input(tensor=input_tensor, shape=input_shape)
90 |         else:
91 |             img_input = input_tensor
92 |     if K.image_data_format() == 'channels_last':
93 |         bn_axis = 3
94 |     else:
95 |         bn_axis = 1
96 | 
97 |     x = Lambda(lambda x: scaling_tf(x, input_range_type))(img_input)
98 |     x = ZeroPadding2D((3, 3))(x)
99 | 
100 |     # x = ZeroPadding2D((3, 3))(img_input)
101 |     x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
102 |     x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
103 |     x = Activation('relu')(x)
104 |     x = MaxPooling2D((3, 3), strides=(2, 2))(x)
105 | 
106 |     x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
107 |     x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
108 |     x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
109 | 
110 |     x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
111 |     x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
112 |     x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
113 |     x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
114 | 
115 |     x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
116 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
117 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
118 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
119 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
120 |     x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
121 | 
122 |     x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
123 |     x = 
identity_block(x, 3, [512, 512, 2048], stage=5, block='b') 124 | x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') 125 | 126 | x = AveragePooling2D((7, 7), name='avg_pool')(x) 127 | 128 | if include_top: 129 | x = Flatten()(x) 130 | x = Dense(classes, name='fc1000')(x) 131 | if not logits: 132 | x = Activation('softmax')(x) 133 | else: 134 | if pooling == 'avg': 135 | x = GlobalAveragePooling2D()(x) 136 | elif pooling == 'max': 137 | x = GlobalMaxPooling2D()(x) 138 | 139 | # Ensure that the model takes into account 140 | # any potential predecessors of `input_tensor`. 141 | if input_tensor is not None: 142 | inputs = get_source_inputs(input_tensor) 143 | else: 144 | inputs = img_input 145 | # Create model. 146 | model = Model(inputs, x, name='resnet50') 147 | 148 | # load weights 149 | if weights == 'imagenet': 150 | if include_top: 151 | weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5', 152 | WEIGHTS_PATH, 153 | cache_subdir='models', 154 | md5_hash='a7b3fe01876f51b976af0dea6bc144eb') 155 | else: 156 | weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', 157 | WEIGHTS_PATH_NO_TOP, 158 | cache_subdir='models', 159 | md5_hash='a268eb855778b3df3c7506639542a6af') 160 | model.load_weights(weights_path) 161 | if K.backend() == 'theano': 162 | layer_utils.convert_all_kernels_in_model(model) 163 | 164 | if K.image_data_format() == 'channels_first': 165 | if include_top: 166 | maxpool = model.get_layer(name='avg_pool') 167 | shape = maxpool.output_shape[1:] 168 | dense = model.get_layer(name='fc1000') 169 | layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first') 170 | 171 | if K.backend() == 'tensorflow': 172 | warnings.warn('You are using the TensorFlow backend, yet you ' 173 | 'are using the Theano ' 174 | 'image data format convention ' 175 | '(`image_data_format="channels_first"`). ' 176 | 'For best performance, set ' 177 | '`image_data_format="channels_last"` in ' 178 | 'your Keras config ' 179 | 'at ~/.keras/keras.json.') 180 | return model -------------------------------------------------------------------------------- /models/keras_models/vgg19_model.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 3 | 4 | import tensorflow as tf 5 | from utils import load_externals 6 | from vgg19 import * 7 | 8 | from keras.applications.imagenet_utils import _obtain_input_shape 9 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda 10 | 11 | from .keras_models import scaling_tf 12 | 13 | def VGG19(include_top=True, weights='imagenet', 14 | input_tensor=None, input_shape=None, 15 | pooling=None, 16 | classes=1000, 17 | logits=False, 18 | input_range_type=1): 19 | """Instantiates the VGG19 architecture. 20 | 21 | Optionally loads weights pre-trained 22 | on ImageNet. Note that when using TensorFlow, 23 | for best performance you should set 24 | `image_data_format="channels_last"` in your Keras config 25 | at ~/.keras/keras.json. 26 | 27 | The model and the weights are compatible with both 28 | TensorFlow and Theano. The data format 29 | convention used by the model is the one 30 | specified in your Keras config file. 31 | 32 | # Arguments 33 | include_top: whether to include the 3 fully-connected 34 | layers at the top of the network. 35 | weights: one of `None` (random initialization) 36 | or "imagenet" (pre-training on ImageNet). 
37 |         input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
38 |             to use as image input for the model.
39 |         input_shape: optional shape tuple, only to be specified
40 |             if `include_top` is False (otherwise the input shape
41 |             has to be `(224, 224, 3)` (with `channels_last` data format)
42 |             or `(3, 224, 224)` (with `channels_first` data format).
43 |             It should have exactly 3 input channels,
44 |             and width and height should be no smaller than 48.
45 |             E.g. `(200, 200, 3)` would be one valid value.
46 |         pooling: Optional pooling mode for feature extraction
47 |             when `include_top` is `False`.
48 |             - `None` means that the output of the model will be
49 |                 the 4D tensor output of the
50 |                 last convolutional layer.
51 |             - `avg` means that global average pooling
52 |                 will be applied to the output of the
53 |                 last convolutional layer, and thus
54 |                 the output of the model will be a 2D tensor.
55 |             - `max` means that global max pooling will
56 |                 be applied.
57 |         classes: optional number of classes to classify images
58 |             into, only to be specified if `include_top` is True, and
59 |             if no `weights` argument is specified.
60 | 
61 |     # Returns
62 |         A Keras model instance.
63 | 
64 |     # Raises
65 |         ValueError: in case of invalid argument for `weights`,
66 |             or invalid input shape.
67 |     """
68 |     if weights not in {'imagenet', None}:
69 |         raise ValueError('The `weights` argument should be either '
70 |                          '`None` (random initialization) or `imagenet` '
71 |                          '(pre-training on ImageNet).')
72 | 
73 |     if weights == 'imagenet' and include_top and classes != 1000:
74 |         raise ValueError('If using `weights` as imagenet with `include_top`'
75 |                          ' as true, `classes` should be 1000')
76 |     # Determine proper input shape
77 |     input_shape = _obtain_input_shape(input_shape,
78 |                                       default_size=224,
79 |                                       min_size=48,
80 |                                       data_format=K.image_data_format(),
81 |                                       include_top=include_top)
82 | 
83 |     if input_tensor is None:
84 |         img_input = Input(shape=input_shape)
85 |     else:
86 |         if not K.is_keras_tensor(input_tensor):
87 |             img_input = Input(tensor=input_tensor, shape=input_shape)
88 |         else:
89 |             img_input = input_tensor
90 |     # Scaling
91 |     x = Lambda(lambda x: scaling_tf(x, input_range_type))(img_input)
92 |     # Block 1
93 |     x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
94 |     x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
95 |     x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
96 | 
97 |     # Block 2
98 |     x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
99 |     x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
100 |     x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
101 | 
102 |     # Block 3
103 |     x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
104 |     x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
105 |     x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
106 |     x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
107 |     x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
108 | 
109 |     # Block 4
110 |     x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
111 |     x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
112 |     x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
113 |     x = Conv2D(512, (3, 3), activation='relu', padding='same', 
name='block4_conv4')(x) 114 | x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) 115 | 116 | # Block 5 117 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x) 118 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x) 119 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x) 120 | x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x) 121 | x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) 122 | 123 | if include_top: 124 | # Classification block 125 | x = Flatten(name='flatten')(x) 126 | x = Dense(4096, activation='relu', name='fc1')(x) 127 | x = Dense(4096, activation='relu', name='fc2')(x) 128 | x = Dense(classes, name='predictions')(x) 129 | if not logits: 130 | x = Activation('softmax')(x) 131 | else: 132 | if pooling == 'avg': 133 | x = GlobalAveragePooling2D()(x) 134 | elif pooling == 'max': 135 | x = GlobalMaxPooling2D()(x) 136 | 137 | # Ensure that the model takes into account 138 | # any potential predecessors of `input_tensor`. 139 | if input_tensor is not None: 140 | inputs = get_source_inputs(input_tensor) 141 | else: 142 | inputs = img_input 143 | # Create model. 144 | model = Model(inputs, x, name='vgg19') 145 | 146 | # load weights 147 | if weights == 'imagenet': 148 | if include_top: 149 | weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5', 150 | WEIGHTS_PATH, 151 | cache_subdir='models') 152 | else: 153 | weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', 154 | WEIGHTS_PATH_NO_TOP, 155 | cache_subdir='models') 156 | model.load_weights(weights_path) 157 | if K.backend() == 'theano': 158 | layer_utils.convert_all_kernels_in_model(model) 159 | 160 | if K.image_data_format() == 'channels_first': 161 | if include_top: 162 | maxpool = model.get_layer(name='block5_pool') 163 | shape = maxpool.output_shape[1:] 164 | dense = model.get_layer(name='fc1') 165 | layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first') 166 | 167 | if K.backend() == 'tensorflow': 168 | warnings.warn('You are using the TensorFlow backend, yet you ' 169 | 'are using the Theano ' 170 | 'image data format convention ' 171 | '(`image_data_format="channels_first"`). ' 172 | 'For best performance, set ' 173 | '`image_data_format="channels_last"` in ' 174 | 'your Keras config ' 175 | 'at ~/.keras/keras.json.') 176 | return model 177 | -------------------------------------------------------------------------------- /models/mobilenets_model/__init__.py: -------------------------------------------------------------------------------- 1 | from .mobilenets_model import mobilenet_imagenet_model 2 | -------------------------------------------------------------------------------- /models/mobilenets_model/mobilenets_model.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 3 | 4 | from utils import load_externals 5 | 6 | from mobilenets import * 7 | from mobilenets import _obtain_input_shape, __conv_block, __depthwise_conv_block 8 | 9 | from keras.layers import Lambda 10 | 11 | def scaling_tf(X, input_range_type): 12 | """ 13 | Convert to [-1, 1]. 14 | """ 15 | if input_range_type == 1: 16 | # The input data range is [0, 1]. Convert to [-1, 1] by 17 | X = X - 0.5 18 | X = X * 2. 19 | elif input_range_type == 2: 20 | # The input data range is [-0.5, 0.5]. 
Convert to [-1,1] by
21 |         X = X * 2.
22 |     elif input_range_type == 3:
23 |         # The input data range is [-1, 1].
24 |         X = X
25 | 
26 |     return X
27 | 
28 | 
29 | def __create_mobilenet(classes, img_input, include_top, alpha, depth_multiplier, dropout, pooling, logits):
30 |     ''' Creates a MobileNet model with specified parameters
31 |     Args:
32 |         classes: Number of output classes
33 |         img_input: Input tensor or layer
34 |         include_top: Flag to include the last dense layer
35 |         alpha: width multiplier of the MobileNet.
36 |         depth_multiplier: depth multiplier for depthwise convolution
37 |             (the number of depthwise output channels per input channel; not the paper's resolution multiplier)
38 |         dropout: dropout rate
39 |         pooling: Optional pooling mode for feature extraction
40 |             when `include_top` is `False`.
41 |             - `None` means that the output of the model will be
42 |                 the 4D tensor output of the
43 |                 last convolutional layer.
44 |             - `avg` means that global average pooling
45 |                 will be applied to the output of the
46 |                 last convolutional layer, and thus
47 |                 the output of the model will be a 2D tensor.
48 |             - `max` means that global max pooling will
49 |                 be applied.
50 |     Returns: a Keras Model
51 |     '''
52 | 
53 |     x = __conv_block(img_input, 32, alpha, strides=(2, 2))
54 |     x = __depthwise_conv_block(x, 64, alpha, depth_multiplier, id=1)
55 | 
56 |     x = __depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), id=2)
57 |     x = __depthwise_conv_block(x, 128, alpha, depth_multiplier, id=3)
58 | 
59 |     x = __depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), id=4)
60 |     x = __depthwise_conv_block(x, 256, alpha, depth_multiplier, id=5)
61 | 
62 |     x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), id=6)
63 |     x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=7)
64 |     x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=8)
65 |     x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=9)
66 |     x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=10)
67 |     x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=11)
68 | 
69 |     x = __depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), id=12)
70 |     x = __depthwise_conv_block(x, 1024, alpha, depth_multiplier, id=13)
71 | 
72 |     if include_top:
73 |         if K.image_data_format() == 'channels_first':
74 |             shape = (int(1024 * alpha), 1, 1)
75 |         else:
76 |             shape = (1, 1, int(1024 * alpha))
77 | 
78 |         x = GlobalAveragePooling2D()(x)
79 |         x = Reshape(shape, name='reshape_1')(x)
80 |         x = Dropout(dropout, name='dropout')(x)
81 |         x = Convolution2D(classes, (1, 1), padding='same', name='conv_preds')(x)
82 |         # Reshape from (?, 1, 1, 1000) to (?, 1000)
83 |         x = Reshape((classes,), name='reshape_2')(x)
84 | 
85 |         # Move Reshape before Activation. Otherwise, Cleverhans gets confused in fetching the logits output.
86 |         if not logits:
87 |             x = Activation('softmax', name='activation')(x)
88 |     else:
89 |         if pooling == 'avg':
90 |             x = GlobalAveragePooling2D()(x)
91 |         elif pooling == 'max':
92 |             x = GlobalMaxPooling2D()(x)
93 |     return x
94 | 
95 | 
96 | def MobileNets(input_shape=None, alpha=1.0, depth_multiplier=1,
97 |                dropout=1e-3, include_top=True, weights='imagenet',
98 |                input_tensor=None, pooling=None, classes=1000,
99 |                logits=False, input_range_type=1, pre_filter=lambda x:x):
100 |     ''' Instantiate the MobileNet architecture.
101 |         Note that only TensorFlow is supported for now,
102 |         therefore it only works with the data format
103 |         `image_data_format='channels_last'` in your Keras config
104 |         at ~/.keras/keras.json.
105 |     # Arguments
106 |         input_shape: optional shape tuple, only to be specified
107 |             if `include_top` is False (otherwise the input shape
108 |             has to be `(224, 224, 3)` (with `channels_last` data format)
109 |             or (3, 224, 224) (with `channels_first` data format).
110 |             It should have exactly 3 input channels,
111 |             and width and height should be no smaller than 32.
112 |             E.g. `(200, 200, 3)` would be one valid value.
113 |         alpha: width multiplier of the MobileNet.
114 |         depth_multiplier: depth multiplier for depthwise convolution
115 |             (the number of depthwise output channels per input channel; not the paper's resolution multiplier)
116 |         dropout: dropout rate
117 |         include_top: whether to include the fully-connected
118 |             layer at the top of the network.
119 |         weights: `None` (random initialization) or
120 |             `imagenet` (ImageNet weights)
121 |         input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
122 |             to use as image input for the model.
123 |         pooling: Optional pooling mode for feature extraction
124 |             when `include_top` is `False`.
125 |             - `None` means that the output of the model will be
126 |                 the 4D tensor output of the
127 |                 last convolutional layer.
128 |             - `avg` means that global average pooling
129 |                 will be applied to the output of the
130 |                 last convolutional layer, and thus
131 |                 the output of the model will be a 2D tensor.
132 |             - `max` means that global max pooling will
133 |                 be applied.
134 |         classes: optional number of classes to classify images
135 |             into, only to be specified if `include_top` is True, and
136 |             if no `weights` argument is specified.
137 |     # Returns
138 |         A Keras model instance.
139 |     '''
140 | 
141 |     if K.backend() == 'theano':
142 |         raise AttributeError('Theano backend is not currently supported, '
143 |                              'as Theano does not support depthwise convolution yet.')
144 | 
145 |     if weights not in {'imagenet', None}:
146 |         raise ValueError('The `weights` argument should be either '
147 |                          '`None` (random initialization) or `imagenet` '
148 |                          '(pre-training on ImageNet).')
149 | 
150 |     if weights == 'imagenet' and include_top and classes != 1000:
151 |         raise ValueError('If using `weights` as ImageNet with `include_top`'
152 |                          ' as true, `classes` should be 1000')
153 | 
154 |     if weights == 'imagenet':
155 |         assert depth_multiplier == 1, "If imagenet weights are being loaded, depth multiplier must be 1"
156 | 
157 |         assert alpha in [0.25, 0.50, 0.75, 1.0], "If imagenet weights are being loaded, alpha can be one of " \
158 |                                                  "`0.25`, `0.50`, `0.75` or `1.0` only."
159 | 
160 |         if alpha == 1.0:
161 |             alpha_text = "1_0"
162 |         elif alpha == 0.75:
163 |             alpha_text = "7_5"
164 |         elif alpha == 0.50:
165 |             alpha_text = "5_0"
166 |         else:
167 |             alpha_text = "2_5"
168 | 
169 |         rows, cols = (0, 1) if K.image_data_format() == 'channels_last' else (1, 2)
170 | 
171 |         rows = int(input_shape[rows])
172 |         cols = int(input_shape[cols])
173 | 
174 |         assert rows == cols and rows in [None, 128, 160, 192, 224], "If imagenet weights are being loaded, " \
175 |                                                                     "image must be square and be one of " \
176 |                                                                     "(128,128), (160,160), (192,192), or (224, 224). " \
177 |                                                                     "Given (%d, %d)" % (rows, cols)
178 | 
179 |     # Determine proper input shape. Note, include_top is False by default, as
180 |     # input shape can be anything larger than 32x32 and the same number of parameters
181 |     # will be used.
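# (Added usage sketch, not in the original file.) The wrapper at the bottom of
# this module reduces all of the above to a one-liner; `X` is assumed to be a
# batch of (n, 224, 224, 3) floats in [0, 1] (i.e. input_range_type=1):
#
#     model = mobilenet_imagenet_model(logits=False, input_range_type=1,
#                                      pre_filter=lambda x: x)
#     probs = model.predict(X)
#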
182 | input_shape = _obtain_input_shape(input_shape, 183 | default_size=224, 184 | min_size=32, 185 | data_format=K.image_data_format(), 186 | include_top=False) 187 | 188 | # If input shape is still None, provide a default input shape 189 | if input_shape is None: 190 | input_shape = (224, 224, 3) if K.image_data_format() == 'channels_last' else (3, 224, 224) 191 | 192 | if input_tensor is None: 193 | img_input = Input(shape=input_shape) 194 | else: 195 | if not K.is_keras_tensor(input_tensor): 196 | img_input = Input(tensor=input_tensor, shape=input_shape) 197 | else: 198 | img_input = input_tensor 199 | 200 | # Scaling 201 | # x = __create_mobilenet(classes, img_input, include_top, alpha, depth_multiplier, dropout, pooling, logits) 202 | x = Lambda(lambda x: scaling_tf(x, input_range_type))(img_input) 203 | x = Lambda(pre_filter, output_shape=input_shape)(x) 204 | x = __create_mobilenet(classes, x, include_top, alpha, depth_multiplier, dropout, pooling, logits) 205 | 206 | # Ensure that the model takes into account 207 | # any potential predecessors of `input_tensor`. 208 | if input_tensor is not None: 209 | inputs = get_source_inputs(input_tensor) 210 | else: 211 | inputs = img_input 212 | # Create model. 213 | model = Model(inputs, x, name='mobilenet') 214 | 215 | # load weights 216 | if weights == 'imagenet': 217 | if K.image_data_format() == 'channels_first': 218 | raise AttributeError('Weights for the channels_first data format are not available.') 219 | 220 | if (alpha == 1.) and (depth_multiplier == 1.): 221 | if include_top: 222 | model_name = "mobilenet_%s_%d_tf.h5" % (alpha_text, rows) 223 | weight_path = BASE_WEIGHT_PATH + model_name 224 | weights_path = get_file(model_name, 225 | weight_path, 226 | cache_subdir='models') 227 | else: 228 | model_name = "mobilenet_%s_%d_tf_no_top.h5" % (alpha_text, rows) 229 | weight_path = BASE_WEIGHT_PATH + model_name 230 | weights_path = get_file(model_name, 231 | weight_path, 232 | cache_subdir='models') 233 | 234 | model.load_weights(weights_path) 235 | 236 | return model 237 | 238 | 239 | def mobilenet_imagenet_model(logits=False, input_range_type=1, pre_filter=lambda x:x): 240 | input_shape = (224, 224, 3) 241 | model = MobileNets(input_shape=input_shape, alpha=1.0, depth_multiplier=1, 242 | dropout=1e-3, include_top=True, weights='imagenet', 243 | input_tensor=None, pooling=None, classes=1000, 244 | logits=logits, input_range_type=input_range_type, pre_filter=pre_filter) 245 | return model -------------------------------------------------------------------------------- /models/pgdtrained_models.py: -------------------------------------------------------------------------------- 1 | """ 2 | The model is adapted from 3 | https://github.com/MadryLab/mnist_challenge/blob/master/model.py 4 | """ 5 | 6 | from keras.models import Sequential 7 | from keras.layers import Lambda, Conv2D, MaxPooling2D, Reshape, Dense, Activation 8 | 9 | def pgdtrained_mnist_model(logits=True, input_range_type=1, pre_filter=lambda x:x): 10 | model = Sequential() 11 | 12 | input_shape = (28, 28, 1) 13 | 14 | if input_range_type == 1: 15 | # The input data range is [0, 1]. 16 | # No scaling needed: [0, 1] is the default input range for this model. 17 | scaler = lambda x: x 18 | elif input_range_type == 2: 19 | # The input data range is [-0.5, 0.5]. Convert to [0,1] by adding 0.5 element-wise. 20 | scaler = lambda x: x+0.5 21 | elif input_range_type == 3: 22 | # The input data range is [-1, 1]. Convert to [0,1] by x/2+0.5.
23 | scaler = lambda x: x/2+0.5 24 | 25 | model.add(Lambda(scaler, input_shape=input_shape)) 26 | model.add(Lambda(pre_filter, output_shape=input_shape)) 27 | 28 | model.add(Conv2D(filters=32, 29 | kernel_size=(5, 5), 30 | strides=(1, 1), 31 | padding='same', 32 | activation='relu', 33 | )) 34 | model.add(MaxPooling2D(pool_size=(2, 2), 35 | strides=(2, 2), 36 | padding='same', 37 | )) 38 | 39 | model.add(Conv2D(filters=64, 40 | kernel_size=(5, 5), 41 | strides=(1, 1), 42 | padding='same', 43 | activation='relu', 44 | )) 45 | model.add(MaxPooling2D(pool_size=(2, 2), 46 | strides=(2, 2), 47 | padding='same', 48 | )) 49 | 50 | model.add(Reshape((7*7*64,))) 51 | model.add(Dense(units=1024, activation='relu')) 52 | model.add(Dense(units=10)) 53 | if not logits: 54 | model.add(Activation('softmax')) 55 | 56 | return model 57 | -------------------------------------------------------------------------------- /models/tohinz_models.py: -------------------------------------------------------------------------------- 1 | from keras.models import Sequential 2 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda 3 | from keras.layers import MaxPooling2D, Conv2D 4 | from keras.layers.normalization import BatchNormalization 5 | 6 | 7 | def tohinz_svhn_model(logits=False, input_range_type=2, pre_filter=lambda x:x): 8 | input_shape=(32, 32, 3) 9 | nb_filters = 32 10 | nb_denses = [512,10] 11 | return tohinz_model(input_shape, nb_filters, nb_denses, logits, input_range_type, pre_filter) 12 | 13 | 14 | 15 | def tohinz_model(input_shape, nb_filters, nb_denses, logits, input_range_type, pre_filter): 16 | model = Sequential() 17 | 18 | if input_range_type == 1: 19 | # The input data range is [0, 1]. 20 | # Convert to [-0.5,0.5] by x-0.5. 21 | scaler = lambda x: x-0.5 22 | elif input_range_type == 2: 23 | # The input data range is [-0.5, 0.5]. 24 | # No scaling needed: [-0.5, 0.5] is the default input range for this model. 25 | scaler = lambda x: x 26 | elif input_range_type == 3: 27 | # The input data range is [-1, 1]. Convert to [-0.5,0.5] by x/2.
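# (For reference, the input_range_type convention shared by the model builders in
# this repo is: 1 -> inputs in [0, 1], 2 -> inputs in [-0.5, 0.5], 3 -> inputs in [-1, 1].
# A minimal sketch of this model's three scalers, written as a dict purely for
# illustration -- the dict itself is not part of the original code:
#   scalers = {1: lambda x: x - 0.5, 2: lambda x: x, 3: lambda x: x / 2.})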
28 | scaler = lambda x: x/2 29 | 30 | model.add(Lambda(scaler, input_shape=input_shape)) 31 | model.add(Lambda(pre_filter, output_shape=input_shape)) 32 | 33 | model.add(Conv2D(nb_filters, kernel_size=3, input_shape=input_shape, padding="same")) 34 | model.add(BatchNormalization()) 35 | model.add(Activation('relu')) 36 | model.add(Conv2D(nb_filters, 3, padding="same")) 37 | model.add(BatchNormalization()) 38 | model.add(Activation('relu')) 39 | model.add(MaxPooling2D(pool_size=2)) 40 | model.add(Dropout(0.3)) 41 | 42 | model.add(Conv2D(nb_filters*2, 3)) 43 | model.add(BatchNormalization()) 44 | model.add(Activation('relu')) 45 | model.add(Conv2D(nb_filters*2, 3, padding="same")) 46 | model.add(BatchNormalization()) 47 | model.add(Activation('relu')) 48 | model.add(MaxPooling2D(pool_size=2)) 49 | model.add(Dropout(0.3)) 50 | 51 | model.add(Conv2D(nb_filters*4, 3)) 52 | model.add(BatchNormalization()) 53 | model.add(Activation('relu')) 54 | model.add(Conv2D(nb_filters*4, 3, padding="same")) 55 | model.add(BatchNormalization()) 56 | model.add(Activation('relu')) 57 | model.add(MaxPooling2D(pool_size=2)) 58 | model.add(Dropout(0.3)) 59 | 60 | model.add(Flatten()) 61 | model.add(Dense(nb_denses[0], activation='relu')) 62 | model.add(Dropout(0.3)) 63 | 64 | model.add(Dense(nb_denses[1])) 65 | 66 | if not logits: 67 | model.add(Activation('softmax')) 68 | 69 | return model 70 | -------------------------------------------------------------------------------- /requirements_cpu.txt: -------------------------------------------------------------------------------- 1 | tensorflow==1.1.0 2 | keras==2.0.4 3 | numpy==1.13.3 4 | matplotlib 5 | h5py 6 | pillow 7 | scikit-learn 8 | click 9 | future 10 | opencv-python 11 | tinydb 12 | -------------------------------------------------------------------------------- /requirements_gpu.txt: -------------------------------------------------------------------------------- 1 | tensorflow-gpu==1.1.0 2 | keras==2.0.4 3 | numpy==1.13.3 4 | matplotlib 5 | h5py 6 | pillow 7 | scikit-learn 8 | click 9 | future 10 | opencv-python 11 | tinydb 12 | -------------------------------------------------------------------------------- /robustness/__init__.py: -------------------------------------------------------------------------------- 1 | from base import evaluate_robustness -------------------------------------------------------------------------------- /robustness/base.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import hashlib 7 | import sys, os 8 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 9 | 10 | from datasets import calculate_accuracy 11 | from datasets.visualization import show_imgs_in_rows 12 | 13 | 14 | class RobustClassifierBase: 15 | def __init__(self, model, rc_name): 16 | self.model_predict = lambda x: model.predict(x) 17 | 18 | def predict(self, X): 19 | return self.model_predict(X) 20 | 21 | from .feature_squeezing import FeatureSqueezingRC 22 | from .magnet import MagNetRC 23 | 24 | def get_robust_classifier_by_name(model, rc_name): 25 | if rc_name.startswith('Base') or rc_name.startswith('none'): 26 | rc = RobustClassifierBase(model, rc_name) 27 | elif rc_name.startswith("FeatureSqueezing"): 28 | rc = FeatureSqueezingRC(model, rc_name) 29 | elif rc_name.startswith("MagNet"): 30 | rc = MagNetRC(model, rc_name) 31 | 
else: 32 | raise Exception("Unknown robust classifier [%s]" % rc_name) 33 | return rc 34 | 35 | def evaluate_robustness(params_str, model, Y, X, Y_adv, attack_string_list, X_adv_list, fname_prefix, selected_idx_vis, result_folder): 36 | if not os.path.isdir(result_folder): 37 | os.makedirs(result_folder) 38 | robustness_string_hash = hashlib.sha1(params_str.encode('utf-8')).hexdigest()[:5] 39 | csv_fname = "%s_%s.csv" % (fname_prefix, robustness_string_hash) 40 | csv_fpath = os.path.join(result_folder, csv_fname) 41 | print ("Saving robustness test results at %s" % csv_fpath) 42 | 43 | RC_names = [ele.strip() for ele in params_str.split(';') if ele.strip() != ''] 44 | 45 | accuracy_rows = [] 46 | fieldnames = ['RobustClassifier', 'legitimate_%d' % len(X)] + attack_string_list 47 | 48 | selected_idx_vis = selected_idx_vis[:10] 49 | legitimate_examples = X[selected_idx_vis] 50 | 51 | for RC_name in RC_names: 52 | rc = get_robust_classifier_by_name(model, RC_name) 53 | accuracy_rec = {} 54 | accuracy_rec['RobustClassifier'] = RC_name 55 | 56 | accuracy = calculate_accuracy(rc.predict(X), Y) 57 | accuracy_rec['legitimate_%d' % len(X)] = accuracy 58 | 59 | img_fpath = os.path.join(result_folder, '%s_%s.png' % (fname_prefix, RC_name) ) 60 | rows = [legitimate_examples] 61 | 62 | for i, attack_name in enumerate(attack_string_list): 63 | X_adv = X_adv_list[i] 64 | if hasattr(rc, 'visualize_and_predict'): 65 | X_adv_filtered, Y_pred_adv = rc.visualize_and_predict(X_adv) 66 | rows += map(lambda x:x[selected_idx_vis], [X_adv, X_adv_filtered]) 67 | else: 68 | Y_pred_adv = rc.predict(X_adv) 69 | accuracy = calculate_accuracy(Y_pred_adv, Y_adv) 70 | accuracy_rec[attack_name] = accuracy 71 | 72 | accuracy_rows.append(accuracy_rec) 73 | 74 | # Visualize the filtered images. 75 | if len(rows) > 1: 76 | show_imgs_in_rows(rows, img_fpath) 77 | 78 | # Output in a CSV file.
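# A sketch of one `accuracy_rec` row as it reaches the writer below; the
# classifier and attack names here are illustrative, not values from an actual run:
#   {'RobustClassifier': 'FeatureSqueezing?squeezer=bit_depth_1',
#    'legitimate_100': 0.97, '<attack_name>': 0.84}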
79 | import csv 80 | with open(csv_fpath, 'w') as csvfile: 81 | writer = csv.DictWriter(csvfile, fieldnames=fieldnames) 82 | writer.writeheader() 83 | for row in accuracy_rows: 84 | writer.writerow(row) 85 | 86 | -------------------------------------------------------------------------------- /robustness/feature_squeezing.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import sys, os 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | 9 | from utils.squeeze import get_squeezer_by_name 10 | from utils.parameter_parser import parse_params 11 | 12 | class FeatureSqueezingRC: 13 | def __init__(self, keras_model, rc_name): 14 | # Example of rc_name: FeatureSqueezing?squeezer=bit_depth_1 15 | self.model_predict = lambda x: keras_model.predict(x) 16 | subject, params = parse_params(rc_name) 17 | assert subject == 'FeatureSqueezing' 18 | 19 | if params.has_key('squeezer'): 20 | self.filter = get_squeezer_by_name(params['squeezer'], 'python') 21 | elif params.has_key('squeezers'): 22 | squeezer_names = params['squeezers'].split(',') 23 | self.filters = [ get_squeezer_by_name(squeezer_name, 'python') for squeezer_name in squeezer_names ] 24 | 25 | def filter_func(x, funcs): 26 | x_p = x 27 | for func in funcs: 28 | x_p = func(x_p) 29 | return x_p 30 | 31 | self.filter = lambda x: filter_func(x, self.filters) 32 | 33 | 34 | 35 | def predict(self, X): 36 | X_filtered = self.filter(X) 37 | Y_pred = self.model_predict(X_filtered) 38 | return Y_pred 39 | 40 | def visualize_and_predict(self, X): 41 | X_filtered = self.filter(X) 42 | Y_pred = self.model_predict(X_filtered) 43 | return X_filtered, Y_pred -------------------------------------------------------------------------------- /robustness/magnet.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import sys, os 7 | project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 8 | sys.path.append(project_path) 9 | 10 | from tensorflow.python.platform import flags 11 | FLAGS = flags.FLAGS 12 | 13 | from utils.parameter_parser import parse_params 14 | from externals.MagNet.worker import SimpleReformer 15 | 16 | mnist_autoencoder_fpath = os.path.join(project_path, "downloads/MagNet/defensive_models/MNIST_I") 17 | cifar10_autoencoder_fpath = os.path.join(project_path, "downloads/MagNet/defensive_models/CIFAR") 18 | 19 | class MagNetRC: 20 | def __init__(self, keras_model, rc_name): 21 | # Example of rc_name: FeatureSqueezing?squeezer=bit_depth_1 22 | self.model_predict = lambda x: keras_model.predict(x) 23 | subject, params = parse_params(rc_name) 24 | assert subject == 'MagNet' 25 | 26 | if FLAGS.dataset_name == "MNIST": 27 | self.reformer = SimpleReformer(mnist_autoencoder_fpath) 28 | elif FLAGS.dataset_name == "CIFAR-10": 29 | self.reformer = SimpleReformer(cifar10_autoencoder_fpath) 30 | 31 | self.filter = lambda x: self.reformer.heal(x) 32 | 33 | def predict(self, X): 34 | X_filtered = self.filter(X) 35 | Y_pred = self.model_predict(X_filtered) 36 | return Y_pred 37 | 38 | def visualize_and_predict(self, X): 39 | X_filtered = self.filter(X) 40 | Y_pred = self.model_predict(X_filtered) 41 | return 
X_filtered, Y_pred -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mzweilin/EvadeML-Zoo/4dbeee04874a836f79782802b2bf8a8612a2a86f/utils/__init__.py -------------------------------------------------------------------------------- /utils/load_externals.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | 3 | external_libs = {'Cleverhans': "externals/cleverhans", 4 | "Carlini_nn_robust_attacks": "externals/carlini", 5 | "Keras-deep-learning-models": "externals/keras_models", 6 | "MobileNets": "externals/MobileNetworks", 7 | "Deepfool/Universal": "externals/universal/python", 8 | "DenseNet": "externals/titu1994/DenseNet", 9 | "MagNet": "externals/MagNet", 10 | } 11 | 12 | project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) 13 | 14 | for lib_name, lib_path in external_libs.items(): 15 | lib_path = os.path.join(project_path, lib_path) 16 | 17 | # Drop an empty __init__.py token into each submodule so it is importable. 18 | # cleverhans already ships as a proper package; Carlini's code lives in a subdirectory. 19 | if lib_name == 'Carlini_nn_robust_attacks': 20 | lib_token_fpath = os.path.join(lib_path, 'nn_robust_attacks', '__init__.py') 21 | elif lib_name == 'Cleverhans': 22 | lib_token_fpath = None 23 | else: 24 | lib_token_fpath = os.path.join(lib_path, '__init__.py') 25 | 26 | if lib_token_fpath is not None and not os.path.isfile(lib_token_fpath): 27 | open(lib_token_fpath, 'a').close() 28 | 29 | sys.path.append(lib_path) 30 | print("Located %s" % lib_name) 31 | -------------------------------------------------------------------------------- /utils/median.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | # http://stackoverflow.com/a/43554072/1864688 4 | 5 | def pad_amount(k): 6 | added = k - 1 7 | # note: this imitates scipy, which puts more at the beginning 8 | end = added // 2 9 | start = added - end 10 | return [start, end] 11 | 12 | def neighborhood(x, kh, kw): 13 | # input: N, H, W, C 14 | # output: N, H, W, KH, KW, C 15 | # padding imitates scipy's 'reflect' mode, which maps to tf's 'SYMMETRIC' 16 | xs = tf.shape(x) 17 | x_pad = tf.pad(x, ([0, 0], pad_amount(kh), pad_amount(kw), [0, 0]), 'SYMMETRIC') 18 | return tf.reshape(tf.extract_image_patches(x_pad, 19 | [1, kh, kw, 1], 20 | [1, 1, 1, 1], 21 | [1, 1, 1, 1], 22 | 'VALID'), 23 | (xs[0], xs[1], xs[2], kh, kw, xs[3])) 24 | 25 | def median_filter(x, kh, kw=-1): 26 | if kw == -1: 27 | kw = kh 28 | neigh_size = kh * kw 29 | xs = tf.shape(x) 30 | # get neighborhoods in shape (whatever, neigh_size) 31 | x_neigh = neighborhood(x, kh, kw) 32 | x_neigh = tf.transpose(x_neigh, (0, 1, 2, 5, 3, 4)) # N, H, W, C, KH, KW 33 | x_neigh = tf.reshape(x_neigh, (-1, neigh_size)) 34 | # note: this imitates
scipy, which doesn't average with an even number of elements 35 | # get half, but rounded up 36 | rank = neigh_size - neigh_size // 2 37 | x_top, _ = tf.nn.top_k(x_neigh, rank) 38 | # bottom of top half should be middle 39 | x_mid = x_top[:, -1] 40 | return tf.reshape(x_mid, (xs[0], xs[1], xs[2], xs[3])) 41 | 42 | def median_filter_no_reshape(x, kh, kw): 43 | neigh_size = kh * kw 44 | xs = tf.shape(x) 45 | # get neighborhoods in shape (whatever, neigh_size) 46 | x_neigh = neighborhood(x, kh, kw) 47 | x_neigh = tf.transpose(x_neigh, (0, 1, 2, 5, 3, 4)) # N, H, W, C, KH, KW 48 | x_neigh = tf.reshape(x_neigh, (-1, neigh_size)) 49 | # note: this imitates scipy, which doesn't average with an even number of elements 50 | # get half, but rounded up 51 | rank = neigh_size - neigh_size // 2 52 | x_top, _ = tf.nn.top_k(x_neigh, rank) 53 | # bottom of top half should be middle 54 | x_mid = x_top[:, -1] 55 | # return tf.reshape(x_mid, (xs[0], xs[1], xs[2], xs[3])) 56 | return x_mid 57 | 58 | def median_random_filter(x, kh, kw): 59 | neigh_size = kh * kw 60 | xs = tf.shape(x) 61 | # get neighborhoods in shape (whatever, neigh_size) 62 | x_neigh = neighborhood(x, kh, kw) 63 | x_neigh = tf.transpose(x_neigh, (0, 1, 2, 5, 3, 4)) # N, H, W, C, KH, KW 64 | x_neigh = tf.reshape(x_neigh, (-1, neigh_size)) 65 | # note: this imitates scipy, which doesn't average with an even number of elements 66 | # get half, but rounded up 67 | rank = neigh_size - neigh_size // 2 68 | rand_int = tf.cast(tf.truncated_normal([1], mean=0, stddev=neigh_size/4)[0], tf.int32) 69 | x_top, _ = tf.nn.top_k(x_neigh, rank+rand_int) 70 | # bottom of top half should be middle 71 | x_mid = x_top[:, -1] 72 | return tf.reshape(x_mid, (xs[0], xs[1], xs[2], xs[3])) 73 | 74 | def median_random_filter_no_reshape(x, kh, kw): 75 | neigh_size = kh * kw 76 | xs = tf.shape(x) 77 | # get neighborhoods in shape (whatever, neigh_size) 78 | x_neigh = neighborhood(x, kh, kw) 79 | x_neigh = tf.transpose(x_neigh, (0, 1, 2, 5, 3, 4)) # N, H, W, C, KH, KW 80 | x_neigh = tf.reshape(x_neigh, (-1, neigh_size)) 81 | # note: this imitates scipy, which doesn't average with an even number of elements 82 | # get half, but rounded up 83 | rank = neigh_size - neigh_size // 2 84 | rand_int = tf.cast(tf.truncated_normal([1], mean=0, stddev=neigh_size/4)[0], tf.int32) 85 | x_top, _ = tf.nn.top_k(x_neigh, rank+rand_int) 86 | # bottom of top half should be middle 87 | x_mid = x_top[:, -1] 88 | return x_mid 89 | 90 | def median_random_pos_size_filter(x, kh, kw): 91 | # Compute several candidate medians with random window sizes, 92 | # then randomly select one of them per pixel.
93 | s0 = median_random_filter_no_reshape(x, 2, 2) 94 | s1 = median_random_filter_no_reshape(x, 3, 3) 95 | s2 = median_random_filter_no_reshape(x, 4, 4) 96 | 97 | xs = tf.shape(x) 98 | nb_pixels = xs[0] * xs[1] * xs[2] * xs[3] 99 | samples_mnd = tf.squeeze(tf.multinomial(tf.log([[10., 10., 10.]]), nb_pixels)) 100 | 101 | # return tf.constant([0]*nb_pixels, dtype=tf.int64) 102 | zeros = tf.zeros([nb_pixels], dtype=tf.int64) 103 | ones = tf.ones([nb_pixels], dtype=tf.int64) 104 | twos = tf.ones([nb_pixels], dtype=tf.int64)*2 105 | # tmp = tf.cast(tf.equal(samples_mnd, tf.zeros([nb_pixels], dtype=tf.int64)), tf.int64) 106 | # return zeros, ones, twos 107 | 108 | selected_0 = tf.cast(tf.equal(samples_mnd, zeros), tf.float32) 109 | selected_1 = tf.cast(tf.equal(samples_mnd, ones), tf.float32) 110 | selected_2 = tf.cast(tf.equal(samples_mnd, twos), tf.float32) 111 | 112 | # return s0, selected_0 113 | x_mid = tf.add_n( [tf.multiply(s0, selected_0), tf.multiply(s1, selected_1), tf.multiply(s2, selected_2)] ) 114 | 115 | return tf.reshape(x_mid, (xs[0], xs[1], xs[2], xs[3])) 116 | 117 | 118 | def median_random_size_filter(x, kh, kw): 119 | # Compute two candidate medians with different window sizes, 120 | # then randomly select one of them per pixel. 121 | s0 = median_filter_no_reshape(x, 2, 2) 122 | s1 = median_filter_no_reshape(x, 3, 3) 123 | # s2 = median_filter_no_reshape(x, 4, 4) 124 | 125 | xs = tf.shape(x) 126 | nb_pixels = xs[0] * xs[1] * xs[2] * xs[3] 127 | samples_mnd = tf.squeeze(tf.multinomial(tf.log([[10., 10.]]), nb_pixels)) 128 | 129 | # return tf.constant([0]*nb_pixels, dtype=tf.int64) 130 | zeros = tf.zeros([nb_pixels], dtype=tf.int64) 131 | ones = tf.ones([nb_pixels], dtype=tf.int64) 132 | # twos = tf.ones([nb_pixels], dtype=tf.int64)*2 133 | # tmp = tf.cast(tf.equal(samples_mnd, tf.zeros([nb_pixels], dtype=tf.int64)), tf.int64) 134 | # return zeros, ones, twos 135 | 136 | selected_0 = tf.cast(tf.equal(samples_mnd, zeros), tf.float32) 137 | selected_1 = tf.cast(tf.equal(samples_mnd, ones), tf.float32) 138 | # selected_2 = tf.cast(tf.equal(samples_mnd, twos), tf.float32) 139 | 140 | # return s0, selected_0 141 | # x_mid = tf.add_n( [tf.multiply(s0, selected_0), tf.multiply(s1, selected_1), tf.multiply(s2, selected_2)] ) 142 | x_mid = tf.add_n( [tf.multiply(s0, selected_0), tf.multiply(s1, selected_1)] ) 143 | 144 | return tf.reshape(x_mid, (xs[0], xs[1], xs[2], xs[3])) 145 | 146 | 147 | if __name__ == '__main__': 148 | import numpy as np 149 | from scipy import ndimage 150 | sess = tf.Session() 151 | 152 | X = tf.placeholder(shape=(None, 4, 4, None), dtype=tf.float32) 153 | f = median_filter(X, 3, 3) 154 | f_rand = median_random_pos_size_filter(X, 3, 3) 155 | l = f[0, 1, 1, 0] 156 | g = tf.gradients([l], [X]) 157 | 158 | vec = np.asarray([[[[0, 16], [1, 17], [2, 18], [3, 19]], 159 | [[4, 20], [5, 21], [6, 22], [7, 23]], 160 | [[8, 24], [9, 25], [10, 26], [11, 27]], 161 | [[12, 28], [13, 29], [14, 30], [15, 31]]]], dtype=np.float32) 162 | vec2 = np.asarray([[[[3, 16], [3, 17], [3, 18], [3, 19]], 163 | [[1, 20], [1, 21], [1, 22], [7, 23]], 164 | [[1, 24], [2, 25], [3, 26], [11, 27]], 165 | [[12, 28], [13, 29], [14, 30], [15, 31]]]], dtype=np.float32) 166 | 167 | print ("vec:", vec) 168 | mnp = ndimage.filters.median_filter(vec, size=(1, 3, 3, 1), mode='reflect') 169 | print ("mnp", mnp) 170 | mtf = sess.run(f, feed_dict={X: vec}) 171 | print ("mtf", mtf) 172 | 173 | 174 | mtf_rand_1 = sess.run(f_rand, feed_dict={X: vec}) 175 | mtf_rand_2 = sess.run(f_rand, feed_dict={X: vec}) 176 | print ("mtf_rand_1", mtf_rand_1) 177 | print
("mtf_rand_2", mtf_rand_2) 178 | 179 | print ("equal", np.array_equal(mnp, mtf)) 180 | print ("equal", np.array_equal(mnp, mtf_rand_1)) 181 | print ("equal", np.array_equal(mtf_rand_1, mtf_rand_2)) 182 | 183 | # print sess.run(g, feed_dict={X: vec}) 184 | 185 | from scipy import misc 186 | 187 | image = misc.imread('panda.png') 188 | images = np.expand_dims(image, axis=0) 189 | 190 | X2 = tf.placeholder(shape=(None, 299, 299, None), dtype=tf.float32) 191 | image_median = median_filter(X2, 3, 3) 192 | image_random_median = median_random_pos_size_filter(X2, 3, 3) 193 | 194 | images_blur = sess.run(image_median, feed_dict={X2: images}) 195 | images_rand_blur = sess.run(image_random_median, feed_dict={X2: images}) 196 | 197 | from PIL import Image 198 | 199 | names = ['panda_orig.png', 'panda_blur_3_3.png', 'panda_rand_blur.png'] 200 | for i, img in enumerate([images, images_blur, images_rand_blur]): 201 | img = Image.fromarray(np.squeeze(img).astype(np.uint8), 'RGB') 202 | img.save(names[i]) 203 | img.show() 204 | 205 | -------------------------------------------------------------------------------- /utils/output.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import os 3 | import sys 4 | 5 | def disablePrint(log_fpath=None): 6 | sys.stdout.flush() 7 | if log_fpath is None: 8 | log_fpath = os.devnull 9 | sys.stdout = open(log_fpath, 'w') 10 | 11 | 12 | def enablePrint(): 13 | sys.stdout.flush() 14 | log_f = sys.stdout 15 | sys.stdout = sys.__stdout__ 16 | log_f.close() 17 | 18 | 19 | def write_to_csv(li, fpath, fieldnames): 20 | with open(fpath, 'w') as csvfile: 21 | writer = csv.DictWriter(csvfile, fieldnames=fieldnames) 22 | writer.writeheader() 23 | for di in li: 24 | writer.writerow(di) 25 | 26 | 27 | def formatter(start, end, step): 28 | return '{}-{}:{}'.format(start, end, step) 29 | 30 | 31 | def format_number_range(lst): 32 | """ 33 | Format a list of numbers to a string of ranges. 
34 | Source: https://stackoverflow.com/a/9855078 35 | """ 36 | n = len(lst) 37 | result = [] 38 | scan = 0 39 | while n - scan > 2: 40 | step = lst[scan + 1] - lst[scan] 41 | if lst[scan + 2] - lst[scan + 1] != step: 42 | result.append(str(lst[scan])) 43 | scan += 1 44 | continue 45 | 46 | for j in range(scan+2, n-1): 47 | if lst[j+1] - lst[j] != step: 48 | result.append(formatter(lst[scan], lst[j], step)) 49 | scan = j+1 50 | break 51 | else: 52 | result.append(formatter(lst[scan], lst[-1], step)) 53 | return ','.join(result) 54 | 55 | if n - scan == 1: 56 | result.append(str(lst[scan])) 57 | elif n - scan == 2: 58 | result.append(','.join(map(str, lst[scan:]))) 59 | 60 | return ','.join(result) 61 | 62 | 63 | def save_task_descriptor(result_folder, to_csv): 64 | task = to_csv[0] 65 | fname = "%s_%d_%s_%s_task_desc.csv" % (task['dataset_name'], task['test_set_selected_length'], task['test_set_selected_idx_hash'][:5], task['model_name']) 66 | fpath = os.path.join(result_folder, fname) 67 | 68 | fieldnames = ['dataset_name', 'model_name', 'accuracy_test', 'mean_confidence_test', \ 69 | 'test_set_selected_length', 'test_set_selected_idx_ranges', 'test_set_selected_idx_hash', \ 70 | 'accuracy_test_selected', 'mean_confidence_test_selected'] 71 | write_to_csv(to_csv, fpath, fieldnames) 72 | -------------------------------------------------------------------------------- /utils/parameter_parser.py: -------------------------------------------------------------------------------- 1 | from future.standard_library import install_aliases 2 | install_aliases() 3 | from urllib import parse as urlparse 4 | import numpy as np 5 | 6 | def isfloat(value): 7 | try: 8 | float(value) 9 | return True 10 | except: 11 | return False 12 | 13 | 14 | def parse_params(params_str): 15 | if '?' not in params_str: 16 | return params_str, {} 17 | subject, params_str = params_str.split('?') 18 | params = urlparse.parse_qs(params_str) 19 | # parse_qs yields lists; unwrap single values, keep multi-valued entries as lists. 20 | params = dict((k, v if len(v) > 1 else v[0]) for k, v in params.items()) 21 | 22 | # Data type conversion. 23 | integer_parameter_names = ['batch_size', 'max_iterations', 'num_classes', 'max_iter', 'nb_iter', 'max_iter_df'] 24 | for k,v in params.items(): 25 | if k in integer_parameter_names: 26 | params[k] = int(v) 27 | elif v == 'true': 28 | params[k] = True 29 | elif v == 'false': 30 | params[k] = False 31 | elif v == 'inf': 32 | params[k] = np.inf 33 | elif isfloat(v): 34 | params[k] = float(v) 35 | 36 | return subject, params -------------------------------------------------------------------------------- /utils/squeeze.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from scipy import ndimage 4 | 5 | from .median import median_filter as median_filter_tf 6 | from .median import median_random_filter as median_random_filter_tf 7 | from .median import median_random_pos_size_filter as median_random_pos_size_filter_tf 8 | from .median import median_random_size_filter as median_random_size_filter_tf 9 | 10 | 11 | def reduce_precision_py(x, npp): 12 | """ 13 | Reduce the precision of image, the numpy version. 14 | :param x: a float tensor, which has been scaled to [0, 1]. 15 | :param npp: number of possible values per pixel. E.g. it's 256 for 8-bit gray-scale image, and 2 for binarized image. 16 | :return: a tensor representing image(s) with lower precision. 17 | """ 18 | # Note: 0 is a possible value too.
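# Worked example of the arithmetic below: with npp=2 (binarization), npp_int=1,
# so x=0.7 -> rint(0.7)=1 -> 1.0; with npp=4 the levels are {0, 1/3, 2/3, 1},
# so x=0.4 -> rint(0.4*3)=rint(1.2)=1 -> 1/3.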
19 | npp_int = npp - 1 20 | x_int = np.rint(x * npp_int) 21 | x_float = x_int / npp_int 22 | return x_float 23 | 24 | def reduce_precision_tf(x, npp): 25 | """ 26 | Reduce the precision of image, the tensorflow version. 27 | """ 28 | npp_int = npp - 1 29 | x_int = tf.rint(tf.multiply(x, npp_int)) 30 | x_float = tf.div(x_int, npp_int) 31 | return x_float 32 | 33 | 34 | def bit_depth_py(x, bits): 35 | precisions = 2**bits 36 | return reduce_precision_py(x, precisions) 37 | 38 | def bit_depth_tf(x, bits): 39 | precisions = 2**bits 40 | return reduce_precision_tf(x, precisions) 41 | 42 | def bit_depth_random_py(x, bits, stddev): 43 | if stddev == 0.: 44 | rand_array = np.zeros(x.shape) 45 | else: 46 | rand_array = np.random.normal(loc=0., scale=stddev, size=x.shape) 47 | x_random = np.add(x, rand_array) 48 | return bit_depth_py(x_random, bits) 49 | 50 | def bit_depth_random_tf(x, bits, stddev): 51 | rand_ts = tf.random_normal(x.get_shape(), mean=0, stddev=stddev) 52 | x_random = tf.add(x, rand_ts) 53 | return bit_depth_tf(x_random, bits) 54 | 55 | def binary_filter_tf(x, threshold): 56 | x_bin = tf.nn.relu(tf.sign(x-threshold)) 57 | return x_bin 58 | 59 | def binary_filter_py(x, threshold): 60 | x_bin = np.maximum(np.sign(x-threshold), 0) 61 | return x_bin 62 | 63 | def binary_random_filter_tf(x, threshold, stddev=0.125): 64 | rand_ts = tf.random_normal(x.get_shape(), mean=0, stddev=stddev) 65 | x_bin = tf.nn.relu(tf.sign(tf.add(x,rand_ts)-threshold)) 66 | return x_bin 67 | 68 | def binary_random_filter_py(x, threshold, stddev=0.125): 69 | if stddev == 0.: 70 | rand_array = np.zeros(x.shape) 71 | else: 72 | rand_array = np.random.normal(loc=0., scale=stddev, size=x.shape) 73 | x_bin = np.maximum(np.sign(np.add(x, rand_array)-threshold), 0) 74 | return x_bin 75 | 76 | 77 | def median_filter_py(x, width, height=-1): 78 | """ 79 | Median smoothing by Scipy. 80 | :param x: a tensor of image(s) 81 | :param width: the width of the sliding window (number of pixels) 82 | :param height: the height of the window. The same as width by default. 83 | :return: a modified tensor with the same shape as x. 84 | """ 85 | if height == -1: 86 | height = width 87 | return ndimage.filters.median_filter(x, size=(1,width,height,1), mode='reflect') 88 | 89 | def median_random_filter_py(x, width, height=-1): 90 | if height == -1: height = width 91 | init_op = tf.global_variables_initializer() 92 | with tf.Session() as sess: 93 | sess.run(init_op) 94 | x = tf.constant(x) 95 | res = median_random_filter_tf(x, width, height) 96 | return res.eval() 97 | 98 | def median_random_pos_size_filter_py(x, width, height=-1): 99 | # width/height are ignored here: the tf op draws its own window sizes. 100 | init_op = tf.global_variables_initializer() 101 | with tf.Session() as sess: 102 | sess.run(init_op) 103 | x = tf.constant(x) 104 | res = median_random_pos_size_filter_tf(x, width, height) 105 | return res.eval() 106 | 107 | def median_random_size_filter_py(x, width, height=-1): 108 | # width/height are ignored here: the tf op draws its own window sizes. 109 | init_op = tf.global_variables_initializer() 110 | with tf.Session() as sess: 111 | sess.run(init_op) 112 | x = tf.constant(x) 113 | res = median_random_size_filter_tf(x, width, height) 114 | return res.eval() 115 | 116 | 117 | # Squeezers implemented in OpenCV 118 | # OpenCV expects uint8 as image data type.
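# The wrapper below therefore round-trips every image through uint8:
#   float in [0, 1] -> np.clip(np.rint(x * 255), 0, 255).astype(np.uint8)
#   -> opencv_func(img_uint8, *argv) -> .astype(np.float32) / 255. back to [0, 1].
# E.g. a pixel at 0.5 maps to rint(127.5) = 128 (round-half-to-even) and comes
# back as 128/255 ~= 0.502, so a tiny quantization error is expected.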
119 | def opencv_wrapper(imgs, opencv_func, argv): 120 | ret_imgs = [] 121 | imgs_copy = imgs 122 | 123 | if imgs.shape[3] == 1: 124 | imgs_copy = np.squeeze(imgs) 125 | 126 | for img in imgs_copy: 127 | img_uint8 = np.clip(np.rint(img * 255), 0, 255).astype(np.uint8) 128 | ret_img = opencv_func(*[img_uint8]+argv) 129 | if type(ret_img) == tuple: 130 | ret_img = ret_img[1] 131 | ret_img = ret_img.astype(np.float32) / 255. 132 | ret_imgs.append(ret_img) 133 | ret_imgs = np.stack(ret_imgs) 134 | 135 | if imgs.shape[3] == 1: 136 | ret_imgs = np.expand_dims(ret_imgs, axis=3) 137 | 138 | return ret_imgs 139 | 140 | 141 | # Binary filters. 142 | def adaptive_binarize_py(x, block_size=5, C=33.8): 143 | "Works like an edge detector." 144 | # ADAPTIVE_THRESH_GAUSSIAN_C, ADAPTIVE_THRESH_MEAN_C 145 | # THRESH_BINARY, THRESH_BINARY_INV 146 | import cv2 147 | ret_imgs = opencv_wrapper(x, cv2.adaptiveThreshold, [255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, block_size, C]) 148 | return ret_imgs 149 | 150 | def otsu_binarize_py(x): 151 | # func = lambda img: cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1] 152 | # return opencv_binarize(x, func) 153 | import cv2 154 | ret_imgs = opencv_wrapper(x, cv2.threshold, [0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU]) 155 | return ret_imgs 156 | 157 | 158 | # Non-local Means 159 | def non_local_means_color_py(imgs, search_window, block_size, photo_render): 160 | import cv2 161 | ret_imgs = opencv_wrapper(imgs, cv2.fastNlMeansDenoisingColored, [None,photo_render,photo_render,block_size,search_window]) 162 | return ret_imgs 163 | 164 | def non_local_means_color_tf(imgs, search_window, block_size, photo_render): 165 | my_func = lambda x: non_local_means_color_py(x, search_window, block_size, photo_render) 166 | y = tf.py_func(my_func, [imgs], tf.float32, stateful=False) 167 | return y 168 | 169 | def non_local_means_bw_py(imgs, search_window, block_size, photo_render): 170 | import cv2 171 | ret_imgs = opencv_wrapper(imgs, cv2.fastNlMeansDenoising, [None,photo_render,block_size,search_window]) 172 | return ret_imgs 173 | 174 | def non_local_means_bw_tf(imgs, search_window, block_size, photo_render): 175 | my_func = lambda x: non_local_means_bw_py(x, search_window, block_size, photo_render) 176 | y = tf.py_func(my_func, [imgs], tf.float32, stateful=False) 177 | return y 178 | 179 | 180 | def bilateral_filter_py(imgs, d, sigmaSpace, sigmaColor): 181 | """ 182 | :param d: Diameter of each pixel neighborhood that is used during filtering. 183 | If it is non-positive, it is computed from sigmaSpace. 184 | :param sigmaSpace: Filter sigma in the coordinate space. 185 | A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor ). 186 | When d>0, it specifies the neighborhood size regardless of sigmaSpace. 187 | Otherwise, d is proportional to sigmaSpace. 188 | :param sigmaColor: Filter sigma in the color space. 189 | A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color. 
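(As a rough, illustrative starting point -- not values used by this repo --
the OpenCV tutorials commonly demonstrate bilateralFilter with
d=9, sigmaColor=75, sigmaSpace=75.)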
190 | """ 191 | import cv2 192 | return opencv_wrapper(imgs, cv2.bilateralFilter, [d, sigmaColor, sigmaSpace]) 193 | 194 | def bilateral_filter_tf(imgs, d, sigmaSpace, sigmaColor): 195 | my_func = lambda x: bilateral_filter_py(x, d, sigmaSpace, sigmaColor) 196 | y = tf.py_func(my_func, [imgs], tf.float32, stateful=False) 197 | return y 198 | 199 | 200 | # Adaptive Bilateral Filter 201 | # https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#adaptivebilateralfilter 202 | # Removed in OpenCV > 3.0. 203 | def adaptive_bilateral_filter_py(imgs, ksize, sigmaSpace, maxSigmaColor=20.0): 204 | import cv2 205 | return opencv_wrapper(imgs, cv2.adaptiveBilateralFilter, [(ksize,ksize), sigmaSpace, maxSigmaColor]) 206 | 207 | def adaptive_bilateral_filter_tf(imgs, ksize, sigmaSpace, maxSigmaColor=20.0): 208 | my_func = lambda x: adaptive_bilateral_filter_py(x, ksize, sigmaSpace, maxSigmaColor) 209 | y = tf.py_func(my_func, [imgs], tf.float32, stateful=False) 210 | return y 211 | 212 | 213 | none_tf = none_py = lambda x:x 214 | 215 | 216 | import sys, os 217 | project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 218 | sys.path.append(project_path) 219 | 220 | from externals.MagNet.worker import SimpleReformer 221 | mnist_autoencoder_fpath = os.path.join(project_path, "downloads/MagNet/defensive_models/MNIST_I") 222 | cifar10_autoencoder_fpath = os.path.join(project_path, "downloads/MagNet/defensive_models/CIFAR") 223 | 224 | reformer_mnist = SimpleReformer(mnist_autoencoder_fpath) 225 | reformer_cifar10 = SimpleReformer(cifar10_autoencoder_fpath) 226 | 227 | def magnet_mnist_py(imgs): 228 | return reformer_mnist.heal(imgs) 229 | 230 | def magnet_cifar10_py(imgs): 231 | return reformer_cifar10.heal(imgs) 232 | 233 | # Construct a name search function. 
234 | def isfloat(value): 235 | try: 236 | float(value) 237 | return True 238 | except: 239 | return False 240 | 241 | def parse_params(params_str): 242 | params = [] 243 | 244 | for param in params_str.split('_'): 245 | param = param.strip() 246 | if param.isdigit(): 247 | param = int(param) 248 | elif isfloat(param): 249 | param = float(param) 250 | else: 251 | continue 252 | params.append(param) 253 | 254 | return params 255 | 256 | def get_squeezer_by_name(name, func_type): 257 | squeezer_list = ['none', 258 | 'bit_depth_random', 259 | 'bit_depth', 260 | 'binary_filter', 261 | 'binary_random_filter', 262 | 'adaptive_binarize', 263 | 'otsu_binarize', 264 | 'median_filter', 265 | 'median_random_filter', 266 | 'median_random_size_filter', 267 | 'non_local_means_bw', 268 | 'non_local_means_color', 269 | 'adaptive_bilateral_filter', 270 | 'bilateral_filter', 271 | 'magnet_mnist', 272 | 'magnet_cifar10', 273 | ] 274 | 275 | for squeezer_name in squeezer_list: 276 | if name.startswith(squeezer_name): 277 | func_name = "%s_py" % squeezer_name if func_type=='python' else "%s_tf" % squeezer_name 278 | params_str = name[len(squeezer_name):] 279 | 280 | # Return a list 281 | args = parse_params(params_str) 282 | # print ("params_str: %s, args: %s" % (params_str, args)) 283 | 284 | return lambda x: globals()[func_name](*([x]+args)) 285 | 286 | raise Exception('Unknown squeezer name: %s' % name) 287 | 288 | def get_sequential_squeezers_by_name(squeezers_name): 289 | # example_squeezers_name = "binary_filter_0.5,median_smoothing_2_2" 290 | squeeze_func = None 291 | for squeezer_name in squeezers_name.split(','): 292 | squeezer = get_squeezer_by_name(squeezer_name, 'python') 293 | 294 | if squeeze_func is None: 295 | # Bind via default arguments: a plain closure here would late-bind 296 | # and make every stage call the last squeezer in the list. 297 | squeeze_func = lambda x, s=squeezer: s(x) 298 | else: 299 | squeeze_func = lambda x, s=squeezer, f=squeeze_func: s(f(x)) 300 | return squeeze_func -------------------------------------------------------------------------------- /utils/visualization.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | def draw_plot(xs, series_list, label_list, fname): 5 | fig, ax = plt.subplots() 6 | 7 | for i,series in enumerate(series_list): 8 | smask = np.isfinite(series) 9 | ax.plot(xs[smask], series[smask], linestyle='-', marker='o', label=label_list[i]) 10 | 11 | legend = ax.legend(loc='best', shadow=True) 12 | 13 | # The frame is a matplotlib.patches.Rectangle instance surrounding the legend. 14 | frame = legend.get_frame() 15 | frame.set_facecolor('0.90') 16 | 17 | # plt.show() 18 | plt.savefig(fname) 19 | plt.close(fig) --------------------------------------------------------------------------------