├── .github
    ├── FUNDING.yml
    ├── ISSUE_TEMPLATE
    │   ├── bug_report.md
    │   ├── feature_request.md
    │   └── general-question.md
    └── workflows
    │   ├── docs.yml
    │   ├── guide.yml
    │   ├── pypi.yml
    │   └── tests.yml
├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.rst
├── docs
    ├── Makefile
    ├── conf.py
    ├── index.rst
    ├── make.bat
    ├── modules
    │   ├── attacks.rst
    │   ├── criteria.rst
    │   ├── devutils.rst
    │   ├── distances.rst
    │   ├── models.rst
    │   ├── plot.rst
    │   ├── tensorboard.rst
    │   ├── types.rst
    │   ├── utils.rst
    │   └── zoo.rst
    └── requirements.txt
├── examples
    ├── README.md
    ├── eot_attack_pytorch_resnet18.py
    ├── multiple_attacks_pytorch_resnet18.py
    ├── single_attack_pytorch_resnet18.py
    ├── single_attack_tensorflow_resnet50.py
    ├── spatial_attack_pytorch_resnet18.py
    ├── substituion_model_pytorch_resnet18.py
    └── zoo
    │   └── mnist
    │       ├── foolbox_model.py
    │       └── mnist_cnn.pth
├── foolbox
    ├── VERSION
    ├── __init__.py
    ├── attacks
    │   ├── __init__.py
    │   ├── additive_noise.py
    │   ├── base.py
    │   ├── basic_iterative_method.py
    │   ├── binarization.py
    │   ├── blended_noise.py
    │   ├── blur.py
    │   ├── boundary_attack.py
    │   ├── brendel_bethge.py
    │   ├── carlini_wagner.py
    │   ├── contrast.py
    │   ├── contrast_min.py
    │   ├── dataset_attack.py
    │   ├── ddn.py
    │   ├── deepfool.py
    │   ├── ead.py
    │   ├── fast_gradient_method.py
    │   ├── fast_minimum_norm.py
    │   ├── gen_attack.py
    │   ├── gen_attack_utils.py
    │   ├── gradient_descent_base.py
    │   ├── hop_skip_jump.py
    │   ├── inversion.py
    │   ├── mi_fgsm.py
    │   ├── newtonfool.py
    │   ├── pointwise.py
    │   ├── projected_gradient_descent.py
    │   ├── saltandpepper.py
    │   ├── sparse_l1_descent_attack.py
    │   ├── spatial_attack.py
    │   ├── spatial_attack_transformations.py
    │   └── virtual_adversarial_attack.py
    ├── criteria.py
    ├── data
    │   ├── cifar100_00_49.png
    │   ├── cifar100_01_33.png
    │   ├── cifar100_02_72.png
    │   ├── cifar100_03_51.png
    │   ├── cifar100_04_71.png
    │   ├── cifar100_05_92.png
    │   ├── cifar100_06_15.png
    │   ├── cifar100_07_14.png
    │   ├── cifar100_08_23.png
    │   ├── cifar100_09_0.png
    │   ├── cifar100_10_71.png
    │   ├── cifar100_11_75.png
    │   ├── cifar100_12_81.png
    │   ├── cifar100_13_69.png
    │   ├── cifar100_14_40.png
    │   ├── cifar100_15_43.png
    │   ├── cifar100_16_92.png
    │   ├── cifar100_17_97.png
    │   ├── cifar100_18_70.png
    │   ├── cifar100_19_53.png
    │   ├── cifar10_00_3.png
    │   ├── cifar10_01_8.png
    │   ├── cifar10_02_8.png
    │   ├── cifar10_03_0.png
    │   ├── cifar10_04_6.png
    │   ├── cifar10_05_6.png
    │   ├── cifar10_06_1.png
    │   ├── cifar10_07_6.png
    │   ├── cifar10_08_3.png
    │   ├── cifar10_09_1.png
    │   ├── cifar10_10_0.png
    │   ├── cifar10_11_9.png
    │   ├── cifar10_12_5.png
    │   ├── cifar10_13_7.png
    │   ├── cifar10_14_9.png
    │   ├── cifar10_15_8.png
    │   ├── cifar10_16_5.png
    │   ├── cifar10_17_7.png
    │   ├── cifar10_18_8.png
    │   ├── cifar10_19_6.png
    │   ├── fashionMNIST_00_9.png
    │   ├── fashionMNIST_01_2.png
    │   ├── fashionMNIST_02_1.png
    │   ├── fashionMNIST_03_1.png
    │   ├── fashionMNIST_04_6.png
    │   ├── fashionMNIST_05_1.png
    │   ├── fashionMNIST_06_4.png
    │   ├── fashionMNIST_07_6.png
    │   ├── fashionMNIST_08_5.png
    │   ├── fashionMNIST_09_7.png
    │   ├── fashionMNIST_10_4.png
    │   ├── fashionMNIST_11_5.png
    │   ├── fashionMNIST_12_7.png
    │   ├── fashionMNIST_13_3.png
    │   ├── fashionMNIST_14_4.png
    │   ├── fashionMNIST_15_1.png
    │   ├── fashionMNIST_16_2.png
    │   ├── fashionMNIST_17_4.png
    │   ├── fashionMNIST_18_8.png
    │   ├── fashionMNIST_19_0.png
    │   ├── imagenet_00_243.jpg
    │   ├── imagenet_01_559.jpg
    │   ├── imagenet_02_438.jpg
    │   ├── imagenet_03_990.jpg
    │   ├── imagenet_04_949.jpg
    │   ├── imagenet_05_853.jpg
    │   ├── imagenet_06_609.jpg
    │   ├── imagenet_07_609.jpg
    │   ├── imagenet_08_915.jpg
    │   ├── imagenet_09_455.jpg
    │   ├── imagenet_10_541.jpg
    │   ├── imagenet_11_630.jpg
    │   ├── imagenet_12_741.jpg
    │   ├── imagenet_13_471.jpg
    │   ├── imagenet_14_129.jpg
    │   ├── imagenet_15_99.jpg
    │   ├── imagenet_16_251.jpg
    │   ├── imagenet_17_22.jpg
    │   ├── imagenet_18_317.jpg
    │   ├── imagenet_19_305.jpg
    │   ├── mnist_00_7.png
    │   ├── mnist_01_2.png
    │   ├── mnist_02_1.png
    │   ├── mnist_03_0.png
    │   ├── mnist_04_4.png
    │   ├── mnist_05_1.png
    │   ├── mnist_06_4.png
    │   ├── mnist_07_9.png
    │   ├── mnist_08_5.png
    │   ├── mnist_09_9.png
    │   ├── mnist_10_0.png
    │   ├── mnist_11_6.png
    │   ├── mnist_12_9.png
    │   ├── mnist_13_0.png
    │   ├── mnist_14_1.png
    │   ├── mnist_15_5.png
    │   ├── mnist_16_9.png
    │   ├── mnist_17_7.png
    │   ├── mnist_18_3.png
    │   └── mnist_19_4.png
    ├── devutils.py
    ├── distances.py
    ├── external
    │   ├── LICENSE
    │   ├── README.rst
    │   ├── __init__.py
    │   └── clipping_aware_rescaling.py
    ├── gradient_estimators.py
    ├── models
    │   ├── __init__.py
    │   ├── base.py
    │   ├── jax.py
    │   ├── numpy.py
    │   ├── pytorch.py
    │   ├── tensorflow.py
    │   └── wrappers.py
    ├── plot.py
    ├── py.typed
    ├── tensorboard.py
    ├── types.py
    ├── utils.py
    └── zoo
    │   ├── __init__.py
    │   ├── common.py
    │   ├── git_cloner.py
    │   ├── model_loader.py
    │   ├── weights_fetcher.py
    │   └── zoo.py
├── guide
    ├── .vuepress
    │   ├── config.js
    │   └── public
    │   │   ├── CNAME
    │   │   ├── logo.png
    │   │   ├── logo_alpha.png
    │   │   └── logo_small.png
    ├── README.md
    └── guide
    │   ├── README.md
    │   ├── adding_attacks.md
    │   ├── development.md
    │   ├── examples.md
    │   └── getting-started.md
├── paper
    ├── paper.bib
    └── paper.md
├── performance
    ├── README.md
    ├── foolbox_1.8.0.ipynb
    ├── foolbox_2.4.0.ipynb
    ├── foolbox_3.1.1.ipynb
    ├── images.npy
    └── labels.npy
├── pyproject.toml
├── readthedocs.yml
├── requirements.txt
├── setup.cfg
├── setup.py
└── tests
    ├── conftest.py
    ├── requirements.txt
    ├── test_attacks.py
    ├── test_attacks_base.py
    ├── test_attacks_raise.py
    ├── test_binarization_attack.py
    ├── test_brendel_bethge_attack.py
    ├── test_criteria.py
    ├── test_dataset_attack.py
    ├── test_devutils.py
    ├── test_distances.py
    ├── test_eot_wrapper.py
    ├── test_fast_minimum_norm_attack.py
    ├── test_fetch_weights.py
    ├── test_gen_attack_utils.py
    ├── test_hsj_attack.py
    ├── test_models.py
    ├── test_plot.py
    ├── test_pointwise_attack.py
    ├── test_spatial_attack.py
    ├── test_tensorboard.py
    ├── test_utils.py
    └── test_zoo.py


/.github/FUNDING.yml:
--------------------------------------------------------------------------------
 1 | # These are supported funding model platforms
 2 | 
 3 | # github: [jonasrauber]
 4 | patreon: # Replace with a single Patreon username
 5 | open_collective: # Replace with a single Open Collective username
 6 | ko_fi: # Replace with a single Ko-fi username
 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | custom: ["paypal.me/jonasrauber"]
13 | 


--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | name: Bug report
 3 | about: Create a report to help us improve
 4 | title: ''
 5 | labels: ''
 6 | assignees: ''
 7 | 
 8 | ---
 9 | 
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 | 
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 
16 | 
17 | **Expected behavior**
18 | A clear and concise description of what you expected to happen.
19 | 
20 | 
21 | **Software (please complete the following information):**
22 |  - Foolbox version:
23 |  - PyTorch version (if applicable):
24 |  - TensorFlow version (if applicable):
25 |  - JAX version (if applicable):
26 | 
27 | 
28 | **Additional context**
29 | Add any other context about the problem here.
30 | 


--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | name: Feature request
 3 | about: Suggest an idea for this project
 4 | title: ''
 5 | labels: ''
 6 | assignees: ''
 7 | 
 8 | ---
 9 | 
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 | 
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 | 
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 | 
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 | 


--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/general-question.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | name: General question
 3 | about: Ask a general question about foolbox
 4 | title: ''
 5 | labels: ''
 6 | assignees: ''
 7 | 
 8 | ---
 9 | 
10 | 
11 | 


--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
 1 | name: Docs
 2 | 
 3 | on:
 4 |   push:
 5 |     branches:
 6 |       - master
 7 |   pull_request:
 8 | 
 9 | jobs:
10 |   build:
11 | 
12 |     runs-on: ubuntu-latest
13 | 
14 |     steps:
15 |     - uses: actions/checkout@v2
16 |     - uses: actions/cache@v1
17 |       with:
18 |         path: ~/.cache/pip
19 |         key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
20 |         restore-keys: |
21 |           ${{ runner.os }}-pip-
22 |     - name: Set up Python 3.9
23 |       uses: actions/setup-python@v1
24 |       with:
25 |         python-version: 3.9
26 |     - name: Install package
27 |       run: |
28 |         pip install -e .
29 |     - name: Install requirements.txt
30 |       run: |
31 |         pip install -r requirements.txt
32 |     - name: Install docs/requirements.txt
33 |       run: |
34 |         python -m pip install --upgrade pip setuptools
35 |         pip install -r docs/requirements.txt
36 |     - name: sphinx
37 |       run: |
38 |         cd docs && make html
39 | 


--------------------------------------------------------------------------------
/.github/workflows/guide.yml:
--------------------------------------------------------------------------------
 1 | name: Guide
 2 | 
 3 | on:
 4 |   push:
 5 |     branches:
 6 |       - master
 7 | 
 8 | jobs:
 9 |   vuepress:
10 |     runs-on: ubuntu-latest
11 |     steps:
12 |     - uses: actions/checkout@v2
13 |     - name: Install vuepress
14 |       run: |
15 |         sudo apt update
16 |         sudo apt install yarn -y
17 |         yarn global add vuepress
18 |         yarn add vue-template-compiler
19 |     - name: Build
20 |       run: |
21 |         export NODE_OPTIONS=--openssl-legacy-provider
22 |         vuepress build
23 |       working-directory: ./guide
24 |     - name: Push
25 |       env:
26 |         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
27 |       run: |
28 |         git init
29 |         git config user.name "GitHub Actions Bot"
30 |         git config user.email "<>"
31 |         git add -A
32 |         git commit -m 'deploy'
33 |         git push -f https://x-access-token:${GITHUB_TOKEN}@github.com/bethgelab/foolbox.git master:gh-pages
34 |       working-directory: ./guide/.vuepress/dist
35 | 


--------------------------------------------------------------------------------
/.github/workflows/pypi.yml:
--------------------------------------------------------------------------------
 1 | name: PyPI
 2 | 
 3 | on:
 4 |   release:
 5 |     types: [created]
 6 | 
 7 | jobs:
 8 |   deploy:
 9 |     runs-on: ubuntu-latest
10 |     steps:
11 |     - uses: actions/checkout@v1
12 |     - name: Set up Python
13 |       uses: actions/setup-python@v1
14 |       with:
15 |         python-version: '3.9'
16 |     - name: Install dependencies
17 |       run: |
18 |         python -m pip install --upgrade pip
19 |         pip install setuptools wheel twine
20 |     - name: Build and publish
21 |       env:
22 |         TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
23 |         TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
24 |       run: |
25 |         python setup.py sdist bdist_wheel
26 |         twine upload dist/*
27 | 


--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
  1 | name: Tests
  2 | 
  3 | on:
  4 |   push:
  5 |     branches:
  6 |       - master
  7 |   pull_request:
  8 | 
  9 | jobs:
 10 |   build:
 11 | 
 12 |     runs-on: ubuntu-latest
 13 |     strategy:
 14 |       max-parallel: 20
 15 |       matrix:
 16 |         python-version: ['3.9', '3.10']
 17 |         backend: ["none", "pytorch", "tensorflow", "jax", "numpy"]
 18 | 
 19 |     steps:
 20 |     - uses: actions/checkout@v2
 21 |     - uses: actions/cache@v1
 22 |       with:
 23 |         path: ~/.cache/pip
 24 |         key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
 25 |         restore-keys: |
 26 |           ${{ runner.os }}-pip-
 27 |     - uses: actions/cache@v1
 28 |       with:
 29 |         path: ~/.cache/torch
 30 |         key: ${{ runner.os }}-cache-torch
 31 |     - uses: actions/cache@v1
 32 |       with:
 33 |         path: ~/.torch
 34 |         key: ${{ runner.os }}-torch
 35 |     - uses: actions/cache@v1
 36 |       with:
 37 |         path: ~/.keras
 38 |         key: ${{ runner.os }}-keras
 39 | #   - name: debugging
 40 | #     run: |
 41 | #       python3 -c 'from urllib.request import urlopen; r = urlopen("https://download.pytorch.org/models/resnet18-5c106cde.pth"); print(r.status, r.reason, r.msg)' || true
 42 | #   - name: workaround for https://github.com/pytorch/vision/issues/1876
 43 | #     uses: wei/wget@v1
 44 | #     with:
 45 | #         args: https://download.pytorch.org/models/resnet18-5c106cde.pth
 46 | #   - name: workaround (step 2)
 47 | #     run: |
 48 | #       mkdir -p ~/.cache/torch/checkpoints || true
 49 | #       mv resnet18-5c106cde.pth ~/.cache/torch/checkpoints || true
 50 |     - name: Set up Python ${{ matrix.python-version }}
 51 |       uses: actions/setup-python@v1
 52 |       with:
 53 |         python-version: ${{ matrix.python-version }}
 54 |     - name: Install requirements.txt
 55 |       run: |
 56 |         function retry-with-backoff() {
 57 |           for BACKOFF in 0 1 2 4 8 16 32 64; do
 58 |             sleep $BACKOFF
 59 |             if "$@"; then
 60 |               return 0
 61 |             fi
 62 |           done
 63 |           return 1
 64 |         }
 65 | 
 66 |         python -m pip install --upgrade pip setuptools wheel
 67 |         retry-with-backoff pip install -r requirements.txt
 68 |     - name: flake8
 69 |       run: |
 70 |         flake8 . --count --show-source --statistics
 71 |     - name: black
 72 |       run: |
 73 |         black --check --verbose .
 74 |     - name: Install package
 75 |       run: |
 76 |         pip install -e .
 77 |     - name: Install tests/requirements.txt
 78 |       run: |
 79 |         function retry-with-backoff() {
 80 |           for BACKOFF in 0 1 2 4 8 16 32 64; do
 81 |             sleep $BACKOFF
 82 |             if "$@"; then
 83 |               return 0
 84 |             fi
 85 |           done
 86 |           return 1
 87 |         }
 88 | 
 89 |         retry-with-backoff pip install -r tests/requirements.txt
 90 |     - name: mypy (package)
 91 |       run: |
 92 |         mypy --install-types --non-interactive foolbox/
 93 |         mypy -p foolbox
 94 |     - name: mypy (tests)
 95 |       run: |
 96 |         mypy tests/
 97 |     - name: Test with pytest (backend ${{ matrix.backend }})
 98 |       run: |
 99 |         pytest --durations=0 --cov-report term-missing --cov=foolbox --verbose --backend ${{ matrix.backend }}
100 |     - name: Codecov
101 |       continue-on-error: true
102 |       env:
103 |           CODECOV_TOKEN: "60c0d7ac-8ec1-47c6-b3b1-ac2ad2dea76f"
104 |       run: |
105 |         codecov
106 |     - name: Coveralls
107 |       continue-on-error: true
108 |       env:
109 |         COVERALLS_REPO_TOKEN: "2r76Cn01kW1sSkEirJ3SRpp478NJtPNdA"
110 |       run: |
111 |         coveralls
112 | 


--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
  1 | .mypy_cache
  2 | .pytest_cache/
  3 | 
  4 | # Created by https://www.gitignore.io/api/osx,vim,linux,python
  5 | 
  6 | ### Linux ###
  7 | *~
  8 | 
  9 | # temporary files which can be created if a process still has a handle open of a deleted file
 10 | .fuse_hidden*
 11 | 
 12 | # KDE directory preferences
 13 | .directory
 14 | 
 15 | # Linux trash folder which might appear on any partition or disk
 16 | .Trash-*
 17 | 
 18 | # .nfs files are created when an open file is removed but is still being accessed
 19 | .nfs*
 20 | 
 21 | ### OSX ###
 22 | *.DS_Store
 23 | .AppleDouble
 24 | .LSOverride
 25 | 
 26 | # Icon must end with two \r
 27 | Icon
 28 | 
 29 | # Thumbnails
 30 | ._*
 31 | 
 32 | # Files that might appear in the root of a volume
 33 | .DocumentRevisions-V100
 34 | .fseventsd
 35 | .Spotlight-V100
 36 | .TemporaryItems
 37 | .Trashes
 38 | .VolumeIcon.icns
 39 | .com.apple.timemachine.donotpresent
 40 | 
 41 | # Directories potentially created on remote AFP share
 42 | .AppleDB
 43 | .AppleDesktop
 44 | Network Trash Folder
 45 | Temporary Items
 46 | .apdisk
 47 | 
 48 | ### Python ###
 49 | # Byte-compiled / optimized / DLL files
 50 | __pycache__/
 51 | *.py[cod]
 52 | *$py.class
 53 | 
 54 | # C extensions
 55 | *.so
 56 | 
 57 | # Distribution / packaging
 58 | .Python
 59 | env/
 60 | build/
 61 | develop-eggs/
 62 | dist/
 63 | downloads/
 64 | eggs/
 65 | .eggs/
 66 | lib/
 67 | lib64/
 68 | parts/
 69 | sdist/
 70 | var/
 71 | wheels/
 72 | *.egg-info/
 73 | .installed.cfg
 74 | *.egg
 75 | 
 76 | # PyInstaller
 77 | #  Usually these files are written by a python script from a template
 78 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
 79 | *.manifest
 80 | *.spec
 81 | 
 82 | # Installer logs
 83 | pip-log.txt
 84 | pip-delete-this-directory.txt
 85 | 
 86 | # Unit test / coverage reports
 87 | htmlcov/
 88 | .tox/
 89 | .coverage
 90 | .coverage.*
 91 | .cache
 92 | nosetests.xml
 93 | coverage.xml
 94 | *,cover
 95 | .hypothesis/
 96 | 
 97 | # Translations
 98 | *.mo
 99 | *.pot
100 | 
101 | # Django stuff:
102 | *.log
103 | local_settings.py
104 | 
105 | # Flask stuff:
106 | instance/
107 | .webassets-cache
108 | 
109 | # Scrapy stuff:
110 | .scrapy
111 | 
112 | # Sphinx documentation
113 | docs/_build/
114 | 
115 | # PyBuilder
116 | target/
117 | 
118 | # Jupyter Notebook
119 | .ipynb_checkpoints
120 | 
121 | # pyenv
122 | .python-version
123 | 
124 | # celery beat schedule file
125 | celerybeat-schedule
126 | 
127 | # SageMath parsed files
128 | *.sage.py
129 | 
130 | # dotenv
131 | .env
132 | 
133 | # virtualenv
134 | .venv
135 | venv/
136 | ENV/
137 | 
138 | # Spyder project settings
139 | .spyderproject
140 | .spyproject
141 | 
142 | # Rope project settings
143 | .ropeproject
144 | 
145 | # mkdocs documentation
146 | /site
147 | 
148 | ### Vim ###
149 | # swap
150 | [._]*.s[a-v][a-z]
151 | [._]*.sw[a-p]
152 | [._]s[a-v][a-z]
153 | [._]sw[a-p]
154 | # session
155 | Session.vim
156 | # temporary
157 | .netrwhist
158 | # auto-generated tag files
159 | tags
160 | 
161 | # PyCharm
162 | .idea/
163 | 
164 | # Visual Studio Code
165 | .vscode/
166 | 
167 | # End of https://www.gitignore.io/api/osx,vim,linux,python
168 | 


--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
 1 | repos:
 2 | - repo: https://github.com/ambv/black
 3 |   rev: 22.3.0
 4 |   hooks:
 5 |   - id: black
 6 |     language_version: python3.8
 7 |     additional_dependencies: [ 'click==8.0.4' ]
 8 | - repo: https://gitlab.com/pycqa/flake8
 9 |   rev: 4.0.1
10 |   hooks:
11 |   - id: flake8
12 | - repo: https://github.com/pre-commit/mirrors-mypy
13 |   rev: 'bdfdfda2221c4fd123dbc9ac0f2074951bd5af58'
14 |   hooks:
15 |   - id: mypy


--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2020 Jonas Rauber et al.
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 


--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include foolbox/VERSION
2 | include foolbox/py.typed
3 | include foolbox/data/*.jpg
4 | include foolbox/data/*.png
5 | include LICENSE
6 | 


--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
 1 | .PHONY: test
 2 | test:
 3 | 	pytest --pdb --cov=foolbox
 4 | 	pytest --pdb --cov=foolbox --cov-append --backend pytorch
 5 | 	pytest --pdb --cov=foolbox --cov-append --backend tensorflow
 6 | 	pytest --pdb --cov=foolbox --cov-append --backend jax
 7 | 	pytest --pdb --cov=foolbox --cov-append --backend numpy
 8 | 
 9 | .PHONY: testskipslow
10 | testskipslow:
11 | 	pytest --pdb --skipslow --cov=foolbox
12 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend pytorch
13 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend tensorflow
14 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend jax
15 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend numpy
16 | 
17 | .PHONY: testskipslowrev
18 | testskipslowrev:
19 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend numpy
20 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend jax
21 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend tensorflow
22 | 	pytest --pdb --skipslow --cov=foolbox --cov-append --backend pytorch
23 | 	pytest --pdb --skipslow --cov=foolbox
24 | 
25 | .PHONY: testattacks
26 | testattacks:
27 | 	pytest --pdb --cov=foolbox.attacks tests/test_attacks.py
28 | 	pytest --pdb --cov=foolbox.attacks tests/test_attacks.py --cov-append --backend pytorch
29 | 	pytest --pdb --cov=foolbox.attacks tests/test_attacks.py --cov-append --backend tensorflow
30 | 	pytest --pdb --cov=foolbox.attacks tests/test_attacks.py --cov-append --backend jax
31 | 	pytest --pdb --cov=foolbox.attacks tests/test_attacks.py --cov-append --backend numpy
32 | 
33 | .PHONY: black
34 | black:
35 | 	black .
36 | 
37 | .PHONY: blackcheck
38 | blackcheck:
39 | 	black --check .
40 | 
41 | .PHONY: flake8
42 | flake8:
43 | 	flake8
44 | 
45 | .PHONY: mypy
46 | mypy:
47 | 	mypy -p foolbox
48 | 	mypy tests/
49 | 	mypy examples/
50 | 
51 | .PHONY: install
52 | install:
53 | 	pip3 install -e .
54 | 
55 | .PHONY: devsetup
56 | devsetup:
57 | 	pre-commit install
58 | 
59 | .PHONY: build
60 | build:
61 | 	python3 setup.py sdist
62 | 
63 | .PHONY: commit
64 | commit:
65 | 	git add foolbox/VERSION
66 | 	git commit -m 'Version $(shell cat foolbox/VERSION)'
67 | 
68 | .PHONY: release
69 | release: build
70 | 	twine upload dist/foolbox-$(shell cat foolbox/VERSION).tar.gz
71 | 
72 | .PHONY: guide
73 | guide:
74 | 	cd guide && vuepress build --temp /tmp/
75 | 
76 | .PHONY: installvuepress
77 | installvuepress:
78 | 	curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
79 | 	echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
80 | 	sudo apt update && sudo apt install yarn
81 | 	sudo yarn global add vuepress
82 | 
83 | .PHONY: serveguide
84 | serveguide:
85 | 	cd guide/.vuepress/dist/ && python3 -m http.server 9999
86 | 
87 | .PHONY: devguide
88 | devguide:
89 | 	cd guide && vuepress dev --temp /tmp/ --port 9999
90 | 
91 | .PHONY: pushguide
92 | pushguide:
93 | 	cd guide/.vuepress/dist/ && git init && git add -A && git commit -m 'deploy'
94 | 	cd guide/.vuepress/dist/ && git push -f git@github.com:bethgelab/foolbox.git master:gh-pages
95 | 


--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
 1 | # Minimal makefile for Sphinx documentation
 2 | #
 3 | 
 4 | # You can set these variables from the command line, and also
 5 | # from the environment for the first two.
 6 | SPHINXOPTS    ?=
 7 | SPHINXBUILD   ?= sphinx-build
 8 | SPHINXAUTOBUILD   ?= sphinx-autobuild
 9 | SOURCEDIR     = .
10 | BUILDDIR      = _build
11 | 
12 | # Put it first so that "make" without argument is like "make help".
13 | help:
14 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
15 | 
16 | .PHONY: help Makefile
17 | 
18 | .PHONY: livehtml
19 | livehtml:
20 | 	@$(SPHINXAUTOBUILD) -b html "$(SOURCEDIR)" "$(BUILDDIR)/html" \
21 | 		--watch ../foolbox/ --port 9999 --host 0.0.0.0 \
22 | 		--ignore "*.swp" \
23 |         --ignore "*.pdf" \
24 |         --ignore "*.log" \
25 |         --ignore "*.out" \
26 |         --ignore "*.toc" \
27 |         --ignore "*.aux" \
28 |         --ignore "*.idx" \
29 |         --ignore "*.ind" \
30 |         --ignore "*.ilg" \
31 |         --ignore "*.tex" \
32 | 		$(SPHINXOPTS) $(O)
33 | 
34 | # Catch-all target: route all unknown targets to Sphinx using the new
35 | # "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
36 | %: Makefile
37 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
38 | 


--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
 1 | # Configuration file for the Sphinx documentation builder.
 2 | #
 3 | # This file only contains a selection of the most common options. For a full
 4 | # list see the documentation:
 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
 6 | 
 7 | # -- Path setup --------------------------------------------------------------
 8 | 
 9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | # import os
14 | # import sys
15 | # sys.path.insert(0, os.path.abspath('.'))
16 | 
17 | import foolbox
18 | 
19 | # -- Project information -----------------------------------------------------
20 | 
21 | project = "Foolbox"
22 | copyright = "2024, Jonas Rauber, Roland S. Zimmermann"
23 | author = "Jonas Rauber, Roland S. Zimmermann"
24 | 
25 | version = foolbox.__version__
26 | release = foolbox.__version__
27 | 
28 | 
29 | # -- General configuration ---------------------------------------------------
30 | 
31 | # Add any Sphinx extension module names here, as strings. They can be
32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 | # ones.
34 | extensions = [
35 |     "sphinx.ext.autodoc",
36 |     "sphinx.ext.autodoc.typehints",
37 |     "sphinx.ext.autosummary",
38 |     "sphinx.ext.napoleon",
39 | ]
40 | 
41 | # autodoc_typehints = "signature"
42 | autodoc_typehints = "description"
43 | 
44 | # Add any paths that contain templates here, relative to this directory.
45 | templates_path = ["_templates"]
46 | 
47 | # List of patterns, relative to source directory, that match files and
48 | # directories to ignore when looking for source files.
49 | # This pattern also affects html_static_path and html_extra_path.
50 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
51 | 
52 | 
53 | # -- Options for HTML output -------------------------------------------------
54 | 
55 | # The theme to use for HTML and HTML Help pages.  See the documentation for
56 | # a list of builtin themes.
57 | #
58 | html_theme = "sphinx_rtd_theme"
59 | # html_theme = "sphinx_typlog_theme"
60 | 
61 | # Add any paths that contain custom static files (such as style sheets) here,
62 | # relative to this directory. They are copied after the builtin static files,
63 | # so a file named "default.css" will overwrite the builtin "default.css".
64 | html_static_path = ["_static"]
65 | 


--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
 1 | Welcome to Foolbox Native
 2 | =========================
 3 | 
 4 | Foolbox is a Python toolbox to create adversarial examples that fool neural networks.
 5 | *Foolbox 3.0* has been completely rewritten from scratch.
 6 | It is now built on top of `EagerPy <https://github.com/jonasrauber/eagerpy>`_
 7 | and comes with native support for these frameworks:
 8 | 
 9 | * `PyTorch <https://pytorch.org>`_
10 | * `TensorFlow <https://www.tensorflow.org>`_
11 | * `JAX <https://github.com/google/jax>`_
12 | 
13 | Foolbox comes with a :doc:`large collection of adversarial attacks <modules/attacks>`, both gradient-based white-box attacks as well as decision-based and score-based black-box attacks.
14 | 
15 | The source code and a `minimal working example <https://github.com/bethgelab/foolbox#example>`_ can be found on `GitHub <https://github.com/bethgelab/foolbox>`_.
16 | 
17 | 
18 | .. toctree::
19 |    :maxdepth: 2
20 |    :caption: User API
21 | 
22 |    modules/models
23 |    modules/attacks
24 |    modules/criteria
25 |    modules/distances
26 |    modules/utils
27 |    modules/plot
28 |    modules/zoo
29 | 
30 | .. toctree::
31 |    :maxdepth: 2
32 |    :caption: Internal API
33 | 
34 |    modules/devutils
35 |    modules/tensorboard
36 |    modules/types
37 | 
38 | 
39 | Indices and tables
40 | ==================
41 | 
42 | * :ref:`genindex`
43 | * :ref:`modindex`
44 | * :ref:`search`
45 | 
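
The minimal working example linked above is not part of this listing; a condensed sketch based on `examples/single_attack_pytorch_resnet18.py` (included further down in this dump) looks roughly like this:

```python
import torchvision.models as models
import eagerpy as ep
from foolbox import PyTorchModel, accuracy, samples
from foolbox.attacks import LinfPGD

# wrap a pretrained PyTorch model as a Foolbox model
model = models.resnet18(pretrained=True).eval()
preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

# get a small batch of ImageNet samples and check the clean accuracy
images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
print("clean accuracy:", accuracy(fmodel, images, labels))

# run Linf PGD for several perturbation budgets
attack = LinfPGD()
epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3]
raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)

# robust accuracy = fraction of samples for which the attack did not succeed
robust_accuracy = 1 - success.float32().mean(axis=-1)
print("robust accuracy per epsilon:", robust_accuracy.numpy())
```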


--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
 1 | @ECHO OFF
 2 | 
 3 | pushd %~dp0
 4 | 
 5 | REM Command file for Sphinx documentation
 6 | 
 7 | if "%SPHINXBUILD%" == "" (
 8 | 	set SPHINXBUILD=sphinx-build
 9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | 
13 | if "%1" == "" goto help
14 | 
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | 	echo.
18 | 	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | 	echo.installed, then set the SPHINXBUILD environment variable to point
20 | 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | 	echo.may add the Sphinx directory to PATH.
22 | 	echo.
23 | 	echo.If you don't have Sphinx installed, grab it from
24 | 	echo.http://sphinx-doc.org/
25 | 	exit /b 1
26 | )
27 | 
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 | 
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 | 
34 | :end
35 | popd
36 | 


--------------------------------------------------------------------------------
/docs/modules/attacks.rst:
--------------------------------------------------------------------------------
  1 | :mod:`foolbox.attacks`
  2 | ======================
  3 | 
  4 | .. automodule:: foolbox.attacks
  5 |    :members:
  6 |    :undoc-members:
  7 | 
  8 | .. autosummary::
  9 |    :nosignatures:
 10 | 
 11 |    L2ContrastReductionAttack
 12 |    VirtualAdversarialAttack
 13 |    DDNAttack
 14 |    L2ProjectedGradientDescentAttack
 15 |    LinfProjectedGradientDescentAttack
 16 |    L2BasicIterativeAttack
 17 |    LinfBasicIterativeAttack
 18 |    L2FastGradientAttack
 19 |    LinfFastGradientAttack
 20 | 
 21 |    L2AdditiveGaussianNoiseAttack
 22 |    L2AdditiveUniformNoiseAttack
 23 |    L2ClippingAwareAdditiveGaussianNoiseAttack
 24 |    L2ClippingAwareAdditiveUniformNoiseAttack
 25 |    LinfAdditiveUniformNoiseAttack
 26 |    L2RepeatedAdditiveGaussianNoiseAttack
 27 |    L2RepeatedAdditiveUniformNoiseAttack
 28 |    L2ClippingAwareRepeatedAdditiveGaussianNoiseAttack
 29 |    L2ClippingAwareRepeatedAdditiveUniformNoiseAttack
 30 |    LinfRepeatedAdditiveUniformNoiseAttack
 31 |    InversionAttack
 32 |    BinarySearchContrastReductionAttack
 33 |    LinearSearchContrastReductionAttack
 34 | 
 35 |    HopSkipJumpAttack
 36 | 
 37 |    L2CarliniWagnerAttack
 38 |    NewtonFoolAttack
 39 |    EADAttack
 40 |    GaussianBlurAttack
 41 |    L2DeepFoolAttack
 42 |    LinfDeepFoolAttack
 43 |    SaltAndPepperNoiseAttack
 44 |    LinearSearchBlendedUniformNoiseAttack
 45 |    BinarizationRefinementAttack
 46 |    DatasetAttack
 47 |    BoundaryAttack
 48 |    L0BrendelBethgeAttack
 49 |    L1BrendelBethgeAttack
 50 |    L2BrendelBethgeAttack
 51 |    LinfinityBrendelBethgeAttack
 52 |    L0FMNAttack
 53 |    L1FMNAttack
 54 |    L2FMNAttack
 55 |    LInfFMNAttack
 56 |    PointwiseAttack
 57 | 
 58 |    FGM
 59 |    FGSM
 60 |    L2PGD
 61 |    LinfPGD
 62 |    PGD
 63 | 
 64 | .. autoclass:: L2ContrastReductionAttack
 65 | .. autoclass:: VirtualAdversarialAttack
 66 | .. autoclass:: DDNAttack
 67 | .. autoclass:: L2ProjectedGradientDescentAttack
 68 | .. autoclass:: LinfProjectedGradientDescentAttack
 69 | .. autoclass:: L2BasicIterativeAttack
 70 | .. autoclass:: LinfBasicIterativeAttack
 71 | .. autoclass:: L2FastGradientAttack
 72 | .. autoclass:: LinfFastGradientAttack
 73 | 
 74 | .. autoclass:: L2AdditiveGaussianNoiseAttack
 75 | .. autoclass:: L2AdditiveUniformNoiseAttack
 76 | .. autoclass:: L2ClippingAwareAdditiveGaussianNoiseAttack
 77 | .. autoclass:: L2ClippingAwareAdditiveUniformNoiseAttack
 78 | .. autoclass:: LinfAdditiveUniformNoiseAttack
 79 | .. autoclass:: L2RepeatedAdditiveGaussianNoiseAttack
 80 | .. autoclass:: L2RepeatedAdditiveUniformNoiseAttack
 81 | .. autoclass:: L2ClippingAwareRepeatedAdditiveGaussianNoiseAttack
 82 | .. autoclass:: L2ClippingAwareRepeatedAdditiveUniformNoiseAttack
 83 | .. autoclass:: LinfRepeatedAdditiveUniformNoiseAttack
 84 | .. autoclass:: InversionAttack
 85 | .. autoclass:: BinarySearchContrastReductionAttack
 86 | .. autoclass:: LinearSearchContrastReductionAttack
 87 | 
 88 | .. autoclass:: L2CarliniWagnerAttack
 89 | .. autoclass:: NewtonFoolAttack
 90 | .. autoclass:: EADAttack
 91 | .. autoclass:: GaussianBlurAttack
 92 | .. autoclass:: L2DeepFoolAttack
 93 | .. autoclass:: LinfDeepFoolAttack
 94 | .. autoclass:: SaltAndPepperNoiseAttack
 95 | .. autoclass:: LinearSearchBlendedUniformNoiseAttack
 96 | .. autoclass:: BinarizationRefinementAttack
 97 | .. autoclass:: DatasetAttack
 98 | .. autoclass:: BoundaryAttack
 99 | .. autoclass:: L0BrendelBethgeAttack
100 | .. autoclass:: L1BrendelBethgeAttack
101 | .. autoclass:: L2BrendelBethgeAttack
102 | .. autoclass:: LinfinityBrendelBethgeAttack
103 | .. autoclass:: L0FMNAttack
104 | .. autoclass:: L1FMNAttack
105 | .. autoclass:: L2FMNAttack
106 | .. autoclass:: LInfFMNAttack
107 | .. autoclass:: PointwiseAttack
108 | 
109 | .. autoclass:: FGM
110 | .. autoclass:: FGSM
111 | .. autoclass:: L2PGD
112 | .. autoclass:: LinfPGD
113 | .. autoclass:: PGD
114 | 


--------------------------------------------------------------------------------
/docs/modules/criteria.rst:
--------------------------------------------------------------------------------
1 | .. automodule:: foolbox.criteria
2 | 


--------------------------------------------------------------------------------
/docs/modules/devutils.rst:
--------------------------------------------------------------------------------
1 | :mod:`foolbox.devutils`
2 | =======================
3 | 
4 | .. automodule:: foolbox.devutils
5 |    :members:
6 |    :undoc-members:
7 | 


--------------------------------------------------------------------------------
/docs/modules/distances.rst:
--------------------------------------------------------------------------------
 1 | :mod:`foolbox.distances`
 2 | ========================
 3 | 
 4 | .. automodule:: foolbox.distances
 5 | 
 6 | Detailed description
 7 | --------------------
 8 | 
 9 | .. autoclass:: Distance
10 |    :members:
11 | 
12 | .. autoclass:: LpDistance
13 |    :members:
14 | 


--------------------------------------------------------------------------------
/docs/modules/models.rst:
--------------------------------------------------------------------------------
 1 | :mod:`foolbox.models`
 2 | =====================
 3 | 
 4 | .. automodule:: foolbox.models
 5 | 
 6 | Models
 7 | ------
 8 | 
 9 | .. autosummary::
10 |    :nosignatures:
11 | 
12 |    Model
13 |    PyTorchModel
14 |    TensorFlowModel
15 |    JAXModel
16 |    NumPyModel
17 | 
18 | Wrappers
19 | --------
20 | 
21 | .. autosummary::
22 |    :nosignatures:
23 | 
24 |    TransformBoundsWrapper
25 |    ExpectationOverTransformationWrapper
26 | 
27 | Detailed description
28 | --------------------
29 | 
30 | .. autoclass:: Model
31 |    :members:
32 | 
33 | .. autoclass:: PyTorchModel
34 |    :members:
35 | 
36 | .. autoclass:: TensorFlowModel
37 |    :members:
38 | 
39 | .. autoclass:: JAXModel
40 |    :members:
41 | 
42 | .. autoclass:: NumPyModel
43 |    :members:
44 | 
45 | .. autoclass:: TransformBoundsWrapper
46 |    :members:
47 | 
48 | .. autoclass:: ExpectationOverTransformationWrapper
49 |    :members:
50 | 


--------------------------------------------------------------------------------
/docs/modules/plot.rst:
--------------------------------------------------------------------------------
1 | :mod:`foolbox.plot`
2 | ===================
3 | 
4 | .. automodule:: foolbox.plot
5 |    :members:
6 |    :undoc-members:
7 | 


--------------------------------------------------------------------------------
/docs/modules/tensorboard.rst:
--------------------------------------------------------------------------------
1 | :mod:`foolbox.tensorboard`
2 | ==========================
3 | 
4 | .. automodule:: foolbox.tensorboard
5 |    :members:
6 |    :undoc-members:
7 | 


--------------------------------------------------------------------------------
/docs/modules/types.rst:
--------------------------------------------------------------------------------
1 | :mod:`foolbox.types`
2 | ====================
3 | 
4 | .. automodule:: foolbox.types
5 |    :members:
6 |    :undoc-members:
7 | 


--------------------------------------------------------------------------------
/docs/modules/utils.rst:
--------------------------------------------------------------------------------
1 | :mod:`foolbox.utils`
2 | ====================
3 | 
4 | .. automodule:: foolbox.utils
5 |    :members:
6 |    :undoc-members:
7 | 


--------------------------------------------------------------------------------
/docs/modules/zoo.rst:
--------------------------------------------------------------------------------
 1 | :mod:`foolbox.zoo`
 2 | ==================
 3 | 
 4 | .. automodule:: foolbox.zoo
 5 | 
 6 | 
 7 | Get Model
 8 | ---------
 9 | 
10 | .. autofunction:: get_model
11 | 
12 | 
13 | Fetch Weights
14 | -------------
15 | 
16 | .. autofunction:: fetch_weights
17 | 
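
A short usage sketch of the two functions documented here; the URLs are placeholders and the `unzip` flag is assumed from `foolbox/zoo/weights_fetcher.py`:

```python
import foolbox as fb

# clone a Foolbox Model Zoo compatible repository (placeholder URL) and
# instantiate the model defined by its foolbox_model.py
fmodel = fb.zoo.get_model("https://github.com/<user>/<zoo-repo>")

# or only download (and optionally unzip) pretrained weights (placeholder URL)
weights_path = fb.zoo.fetch_weights("https://<host>/weights.zip", unzip=True)
```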


--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx>=7.2.6
2 | sphinx-autobuild>=2024.2.4
3 | sphinx_rtd_theme>=2.0.0
4 | sphinx-typlog-theme==0.8.0
5 | 


--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
 1 | ## Examples
 2 | 
 3 | This folder contains examples that demonstrate how Foolbox can be used
 4 | to run one or more adversarial attacks and how to use the returned results
 5 | to compute the robust accuracy (the accuracy of the model when it is attacked).
 6 | 
 7 | The standard examples can be found in:
 8 | * `single_attack_pytorch_resnet18.py`
 9 | * `single_attack_tensorflow_resnet50.py`
10 | 
11 | They show how to run a single adversarial attack (Linf PGD) against an ImageNet
12 | model in PyTorch and TensorFlow, respectively.
13 | 
14 | The remaining examples all use PyTorch,
15 | but the only framework-specific part is the model instantiated at the beginning
16 | of the script, so any example can easily be run with any of the supported
17 | frameworks.
18 | 
19 | `multiple_attacks_pytorch_resnet18.py` is an extended version of the single attack
20 | example. It shows how to combine the results of running multiple attacks
21 | to report the robust accuracy always using the strongest attack per sample.
22 | 
23 | `spatial_attack_pytorch_resnet18.py` shows how to use the Spatial Attack. This attack
24 | is a bit special because it doesn't use Lp balls and instead considers translations
25 | and rotations, which is why it has its own example. All the other attacks can be
26 | used like Linf PGD in the examples above.
27 | 
28 | `substituion_model_pytorch_resnet18.py` shows how to replace the gradient of
29 | a model with the gradient of another model. This can be useful when the original
30 | model has bad gradients ("gradient masking", "obfuscated gradients").
31 | 
32 | The `zoo` folder shows how a model can be shared in a Foolbox Model Zoo compatible way.
33 | 
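
As a rough sketch of the worst-case aggregation described above (this is what `multiple_attacks_pytorch_resnet18.py` below does with the `success` arrays returned by each attack; the shapes here are placeholders):

```python
import numpy as np

# success[i, j, k] is True if attack i found an adversarial example for
# sample k at epsilon j (placeholder shape: 3 attacks, 5 epsilons, 16 samples)
success = np.zeros((3, 5, 16), dtype=bool)

# a sample counts as successfully attacked if *any* attack succeeded on it
worst_case = success.max(axis=0)                  # shape: (n_epsilons, n_samples)
robust_accuracy = 1.0 - worst_case.mean(axis=-1)  # accuracy under the best attack per sample
```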


--------------------------------------------------------------------------------
/examples/multiple_attacks_pytorch_resnet18.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | import torchvision.models as models
 3 | import eagerpy as ep
 4 | from foolbox import PyTorchModel, accuracy, samples
 5 | import foolbox.attacks as fa
 6 | import numpy as np
 7 | 
 8 | 
 9 | if __name__ == "__main__":
10 |     # instantiate a model (could also be a TensorFlow or JAX model)
11 |     model = models.resnet18(pretrained=True).eval()
12 |     preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
13 |     fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
14 | 
15 |     # get data and test the model
16 |     # wrapping the tensors with ep.astensors is optional, but it allows
17 |     # us to work with EagerPy tensors in the following
18 |     images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
19 |     clean_acc = accuracy(fmodel, images, labels)
20 |     print(f"clean accuracy:  {clean_acc * 100:.1f} %")
21 |     print("")
22 | 
23 |     attacks = [
24 |         fa.FGSM(),
25 |         fa.LinfPGD(),
26 |         fa.LinfBasicIterativeAttack(),
27 |         fa.LinfAdditiveUniformNoiseAttack(),
28 |         fa.LinfDeepFoolAttack(),
29 |     ]
30 | 
31 |     epsilons = [
32 |         0.0,
33 |         0.0005,
34 |         0.001,
35 |         0.0015,
36 |         0.002,
37 |         0.003,
38 |         0.005,
39 |         0.01,
40 |         0.02,
41 |         0.03,
42 |         0.1,
43 |         0.3,
44 |         0.5,
45 |         1.0,
46 |     ]
47 |     print("epsilons")
48 |     print(epsilons)
49 |     print("")
50 | 
51 |     attack_success = np.zeros((len(attacks), len(epsilons), len(images)), dtype=bool)
52 |     for i, attack in enumerate(attacks):
53 |         _, _, success = attack(fmodel, images, labels, epsilons=epsilons)
54 |         assert success.shape == (len(epsilons), len(images))
55 |         success_ = success.numpy()
56 |         assert success_.dtype == bool
57 |         attack_success[i] = success_
58 |         print(attack)
59 |         print("  ", 1.0 - success_.mean(axis=-1).round(2))
60 | 
61 |     # calculate and report the robust accuracy (the accuracy of the model when
62 |     # it is attacked) using the best attack per sample
63 |     robust_accuracy = 1.0 - attack_success.max(axis=0).mean(axis=-1)
64 |     print("")
65 |     print("-" * 79)
66 |     print("")
67 |     print("worst case (best attack per-sample)")
68 |     print("  ", robust_accuracy.round(2))
69 |     print("")
70 | 
71 |     print("robust accuracy for perturbations with")
72 |     for eps, acc in zip(epsilons, robust_accuracy):
73 |         print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
74 | 


--------------------------------------------------------------------------------
/examples/single_attack_pytorch_resnet18.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | """
 3 | A simple example that demonstrates how to run a single attack against
 4 | a PyTorch ResNet-18 model for different epsilons and how to then report
 5 | the robust accuracy.
 6 | """
 7 | import torchvision.models as models
 8 | import eagerpy as ep
 9 | from foolbox import PyTorchModel, accuracy, samples
10 | from foolbox.attacks import LinfPGD
11 | 
12 | 
13 | def main() -> None:
14 |     # instantiate a model (could also be a TensorFlow or JAX model)
15 |     model = models.resnet18(pretrained=True).eval()
16 |     preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
17 |     fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
18 | 
19 |     # get data and test the model
20 |     # wrapping the tensors with ep.astensors is optional, but it allows
21 |     # us to work with EagerPy tensors in the following
22 |     images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
23 |     clean_acc = accuracy(fmodel, images, labels)
24 |     print(f"clean accuracy:  {clean_acc * 100:.1f} %")
25 | 
26 |     # apply the attack
27 |     attack = LinfPGD()
28 |     epsilons = [
29 |         0.0,
30 |         0.0002,
31 |         0.0005,
32 |         0.0008,
33 |         0.001,
34 |         0.0015,
35 |         0.002,
36 |         0.003,
37 |         0.01,
38 |         0.1,
39 |         0.3,
40 |         0.5,
41 |         1.0,
42 |     ]
43 |     raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)
44 | 
45 |     # calculate and report the robust accuracy (the accuracy of the model when
46 |     # it is attacked)
47 |     robust_accuracy = 1 - success.float32().mean(axis=-1)
48 |     print("robust accuracy for perturbations with")
49 |     for eps, acc in zip(epsilons, robust_accuracy):
50 |         print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
51 | 
52 |     # we can also manually check this
53 |     # we will use the clipped advs instead of the raw advs, otherwise
54 |     # we would need to check if the perturbation sizes are actually
55 |     # within the specified epsilon bound
56 |     print()
57 |     print("we can also manually check this:")
58 |     print()
59 |     print("robust accuracy for perturbations with")
60 |     for eps, advs_ in zip(epsilons, clipped_advs):
61 |         acc2 = accuracy(fmodel, advs_, labels)
62 |         print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
63 |         print("    perturbation sizes:")
64 |         perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2, 3)).numpy()
65 |         print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
66 |         if acc2 == 0:
67 |             break
68 | 
69 | 
70 | if __name__ == "__main__":
71 |     main()
72 | 


--------------------------------------------------------------------------------
/examples/single_attack_tensorflow_resnet50.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | import tensorflow as tf
 3 | import eagerpy as ep
 4 | from foolbox import TensorFlowModel, accuracy, samples, Model
 5 | from foolbox.attacks import LinfPGD
 6 | 
 7 | 
 8 | def main() -> None:
 9 |     # instantiate a model (could also be a TensorFlow or JAX model)
10 |     model = tf.keras.applications.ResNet50(weights="imagenet")
11 |     pre = dict(flip_axis=-1, mean=[104.0, 116.0, 123.0])  # RGB to BGR
12 |     fmodel: Model = TensorFlowModel(model, bounds=(0, 255), preprocessing=pre)
13 |     fmodel = fmodel.transform_bounds((0, 1))
14 | 
15 |     # get data and test the model
16 |     # wrapping the tensors with ep.astensors is optional, but it allows
17 |     # us to work with EagerPy tensors in the following
18 |     images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
19 |     clean_acc = accuracy(fmodel, images, labels)
20 |     print(f"clean accuracy:  {clean_acc * 100:.1f} %")
21 | 
22 |     # apply the attack
23 |     attack = LinfPGD()
24 |     epsilons = [
25 |         0.0,
26 |         0.0002,
27 |         0.0005,
28 |         0.0008,
29 |         0.001,
30 |         0.0015,
31 |         0.002,
32 |         0.003,
33 |         0.01,
34 |         0.1,
35 |         0.3,
36 |         0.5,
37 |         1.0,
38 |     ]
39 |     raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)
40 | 
41 |     # calculate and report the robust accuracy (the accuracy of the model when
42 |     # it is attacked)
43 |     robust_accuracy = 1 - success.float32().mean(axis=-1)
44 |     print("robust accuracy for perturbations with")
45 |     for eps, acc in zip(epsilons, robust_accuracy):
46 |         print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
47 | 
48 |     # we can also manually check this
49 |     # we will use the clipped advs instead of the raw advs, otherwise
50 |     # we would need to check if the perturbation sizes are actually
51 |     # within the specified epsilon bound
52 |     print()
53 |     print("we can also manually check this:")
54 |     print()
55 |     print("robust accuracy for perturbations with")
56 |     for eps, advs_ in zip(epsilons, clipped_advs):
57 |         acc2 = accuracy(fmodel, advs_, labels)
58 |         print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
59 |         print("    perturbation sizes:")
60 |         perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2, 3)).numpy()
61 |         print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
62 |         if acc2 == 0:
63 |             break
64 | 
65 | 
66 | if __name__ == "__main__":
67 |     main()
68 | 


--------------------------------------------------------------------------------
/examples/spatial_attack_pytorch_resnet18.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | """
 3 | The spatial attack is a very special attack because it tries to find adversarial
 4 | perturbations using a set of translations and rotations rather than within an Lp ball.
 5 | It therefore has a slightly different interface.
 6 | """
 7 | import torchvision.models as models
 8 | import eagerpy as ep
 9 | from foolbox import PyTorchModel, accuracy, samples
10 | import foolbox.attacks as fa
11 | 
12 | 
13 | def main() -> None:
14 |     # instantiate a model (could also be a TensorFlow or JAX model)
15 |     model = models.resnet18(pretrained=True).eval()
16 |     preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
17 |     fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
18 | 
19 |     # get data and test the model
20 |     # wrapping the tensors with ep.astensors is optional, but it allows
21 |     # us to work with EagerPy tensors in the following
22 |     images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
23 |     clean_acc = accuracy(fmodel, images, labels) * 100
24 |     print(f"clean accuracy:  {clean_acc:.1f} %")
25 | 
26 |     # the attack tries combinations of the specified rotations and translations of an image
27 |     # and stops early once adversarial rotations and translations are found for all images
28 |     attack = fa.SpatialAttack(
29 |         max_translation=6,  # 6px so x in [x-6, x+6] and y in [y-6, y+6]
30 |         num_translations=6,  # number of translations in x, y.
31 |         max_rotation=20,  # +- rotation in degrees
32 |         num_rotations=5,  # number of rotations
33 |         # max total iterations = num_rotations * num_translations**2
34 |     )
35 | 
36 |     # report the success rate of the attack (percentage of samples that could
37 |     # be adversarially perturbed) and the robust accuracy (the remaining accuracy
38 |     # of the model when it is attacked)
39 |     xp_, _, success = attack(fmodel, images, labels)
40 |     suc = success.float32().mean().item() * 100
41 |     print(
42 |         f"attack success:  {suc:.1f} %"
43 |         " (for the specified rotation and translation bounds)"
44 |     )
45 |     print(
46 |         f"robust accuracy: {100 - suc:.1f} %"
47 |         " (for the specified rotation and translation bounds)"
48 |     )
49 | 
50 | 
51 | if __name__ == "__main__":
52 |     main()
53 | 


--------------------------------------------------------------------------------
/examples/substituion_model_pytorch_resnet18.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | # mypy: no-disallow-untyped-defs
 3 | """
 4 | Sometimes one wants to replace the gradient of a model with a different gradient
 5 | from another model to make the attack more reliable. That is, the forward pass
 6 | should go through model 1, but the backward pass should go through model 2.
 7 | This example shows how that can be done in Foolbox.
 8 | """
 9 | import torchvision.models as models
10 | import eagerpy as ep
11 | from foolbox import PyTorchModel, accuracy, samples
12 | from foolbox.attacks import LinfPGD
13 | from foolbox.attacks.base import get_criterion
14 | 
15 | 
16 | def main() -> None:
17 |     # instantiate a model (could also be a TensorFlow or JAX model)
18 |     model = models.resnet18(pretrained=True).eval()
19 |     preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
20 |     fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
21 | 
22 |     # get data and test the model
23 |     # wrapping the tensors with ep.astensors is optional, but it allows
24 |     # us to work with EagerPy tensors in the following
25 |     images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
26 |     clean_acc = accuracy(fmodel, images, labels)
27 |     print(f"clean accuracy:  {clean_acc * 100:.1f} %")
28 | 
29 |     # replace the gradient with the gradient from another model
30 |     model2 = fmodel  # demo, we just use the same model
31 | 
32 |     # TODO: this is still a bit annoying because we need
33 |     # to overwrite run to get the labels
34 |     class Attack(LinfPGD):
35 |         def value_and_grad(self, loss_fn, x):
36 |             val1 = loss_fn(x)
37 |             loss_fn2 = self.get_loss_fn(model2, self.labels)
38 |             _, grad2 = ep.value_and_grad(loss_fn2, x)
39 |             return val1, grad2
40 | 
41 |         def run(self, model, inputs, criterion, *, epsilon, **kwargs):
42 |             criterion_ = get_criterion(criterion)
43 |             self.labels = criterion_.labels
44 |             return super().run(model, inputs, criterion_, epsilon=epsilon, **kwargs)
45 | 
46 |     # apply the attack
47 |     attack = Attack()
48 |     epsilons = [
49 |         0.0,
50 |         0.0002,
51 |         0.0005,
52 |         0.0008,
53 |         0.001,
54 |         0.0015,
55 |         0.002,
56 |         0.003,
57 |         0.01,
58 |         0.1,
59 |         0.3,
60 |         0.5,
61 |         1.0,
62 |     ]
63 |     raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)
64 | 
65 |     # calculate and report the robust accuracy (the accuracy of the model when
66 |     # it is attacked)
67 |     robust_accuracy = 1 - success.float32().mean(axis=-1)
68 |     print("robust accuracy for perturbations with")
69 |     for eps, acc in zip(epsilons, robust_accuracy):
70 |         print(f"  Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
71 | 
72 |     # we can also manually check this
73 |     # we will use the clipped advs instead of the raw advs, otherwise
74 |     # we would need to check if the perturbation sizes are actually
75 |     # within the specified epsilon bound
76 |     print()
77 |     print("we can also manually check this:")
78 |     print()
79 |     print("robust accuracy for perturbations with")
80 |     for eps, advs_ in zip(epsilons, clipped_advs):
81 |         acc2 = accuracy(fmodel, advs_, labels)
82 |         print(f"  Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
83 |         print("    perturbation sizes:")
84 |         perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2, 3)).numpy()
85 |         print("    ", str(perturbation_sizes).replace("\n", "\n" + "    "))
86 |         if acc2 == 0:
87 |             break
88 | 
89 | 
90 | if __name__ == "__main__":
91 |     main()
92 | 


--------------------------------------------------------------------------------
/examples/zoo/mnist/foolbox_model.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | import torch
 3 | import torch.nn as nn
 4 | import os
 5 | from foolbox.models import PyTorchModel
 6 | from foolbox.utils import accuracy, samples
 7 | 
 8 | 
 9 | def create() -> PyTorchModel:
10 |     model = nn.Sequential(
11 |         nn.Conv2d(1, 32, 3),
12 |         nn.ReLU(),
13 |         nn.Conv2d(32, 64, 3),
14 |         nn.ReLU(),
15 |         nn.MaxPool2d(2),
16 |         nn.Dropout2d(0.25),
17 |         nn.Flatten(),
18 |         nn.Linear(9216, 128),
19 |         nn.ReLU(),
20 |         nn.Dropout2d(0.5),
21 |         nn.Linear(128, 10),
22 |     )
23 |     path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mnist_cnn.pth")
24 |     model.load_state_dict(torch.load(path))  # type: ignore
25 |     model.eval()
26 |     preprocessing = dict(mean=0.1307, std=0.3081)
27 |     fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
28 |     return fmodel
29 | 
30 | 
31 | if __name__ == "__main__":
32 |     # test the model
33 |     fmodel = create()
34 |     images, labels = samples(fmodel, dataset="mnist", batchsize=20)
35 |     print(accuracy(fmodel, images, labels))
36 | 
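
A minimal usage sketch (not part of the repository) showing how the model returned by create() could be attacked; it follows the PGD examples above and assumes the script lives next to foolbox_model.py:

import eagerpy as ep
from foolbox.attacks import LinfPGD
from foolbox.utils import accuracy, samples

from foolbox_model import create  # the create() defined above

fmodel = create()
images, labels = ep.astensors(*samples(fmodel, dataset="mnist", batchsize=20))
print(f"clean accuracy: {accuracy(fmodel, images, labels) * 100:.1f} %")

# sweep a few Linf budgets with PGD and report the robust accuracy
attack = LinfPGD()
epsilons = [0.05, 0.1, 0.2, 0.3]
raw, clipped, success = attack(fmodel, images, labels, epsilons=epsilons)
robust_accuracy = 1 - success.float32().mean(axis=-1)
for eps, acc in zip(epsilons, robust_accuracy):
    print(f"  Linf norm ≤ {eps}: {acc.item() * 100:4.1f} %")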


--------------------------------------------------------------------------------
/examples/zoo/mnist/mnist_cnn.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/examples/zoo/mnist/mnist_cnn.pth


--------------------------------------------------------------------------------
/foolbox/VERSION:
--------------------------------------------------------------------------------
1 | 3.3.4
2 | 


--------------------------------------------------------------------------------
/foolbox/__init__.py:
--------------------------------------------------------------------------------
 1 | from os.path import join as _join
 2 | from os.path import dirname as _dirname
 3 | 
 4 | with open(_join(_dirname(__file__), "VERSION")) as _f:
 5 |     __version__ = _f.read().strip()
 6 | 
 7 | # internal modules
 8 | # ----------------
 9 | 
10 | from . import devutils  # noqa: F401
11 | from . import tensorboard  # noqa: F401
12 | from . import types  # noqa: F401
13 | 
14 | # user-facing modules
15 | # -------------------
16 | 
17 | from .distances import Distance  # noqa: F401
18 | from . import distances  # noqa: F401
19 | 
20 | from .criteria import Criterion  # noqa: F401
21 | from .criteria import Misclassification  # noqa: F401
22 | from .criteria import TargetedMisclassification  # noqa: F401
23 | 
24 | from . import plot  # noqa: F401
25 | 
26 | from .models import Model  # noqa: F401
27 | from .models import PyTorchModel  # noqa: F401
28 | from .models import TensorFlowModel  # noqa: F401
29 | from .models import JAXModel  # noqa: F401
30 | from .models import NumPyModel  # noqa: F401
31 | 
32 | from .utils import accuracy  # noqa: F401
33 | from .utils import samples  # noqa: F401
34 | 
35 | from .attacks import Attack  # noqa: F401
36 | from . import attacks  # noqa: F401
37 | 
38 | from . import zoo  # noqa: F401
39 | 
40 | from . import gradient_estimators  # noqa: F401
41 | 


--------------------------------------------------------------------------------
/foolbox/attacks/__init__.py:
--------------------------------------------------------------------------------
  1 | from .base import Attack  # noqa: F401
  2 | 
  3 | # FixedEpsilonAttack subclasses
  4 | from .contrast import L2ContrastReductionAttack  # noqa: F401
  5 | from .virtual_adversarial_attack import VirtualAdversarialAttack  # noqa: F401
  6 | from .ddn import DDNAttack  # noqa: F401
  7 | from .projected_gradient_descent import (  # noqa: F401
  8 |     L1ProjectedGradientDescentAttack,
  9 |     L2ProjectedGradientDescentAttack,
 10 |     LinfProjectedGradientDescentAttack,
 11 |     L1AdamProjectedGradientDescentAttack,
 12 |     L2AdamProjectedGradientDescentAttack,
 13 |     LinfAdamProjectedGradientDescentAttack,
 14 | )
 15 | from .basic_iterative_method import (  # noqa: F401
 16 |     L1BasicIterativeAttack,
 17 |     L2BasicIterativeAttack,
 18 |     LinfBasicIterativeAttack,
 19 |     L1AdamBasicIterativeAttack,
 20 |     L2AdamBasicIterativeAttack,
 21 |     LinfAdamBasicIterativeAttack,
 22 | )
 23 | from .mi_fgsm import (  # noqa: F401
 24 |     L1MomentumIterativeFastGradientMethod,
 25 |     L2MomentumIterativeFastGradientMethod,
 26 |     LinfMomentumIterativeFastGradientMethod,
 27 | )
 28 | from .fast_gradient_method import (  # noqa: F401
 29 |     L1FastGradientAttack,
 30 |     L2FastGradientAttack,
 31 |     LinfFastGradientAttack,
 32 | )
 33 | from .additive_noise import (  # noqa: F401
 34 |     L2AdditiveGaussianNoiseAttack,
 35 |     L2AdditiveUniformNoiseAttack,
 36 |     L2ClippingAwareAdditiveGaussianNoiseAttack,
 37 |     L2ClippingAwareAdditiveUniformNoiseAttack,
 38 |     LinfAdditiveUniformNoiseAttack,
 39 |     L2RepeatedAdditiveGaussianNoiseAttack,
 40 |     L2RepeatedAdditiveUniformNoiseAttack,
 41 |     L2ClippingAwareRepeatedAdditiveGaussianNoiseAttack,
 42 |     L2ClippingAwareRepeatedAdditiveUniformNoiseAttack,
 43 |     LinfRepeatedAdditiveUniformNoiseAttack,
 44 | )
 45 | from .sparse_l1_descent_attack import SparseL1DescentAttack  # noqa: F401
 46 | 
 47 | # MinimizatonAttack subclasses
 48 | from .inversion import InversionAttack  # noqa: F401
 49 | from .contrast_min import (  # noqa: F401
 50 |     BinarySearchContrastReductionAttack,
 51 |     LinearSearchContrastReductionAttack,
 52 | )
 53 | from .carlini_wagner import L2CarliniWagnerAttack  # noqa: F401
 54 | from .newtonfool import NewtonFoolAttack  # noqa: F401
 55 | from .ead import EADAttack  # noqa: F401
 56 | from .blur import GaussianBlurAttack  # noqa: F401
 57 | from .spatial_attack import SpatialAttack  # noqa: F401
 58 | from .deepfool import L2DeepFoolAttack, LinfDeepFoolAttack  # noqa: F401
 59 | from .saltandpepper import SaltAndPepperNoiseAttack  # noqa: F401
 60 | from .blended_noise import LinearSearchBlendedUniformNoiseAttack  # noqa: F401
 61 | from .binarization import BinarizationRefinementAttack  # noqa: F401
 62 | from .dataset_attack import DatasetAttack  # noqa: F401
 63 | from .boundary_attack import BoundaryAttack  # noqa: F401
 64 | from .hop_skip_jump import HopSkipJumpAttack  # noqa: F401
 65 | from .brendel_bethge import (  # noqa: F401
 66 |     L0BrendelBethgeAttack,
 67 |     L1BrendelBethgeAttack,
 68 |     L2BrendelBethgeAttack,
 69 |     LinfinityBrendelBethgeAttack,
 70 | )
 71 | from .fast_minimum_norm import (  # noqa: F401
 72 |     L0FMNAttack,
 73 |     L1FMNAttack,
 74 |     L2FMNAttack,
 75 |     LInfFMNAttack,
 76 | )
 77 | from .gen_attack import GenAttack  # noqa: F401
 78 | from .pointwise import PointwiseAttack  # noqa: F401
 79 | 
 80 | # from .blended_noise import LinearSearchBlendedUniformNoiseAttack  # noqa: F401
 81 | # from .brendel_bethge import (  # noqa: F401
 82 | #     L0BrendelBethgeAttack,
 83 | #     L1BrendelBethgeAttack,
 84 | #     L2BrendelBethgeAttack,
 85 | #     LinfinityBrendelBethgeAttack,
 86 | # )
 87 | # from .additive_noise import L2AdditiveGaussianNoiseAttack  # noqa: F401
 88 | # from .additive_noise import L2AdditiveUniformNoiseAttack  # noqa: F401
 89 | # from .additive_noise import LinfAdditiveUniformNoiseAttack  # noqa: F401
 90 | # from .additive_noise import L2RepeatedAdditiveGaussianNoiseAttack  # noqa: F401
 91 | # from .additive_noise import L2RepeatedAdditiveUniformNoiseAttack  # noqa: F401
 92 | # from .additive_noise import LinfRepeatedAdditiveUniformNoiseAttack  # noqa: F401
 93 | # from .saltandpepper import SaltAndPepperNoiseAttack  # noqa: F401
 94 | 
 95 | FGM = L2FastGradientAttack
 96 | FGSM = LinfFastGradientAttack
 97 | L1PGD = L1ProjectedGradientDescentAttack
 98 | L2PGD = L2ProjectedGradientDescentAttack
 99 | LinfPGD = LinfProjectedGradientDescentAttack
100 | PGD = LinfPGD
101 | MIFGSM = LinfMomentumIterativeFastGradientMethod
102 | 
103 | L1AdamPGD = L1AdamProjectedGradientDescentAttack
104 | L2AdamPGD = L2AdamProjectedGradientDescentAttack
105 | LinfAdamPGD = LinfAdamProjectedGradientDescentAttack
106 | AdamPGD = LinfAdamPGD
107 | 
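
The assignments above are plain name bindings, so the common acronyms can be used interchangeably with the full class names. A minimal sketch (fmodel, images and labels are assumed to exist, as in the repository examples):

import foolbox.attacks as fa

# the aliases resolve to the very same classes
assert fa.PGD is fa.LinfProjectedGradientDescentAttack
assert fa.FGSM is fa.LinfFastGradientAttack
assert fa.FGM is fa.L2FastGradientAttack

attack = fa.PGD()  # same as fa.LinfProjectedGradientDescentAttack()
# raw, clipped, success = attack(fmodel, images, labels, epsilons=[8 / 255])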


--------------------------------------------------------------------------------
/foolbox/attacks/binarization.py:
--------------------------------------------------------------------------------
  1 | from typing import Union, Optional, Any
  2 | from typing_extensions import Literal
  3 | import eagerpy as ep
  4 | import numpy as np
  5 | 
  6 | from ..models import Model
  7 | 
  8 | from ..criteria import Criterion
  9 | 
 10 | from ..distances import Distance
 11 | 
 12 | from .base import FlexibleDistanceMinimizationAttack
 13 | from .base import T
 14 | from .base import get_is_adversarial
 15 | from .base import get_criterion
 16 | from .base import raise_if_kwargs
 17 | from .base import verify_input_bounds
 18 | 
 19 | 
 20 | class BinarizationRefinementAttack(FlexibleDistanceMinimizationAttack):
 21 |     """For models that preprocess their inputs by binarizing the
 22 |     inputs, this attack can improve adversarials found by other
 23 |     attacks. It does this by utilizing information about the
 24 |     binarization and mapping values to the corresponding value in
 25 |     the clean input or to the right side of the threshold.
 26 | 
 27 |     Args:
 28 |         threshold : The threshold used by the models binarization. If none,
 29 |             defaults to (model.bounds()[1] - model.bounds()[0]) / 2.
 30 |         included_in : Whether the threshold value itself belongs to the lower or
 31 |             upper interval.
 32 |     """
 33 | 
 34 |     def __init__(
 35 |         self,
 36 |         *,
 37 |         distance: Optional[Distance] = None,
 38 |         threshold: Optional[float] = None,
 39 |         included_in: Union[Literal["lower"], Literal["upper"]] = "upper",
 40 |     ):
 41 |         super().__init__(distance=distance)
 42 |         self.threshold = threshold
 43 |         self.included_in = included_in
 44 | 
 45 |     def run(
 46 |         self,
 47 |         model: Model,
 48 |         inputs: T,
 49 |         criterion: Union[Criterion, T],
 50 |         *,
 51 |         early_stop: Optional[float] = None,
 52 |         starting_points: Optional[T] = None,
 53 |         **kwargs: Any,
 54 |     ) -> T:
 55 |         raise_if_kwargs(kwargs)
 56 |         if starting_points is None:
 57 |             raise ValueError("BinarizationRefinementAttack requires starting_points")
 58 |         (o, x), restore_type = ep.astensors_(inputs, starting_points)
 59 |         del inputs, starting_points, kwargs
 60 | 
 61 |         verify_input_bounds(x, model)
 62 | 
 63 |         criterion = get_criterion(criterion)
 64 |         is_adversarial = get_is_adversarial(criterion, model)
 65 | 
 66 |         if self.threshold is None:
 67 |             min_, max_ = model.bounds
 68 |             threshold = (min_ + max_) / 2.0
 69 |         else:
 70 |             threshold = self.threshold
 71 | 
 72 |         assert o.dtype == x.dtype
 73 | 
 74 |         nptype = o.reshape(-1)[0].numpy().dtype.type
 75 |         if nptype not in [np.float16, np.float32, np.float64]:
 76 |             raise ValueError(  # pragma: no cover
 77 |                 f"expected dtype to be float16, float32 or float64, found '{nptype}'"
 78 |             )
 79 | 
 80 |         threshold = nptype(threshold)
 81 |         offset = nptype(1.0)
 82 | 
 83 |         if self.included_in == "lower":
 84 |             lower_ = threshold
 85 |             upper_ = np.nextafter(threshold, threshold + offset)
 86 |         elif self.included_in == "upper":
 87 |             lower_ = np.nextafter(threshold, threshold - offset)
 88 |             upper_ = threshold
 89 |         else:
 90 |             raise ValueError(
 91 |                 f"expected included_in to be 'lower' or 'upper', found '{self.included_in}'"
 92 |             )
 93 | 
 94 |         assert lower_ < upper_
 95 | 
 96 |         p = ep.full_like(o, ep.nan)
 97 | 
 98 |         lower = ep.ones_like(o) * lower_
 99 |         upper = ep.ones_like(o) * upper_
100 | 
101 |         indices = ep.logical_and(o <= lower, x <= lower)
102 |         p = ep.where(indices, o, p)
103 | 
104 |         indices = ep.logical_and(o <= lower, x >= upper)
105 |         p = ep.where(indices, upper, p)
106 | 
107 |         indices = ep.logical_and(o >= upper, x <= lower)
108 |         p = ep.where(indices, lower, p)
109 | 
110 |         indices = ep.logical_and(o >= upper, x >= upper)
111 |         p = ep.where(indices, o, p)
112 | 
113 |         assert not ep.any(ep.isnan(p))
114 | 
115 |         is_adv1 = is_adversarial(x)
116 |         is_adv2 = is_adversarial(p)
117 |         if (is_adv1 != is_adv2).any():
118 |             raise ValueError(
119 |                 "The specified threshold does not match what is done by the model."
120 |             )
121 |         return restore_type(p)
122 | 
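
A minimal usage sketch (assuming a model fmodel whose preprocessing binarizes its input at 0.5, plus images and labels): since this attack only refines existing adversarial examples, starting_points must be provided, e.g. from another attack.

from foolbox import Misclassification
from foolbox.attacks import SaltAndPepperNoiseAttack, BinarizationRefinementAttack

# fmodel, images and labels are assumed to be defined as in the examples
criterion = Misclassification(labels)

# first find (possibly large) adversarial examples with some other attack
starting_points = SaltAndPepperNoiseAttack().run(fmodel, images, criterion)

# then map their values back to the clean input or just across the threshold
attack = BinarizationRefinementAttack(threshold=0.5, included_in="upper")
refined = attack.run(fmodel, images, criterion, starting_points=starting_points)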


--------------------------------------------------------------------------------
/foolbox/attacks/blended_noise.py:
--------------------------------------------------------------------------------
  1 | from typing import Union, Optional, Any
  2 | import numpy as np
  3 | import eagerpy as ep
  4 | 
  5 | from ..devutils import atleast_kd
  6 | 
  7 | from ..distances import Distance
  8 | 
  9 | from .base import FlexibleDistanceMinimizationAttack
 10 | from .base import Model
 11 | from .base import Criterion
 12 | from .base import T
 13 | from .base import get_is_adversarial
 14 | from .base import get_criterion
 15 | from .base import raise_if_kwargs
 16 | 
 17 | import warnings
 18 | from .base import verify_input_bounds
 19 | 
 20 | 
 21 | class LinearSearchBlendedUniformNoiseAttack(FlexibleDistanceMinimizationAttack):
 22 |     """Blends the input with a uniform noise input until it is misclassified.
 23 | 
 24 |     Args:
 25 |         distance : Distance measure for which minimal adversarial examples are searched.
 26 |         directions : Number of random directions in which the perturbation is searched.
 27 |         steps : Number of blending steps between the original image and the random
 28 |             directions.
 29 |     """
 30 | 
 31 |     def __init__(
 32 |         self,
 33 |         *,
 34 |         distance: Optional[Distance] = None,
 35 |         directions: int = 1000,
 36 |         steps: int = 1000,
 37 |     ):
 38 |         super().__init__(distance=distance)
 39 |         self.directions = directions
 40 |         self.steps = steps
 41 | 
 42 |         if directions <= 0:
 43 |             raise ValueError("directions must be larger than 0")
 44 | 
 45 |     def run(
 46 |         self,
 47 |         model: Model,
 48 |         inputs: T,
 49 |         criterion: Union[Criterion, Any] = None,
 50 |         *,
 51 |         early_stop: Optional[float] = None,
 52 |         **kwargs: Any,
 53 |     ) -> T:
 54 |         raise_if_kwargs(kwargs)
 55 |         x, restore_type = ep.astensor_(inputs)
 56 |         criterion_ = get_criterion(criterion)
 57 |         del inputs, criterion, kwargs
 58 | 
 59 |         verify_input_bounds(x, model)
 60 | 
 61 |         is_adversarial = get_is_adversarial(criterion_, model)
 62 | 
 63 |         min_, max_ = model.bounds
 64 | 
 65 |         N = len(x)
 66 | 
 67 |         for j in range(self.directions):
 68 |             # random noise inputs tend to be classified into the same class,
 69 |             # so we might need to make very many draws if the original class
 70 |             # is that one
 71 |             random_ = ep.uniform(x, x.shape, min_, max_)
 72 |             is_adv_ = atleast_kd(is_adversarial(random_), x.ndim)
 73 | 
 74 |             if j == 0:
 75 |                 random = random_
 76 |                 is_adv = is_adv_
 77 |             else:
 78 |                 random = ep.where(is_adv, random, random_)
 79 |                 is_adv = is_adv.logical_or(is_adv_)
 80 | 
 81 |             if is_adv.all():
 82 |                 break
 83 | 
 84 |         if not is_adv.all():
 85 |             warnings.warn(
 86 |                 f"{self.__class__.__name__} failed to draw sufficient random"
 87 |                 f" inputs that are adversarial ({is_adv.sum()} / {N})."
 88 |             )
 89 | 
 90 |         x0 = x
 91 | 
 92 |         epsilons = np.linspace(0, 1, num=self.steps + 1, dtype=np.float32)
 93 |         best = ep.ones(x, (N,))
 94 | 
 95 |         for epsilon in epsilons:
 96 |             x = (1 - epsilon) * x0 + epsilon * random
 97 |             # TODO: due to limited floating point precision, clipping can be required
 98 |             is_adv = is_adversarial(x)
 99 | 
100 |             epsilon = epsilon.item()
101 | 
102 |             best = ep.minimum(ep.where(is_adv, epsilon, 1.0), best)
103 | 
104 |             if (best < 1).all():
105 |                 break
106 | 
107 |         best = atleast_kd(best, x0.ndim)
108 |         x = (1 - best) * x0 + best * random
109 | 
110 |         return restore_type(x)
111 | 


--------------------------------------------------------------------------------
/foolbox/attacks/blur.py:
--------------------------------------------------------------------------------
  1 | from typing import Union, Optional, Any
  2 | import numpy as np
  3 | from scipy.ndimage import gaussian_filter
  4 | import eagerpy as ep
  5 | 
  6 | from ..devutils import atleast_kd
  7 | 
  8 | from ..models import Model
  9 | 
 10 | from ..criteria import Criterion
 11 | 
 12 | from ..distances import Distance
 13 | 
 14 | from .base import FlexibleDistanceMinimizationAttack
 15 | from .base import T
 16 | from .base import get_is_adversarial
 17 | from .base import get_criterion
 18 | from .base import get_channel_axis
 19 | from .base import raise_if_kwargs
 20 | from .base import verify_input_bounds
 21 | 
 22 | 
 23 | class GaussianBlurAttack(FlexibleDistanceMinimizationAttack):
 24 |     """Blurs the inputs using a Gaussian filter with linearly
 25 |     increasing standard deviation.
 26 | 
 27 |     Args:
 28 |         steps : Number of sigma values tested between 0 and max_sigma.
 29 |         channel_axis : Index of the channel axis in the input data.
 30 |         max_sigma : Maximally allowed sigma value of the Gaussian blur.
 31 |     """
 32 | 
 33 |     def __init__(
 34 |         self,
 35 |         *,
 36 |         distance: Optional[Distance] = None,
 37 |         steps: int = 1000,
 38 |         channel_axis: Optional[int] = None,
 39 |         max_sigma: Optional[float] = None,
 40 |     ):
 41 |         super().__init__(distance=distance)
 42 |         self.steps = steps
 43 |         self.channel_axis = channel_axis
 44 |         self.max_sigma = max_sigma
 45 | 
 46 |     def run(
 47 |         self,
 48 |         model: Model,
 49 |         inputs: T,
 50 |         criterion: Union[Criterion, T],
 51 |         *,
 52 |         early_stop: Optional[float] = None,
 53 |         **kwargs: Any,
 54 |     ) -> T:
 55 |         raise_if_kwargs(kwargs)
 56 |         x, restore_type = ep.astensor_(inputs)
 57 |         del inputs, kwargs
 58 | 
 59 |         verify_input_bounds(x, model)
 60 | 
 61 |         criterion = get_criterion(criterion)
 62 |         is_adversarial = get_is_adversarial(criterion, model)
 63 | 
 64 |         if x.ndim != 4:
 65 |             raise NotImplementedError(
 66 |                 "only implemented for inputs with two spatial dimensions (and one channel and one batch dimension)"
 67 |             )
 68 | 
 69 |         if self.channel_axis is None:
 70 |             channel_axis = get_channel_axis(model, x.ndim)
 71 |         else:
 72 |             channel_axis = self.channel_axis % x.ndim
 73 | 
 74 |         if channel_axis is None:
 75 |             raise ValueError(
 76 |                 "cannot infer the data_format from the model, please specify"
 77 |                 " channel_axis when initializing the attack"
 78 |             )
 79 | 
 80 |         max_sigma: float
 81 |         if self.max_sigma is None:
 82 |             if channel_axis == 1:
 83 |                 h, w = x.shape[2:4]
 84 |             elif channel_axis == 3:
 85 |                 h, w = x.shape[1:3]
 86 |             else:
 87 |                 raise ValueError(
 88 |                     "expected 'channel_axis' to be 1 or 3, got {channel_axis}"
 89 |                 )
 90 |             max_sigma = max(h, w)
 91 |         else:
 92 |             max_sigma = self.max_sigma
 93 | 
 94 |         min_, max_ = model.bounds
 95 | 
 96 |         x0 = x
 97 |         x0_ = x0.numpy()
 98 | 
 99 |         result = x0
100 |         found = is_adversarial(x0)
101 | 
102 |         epsilon = 0.0
103 |         stepsize = 1.0 / self.steps
104 |         for _ in range(self.steps):
105 |             # TODO: reduce the batch size to the ones that haven't been successful
106 | 
107 |             epsilon += stepsize
108 | 
109 |             sigmas = [epsilon * max_sigma] * x0.ndim
110 |             sigmas[0] = 0
111 |             sigmas[channel_axis] = 0
112 | 
113 |             # TODO: once we can implement gaussian_filter in eagerpy, avoid converting from numpy
114 |             x_ = gaussian_filter(x0_, sigmas)
115 |             x_ = np.clip(x_, min_, max_)
116 |             x = ep.from_numpy(x0, x_)
117 | 
118 |             is_adv = is_adversarial(x)
119 |             new_adv = ep.logical_and(is_adv, found.logical_not())
120 |             result = ep.where(atleast_kd(new_adv, x.ndim), x, result)
121 |             found = ep.logical_or(new_adv, found)
122 | 
123 |             if found.all():
124 |                 break
125 | 
126 |         return restore_type(result)
127 | 


--------------------------------------------------------------------------------
/foolbox/attacks/contrast.py:
--------------------------------------------------------------------------------
 1 | from typing import Union, Any
 2 | import eagerpy as ep
 3 | 
 4 | from ..devutils import flatten
 5 | from ..devutils import atleast_kd
 6 | 
 7 | from ..criteria import Criterion
 8 | 
 9 | from ..distances import l2
10 | 
11 | from ..models import Model
12 | 
13 | from .base import FixedEpsilonAttack
14 | from .base import T
15 | from .base import raise_if_kwargs
16 | from .base import verify_input_bounds
17 | 
18 | 
19 | class L2ContrastReductionAttack(FixedEpsilonAttack):
20 |     """Reduces the contrast of the input using a perturbation of the given size
21 | 
22 |     Args:
23 |         target : Target relative to the bounds from 0 (min) to 1 (max)
24 |             towards which the contrast is reduced
25 |     """
26 | 
27 |     distance = l2
28 | 
29 |     def __init__(self, *, target: float = 0.5):
30 |         self.target = target
31 | 
32 |     def run(
33 |         self,
34 |         model: Model,
35 |         inputs: T,
36 |         criterion: Union[Criterion, Any] = None,
37 |         *,
38 |         epsilon: float,
39 |         **kwargs: Any,
40 |     ) -> T:
41 |         raise_if_kwargs(kwargs)
42 |         x, restore_type = ep.astensor_(inputs)
43 |         del inputs, criterion, kwargs
44 | 
45 |         verify_input_bounds(x, model)
46 | 
47 |         min_, max_ = model.bounds
48 |         target = min_ + self.target * (max_ - min_)
49 | 
50 |         direction = target - x
51 |         norms = ep.norms.l2(flatten(direction), axis=-1)
52 |         scale = epsilon / atleast_kd(norms, direction.ndim)
53 |         scale = ep.minimum(scale, 1)
54 | 
55 |         x = x + scale * direction
56 |         x = x.clip(min_, max_)
57 |         return restore_type(x)
58 | 


--------------------------------------------------------------------------------
/foolbox/attacks/contrast_min.py:
--------------------------------------------------------------------------------
  1 | from typing import Union, Any, Optional
  2 | import eagerpy as ep
  3 | 
  4 | from ..devutils import atleast_kd
  5 | 
  6 | from ..models import Model
  7 | 
  8 | from ..criteria import Criterion
  9 | 
 10 | from ..distances import Distance
 11 | 
 12 | from .base import FlexibleDistanceMinimizationAttack
 13 | from .base import T
 14 | from .base import get_is_adversarial
 15 | from .base import get_criterion
 16 | from .base import raise_if_kwargs
 17 | from .base import verify_input_bounds
 18 | 
 19 | 
 20 | class BinarySearchContrastReductionAttack(FlexibleDistanceMinimizationAttack):
 21 |     """Reduces the contrast of the input using a binary search to find the
 22 |     smallest adversarial perturbation
 23 | 
 24 |     Args:
 25 |         distance : Distance measure for which minimal adversarial examples are searched.
 26 |         binary_search_steps : Number of iterations in the binary search.
 27 |             This controls the precision of the results.
 28 |         target : Target relative to the bounds from 0 (min) to 1 (max)
 29 |             towards which the contrast is reduced
 30 |     """
 31 | 
 32 |     def __init__(
 33 |         self,
 34 |         *,
 35 |         distance: Optional[Distance] = None,
 36 |         binary_search_steps: int = 15,
 37 |         target: float = 0.5,
 38 |     ):
 39 |         super().__init__(distance=distance)
 40 |         self.binary_search_steps = binary_search_steps
 41 |         self.target = target
 42 | 
 43 |     def run(
 44 |         self,
 45 |         model: Model,
 46 |         inputs: T,
 47 |         criterion: Union[Criterion, T],
 48 |         *,
 49 |         early_stop: Optional[float] = None,
 50 |         **kwargs: Any,
 51 |     ) -> T:
 52 |         raise_if_kwargs(kwargs)
 53 |         x, restore_type = ep.astensor_(inputs)
 54 |         del inputs, kwargs
 55 | 
 56 |         verify_input_bounds(x, model)
 57 | 
 58 |         criterion = get_criterion(criterion)
 59 |         is_adversarial = get_is_adversarial(criterion, model)
 60 | 
 61 |         min_, max_ = model.bounds
 62 |         target = min_ + self.target * (max_ - min_)
 63 |         direction = target - x
 64 | 
 65 |         lower_bound = ep.zeros(x, len(x))
 66 |         upper_bound = ep.ones(x, len(x))
 67 |         epsilons = lower_bound
 68 |         for _ in range(self.binary_search_steps):
 69 |             eps = atleast_kd(epsilons, x.ndim)
 70 |             is_adv = is_adversarial(x + eps * direction)
 71 |             lower_bound = ep.where(is_adv, lower_bound, epsilons)
 72 |             upper_bound = ep.where(is_adv, epsilons, upper_bound)
 73 |             epsilons = (lower_bound + upper_bound) / 2
 74 | 
 75 |         epsilons = upper_bound
 76 |         eps = atleast_kd(epsilons, x.ndim)
 77 |         xp = x + eps * direction
 78 |         return restore_type(xp)
 79 | 
 80 | 
 81 | class LinearSearchContrastReductionAttack(FlexibleDistanceMinimizationAttack):
 82 |     """Reduces the contrast of the input using a linear search to find the
 83 |     smallest adversarial perturbation"""
 84 | 
 85 |     def __init__(
 86 |         self,
 87 |         *,
 88 |         distance: Optional[Distance] = None,
 89 |         steps: int = 1000,
 90 |         target: float = 0.5,
 91 |     ):
 92 |         super().__init__(distance=distance)
 93 |         self.steps = steps
 94 |         self.target = target
 95 | 
 96 |     def run(
 97 |         self,
 98 |         model: Model,
 99 |         inputs: T,
100 |         criterion: Union[Criterion, T],
101 |         *,
102 |         early_stop: Optional[float] = None,
103 |         **kwargs: Any,
104 |     ) -> T:
105 |         raise_if_kwargs(kwargs)
106 |         x, restore_type = ep.astensor_(inputs)
107 |         del inputs, kwargs
108 | 
109 |         verify_input_bounds(x, model)
110 | 
111 |         criterion = get_criterion(criterion)
112 |         is_adversarial = get_is_adversarial(criterion, model)
113 | 
114 |         min_, max_ = model.bounds
115 |         target = min_ + self.target * (max_ - min_)
116 |         direction = target - x
117 | 
118 |         best = ep.ones(x, len(x))
119 | 
120 |         epsilon = 0.0
121 |         stepsize = 1.0 / self.steps
122 |         for _ in range(self.steps):
123 |             # TODO: reduce the batch size to the ones that have not yet been successful
124 | 
125 |             is_adv = is_adversarial(x + epsilon * direction)
126 |             is_best_adv = ep.logical_and(is_adv, best == 1)
127 |             best = ep.where(is_best_adv, epsilon, best)
128 | 
129 |             if (best < 1).all():
130 |                 break  # pragma: no cover
131 | 
132 |             epsilon += stepsize
133 | 
134 |         eps = atleast_kd(best, x.ndim)
135 |         xp = x + eps * direction
136 |         return restore_type(xp)
137 | 


--------------------------------------------------------------------------------
/foolbox/attacks/dataset_attack.py:
--------------------------------------------------------------------------------
  1 | from typing import Union, Optional, Any, List
  2 | import numpy as np
  3 | import eagerpy as ep
  4 | 
  5 | from ..devutils import atleast_kd
  6 | 
  7 | from ..models import Model
  8 | 
  9 | from ..distances import Distance
 10 | 
 11 | from ..criteria import Criterion
 12 | 
 13 | from .base import FlexibleDistanceMinimizationAttack
 14 | from .base import T
 15 | from .base import get_criterion
 16 | from .base import raise_if_kwargs
 17 | from .base import verify_input_bounds
 18 | 
 19 | 
 20 | class DatasetAttack(FlexibleDistanceMinimizationAttack):
 21 |     """Draws randomly from the given dataset until adversarial examples for all
 22 |     inputs have been found.
 23 | 
 24 |     To pass data from the dataset to this attack, call :meth:`feed()`.
 25 |     :meth:`feed()` can be called several times and should only be called with
 26 |     batches that are small enough that they can be passed through the model.
 27 | 
 28 |     Args:
 29 |         distance : Distance measure for which minimal adversarial examples are searched.
 30 |     """
 31 | 
 32 |     def __init__(self, *, distance: Optional[Distance] = None):
 33 |         super().__init__(distance=distance)
 34 |         self.raw_inputs: List[ep.Tensor] = []
 35 |         self.raw_outputs: List[ep.Tensor] = []
 36 |         self.inputs: Optional[ep.Tensor] = None
 37 |         self.outputs: Optional[ep.Tensor] = None
 38 | 
 39 |     def feed(self, model: Model, inputs: Any) -> None:
 40 |         x = ep.astensor(inputs)
 41 |         del inputs
 42 | 
 43 |         self.raw_inputs.append(x)
 44 |         self.raw_outputs.append(model(x))
 45 | 
 46 |     def process_raw(self) -> None:
 47 |         raw_inputs = self.raw_inputs
 48 |         raw_outputs = self.raw_outputs
 49 |         assert len(raw_inputs) == len(raw_outputs)
 50 |         assert (self.inputs is None) == (self.outputs is None)
 51 | 
 52 |         if self.inputs is None:
 53 |             if len(raw_inputs) == 0:
 54 |                 raise ValueError(
 55 |                     "DatasetAttack can only be called after data has been provided using 'feed()'"
 56 |                 )
 57 |         elif self.inputs is not None:
 58 |             assert self.outputs is not None
 59 |             raw_inputs = [self.inputs] + raw_inputs
 60 |             raw_outputs = [self.outputs] + raw_outputs
 61 | 
 62 |         self.inputs = ep.concatenate(raw_inputs, axis=0)
 63 |         self.outputs = ep.concatenate(raw_outputs, axis=0)
 64 |         self.raw_inputs = []
 65 |         self.raw_outputs = []
 66 | 
 67 |     def run(
 68 |         self,
 69 |         model: Model,
 70 |         inputs: T,
 71 |         criterion: Union[Criterion, T],
 72 |         *,
 73 |         early_stop: Optional[float] = None,
 74 |         **kwargs: Any,
 75 |     ) -> T:
 76 |         raise_if_kwargs(kwargs)
 77 |         self.process_raw()
 78 |         assert self.inputs is not None
 79 |         assert self.outputs is not None
 80 |         x, restore_type = ep.astensor_(inputs)
 81 |         del inputs, kwargs
 82 | 
 83 |         verify_input_bounds(x, model)
 84 | 
 85 |         criterion = get_criterion(criterion)
 86 | 
 87 |         result = x
 88 |         found = criterion(x, model(x))
 89 | 
 90 |         batch_size = len(x)
 91 | 
 92 |         # for every sample try every other sample
 93 |         index_pools: List[List[int]] = []
 94 |         for i in range(batch_size):
 95 |             indices = list(range(batch_size))
 96 |             indices.remove(i)
 97 |             np.random.shuffle(indices)
 98 |             index_pools.append(indices)
 99 | 
100 |         for i in range(batch_size - 1):
101 |             if found.all():
102 |                 break
103 | 
104 |             indices_np = np.array([pool[i] for pool in index_pools])
105 | 
106 |             xp = self.inputs[indices_np]
107 |             yp = self.outputs[indices_np]
108 |             is_adv = criterion(xp, yp)
109 | 
110 |             new_found = ep.logical_and(is_adv, found.logical_not())
111 |             result = ep.where(atleast_kd(new_found, result.ndim), xp, result)
112 |             found = ep.logical_or(found, new_found)
113 | 
114 |         return restore_type(result)
115 | 
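
A minimal usage sketch (fmodel, images and labels are assumed to be defined as in the repository examples): the attack must be fed candidate data via feed() before run() is called.

from foolbox import Misclassification
from foolbox.attacks import DatasetAttack

attack = DatasetAttack()

# feed() may be called several times with batches small enough for the model;
# here the input batch itself is reused as the candidate pool
attack.feed(fmodel, images)

# each input is replaced by a fed sample that satisfies the criterion
advs = attack.run(fmodel, images, Misclassification(labels))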


--------------------------------------------------------------------------------
/foolbox/attacks/fast_gradient_method.py:
--------------------------------------------------------------------------------
  1 | from .gradient_descent_base import L1BaseGradientDescent
  2 | from .gradient_descent_base import L2BaseGradientDescent
  3 | from .gradient_descent_base import LinfBaseGradientDescent
  4 | from ..models.base import Model
  5 | from ..criteria import Misclassification, TargetedMisclassification
  6 | from .base import T
  7 | from typing import Union, Any
  8 | 
  9 | 
 10 | class L1FastGradientAttack(L1BaseGradientDescent):
 11 |     """Fast Gradient Method (FGM) using the L1 norm
 12 | 
 13 |     Args:
 14 |         random_start : Controls whether to randomly start within allowed epsilon ball.
 15 |     """
 16 | 
 17 |     def __init__(self, *, random_start: bool = False):
 18 |         super().__init__(
 19 |             rel_stepsize=1.0,
 20 |             steps=1,
 21 |             random_start=random_start,
 22 |         )
 23 | 
 24 |     def run(
 25 |         self,
 26 |         model: Model,
 27 |         inputs: T,
 28 |         criterion: Union[Misclassification, TargetedMisclassification, T],
 29 |         *,
 30 |         epsilon: float,
 31 |         **kwargs: Any,
 32 |     ) -> T:
 33 |         if hasattr(criterion, "target_classes"):
 34 |             raise ValueError("unsupported criterion")
 35 | 
 36 |         return super().run(
 37 |             model=model, inputs=inputs, criterion=criterion, epsilon=epsilon, **kwargs
 38 |         )
 39 | 
 40 | 
 41 | class L2FastGradientAttack(L2BaseGradientDescent):
 42 |     """Fast Gradient Method (FGM)
 43 | 
 44 |     Args:
 45 |         random_start : Controls whether to randomly start within allowed epsilon ball.
 46 |     """
 47 | 
 48 |     def __init__(self, *, random_start: bool = False):
 49 |         super().__init__(
 50 |             rel_stepsize=1.0,
 51 |             steps=1,
 52 |             random_start=random_start,
 53 |         )
 54 | 
 55 |     def run(
 56 |         self,
 57 |         model: Model,
 58 |         inputs: T,
 59 |         criterion: Union[Misclassification, TargetedMisclassification, T],
 60 |         *,
 61 |         epsilon: float,
 62 |         **kwargs: Any,
 63 |     ) -> T:
 64 |         if hasattr(criterion, "target_classes"):
 65 |             raise ValueError("unsupported criterion")
 66 | 
 67 |         return super().run(
 68 |             model=model, inputs=inputs, criterion=criterion, epsilon=epsilon, **kwargs
 69 |         )
 70 | 
 71 | 
 72 | class LinfFastGradientAttack(LinfBaseGradientDescent):
 73 |     """Fast Gradient Sign Method (FGSM)
 74 | 
 75 |     Args:
 76 |         random_start : Controls whether to randomly start within allowed epsilon ball.
 77 |     """
 78 | 
 79 |     def __init__(self, *, random_start: bool = False):
 80 |         super().__init__(
 81 |             rel_stepsize=1.0,
 82 |             steps=1,
 83 |             random_start=random_start,
 84 |         )
 85 | 
 86 |     def run(
 87 |         self,
 88 |         model: Model,
 89 |         inputs: T,
 90 |         criterion: Union[Misclassification, TargetedMisclassification, T],
 91 |         *,
 92 |         epsilon: float,
 93 |         **kwargs: Any,
 94 |     ) -> T:
 95 |         if hasattr(criterion, "target_classes"):
 96 |             raise ValueError("unsupported criterion")
 97 | 
 98 |         return super().run(
 99 |             model=model, inputs=inputs, criterion=criterion, epsilon=epsilon, **kwargs
100 |         )
101 | 
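
A minimal usage sketch of the FGSM variant (assuming fmodel plus images and labels wrapped as EagerPy tensors, as in the repository examples):

from foolbox.attacks import LinfFastGradientAttack

# fmodel, images and labels are assumed to be defined
attack = LinfFastGradientAttack(random_start=False)
epsilons = [0.0, 0.001, 0.01, 0.03, 0.1]
raw, clipped, success = attack(fmodel, images, labels, epsilons=epsilons)

# success has one row per epsilon; robust accuracy per budget:
robust_accuracy = 1 - success.float32().mean(axis=-1)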


--------------------------------------------------------------------------------
/foolbox/attacks/inversion.py:
--------------------------------------------------------------------------------
 1 | from typing import Union, Any, Optional
 2 | import eagerpy as ep
 3 | 
 4 | from ..criteria import Criterion
 5 | 
 6 | from ..models import Model
 7 | 
 8 | from .base import FlexibleDistanceMinimizationAttack
 9 | from .base import T
10 | from .base import raise_if_kwargs
11 | from .base import verify_input_bounds
12 | 
13 | 
14 | class InversionAttack(FlexibleDistanceMinimizationAttack):
15 |     """Creates "negative images" by inverting the pixel values. [#Hos16]_
16 | 
17 |     References:
18 |         .. [#Hos16] Hossein Hosseini, Baicen Xiao, Mayoore Jaiswal, Radha Poovendran,
19 |                "On the Limitation of Convolutional Neural Networks in Recognizing
20 |                Negative Images",
21 |                https://arxiv.org/abs/1703.06857
22 |     """
23 | 
24 |     def run(
25 |         self,
26 |         model: Model,
27 |         inputs: T,
28 |         criterion: Union[Criterion, Any] = None,
29 |         *,
30 |         early_stop: Optional[float] = None,
31 |         **kwargs: Any,
32 |     ) -> T:
33 |         raise_if_kwargs(kwargs)
34 |         x, restore_type = ep.astensor_(inputs)
35 |         del inputs, criterion, kwargs
36 | 
37 |         verify_input_bounds(x, model)
38 | 
39 |         min_, max_ = model.bounds
40 |         x = min_ + max_ - x
41 |         return restore_type(x)
42 | 


--------------------------------------------------------------------------------
/foolbox/attacks/newtonfool.py:
--------------------------------------------------------------------------------
  1 | from typing import Union, Tuple, Any, Optional
  2 | import eagerpy as ep
  3 | 
  4 | from ..models import Model
  5 | 
  6 | from ..criteria import Misclassification
  7 | 
  8 | from ..distances import l2
  9 | 
 10 | from ..devutils import atleast_kd, flatten
 11 | 
 12 | from .base import MinimizationAttack
 13 | from .base import get_criterion
 14 | from .base import T
 15 | from .base import raise_if_kwargs
 16 | from .base import verify_input_bounds
 17 | 
 18 | 
 19 | class NewtonFoolAttack(MinimizationAttack):
 20 |     """Implementation of the NewtonFool Attack. [#Jang17]_
 21 | 
 22 |     Args:
 23 |         steps : Number of update steps to perform.
 24 |         stepsize : Size of each update step.
 25 | 
 26 |     References:
 27 |         .. [#Jang17] Uyeong Jang et al., "Objective Metrics and Gradient Descent
 28 |             Algorithms for Adversarial Examples in Machine Learning",
 29 |             https://dl.acm.org/citation.cfm?id=3134635
 30 |     """
 31 | 
 32 |     distance = l2
 33 | 
 34 |     def __init__(self, steps: int = 100, stepsize: float = 0.01):
 35 |         self.steps = steps
 36 |         self.stepsize = stepsize
 37 | 
 38 |     def run(
 39 |         self,
 40 |         model: Model,
 41 |         inputs: T,
 42 |         criterion: Union[Misclassification, T],
 43 |         *,
 44 |         early_stop: Optional[float] = None,
 45 |         **kwargs: Any,
 46 |     ) -> T:
 47 |         raise_if_kwargs(kwargs)
 48 |         x, restore_type = ep.astensor_(inputs)
 49 |         criterion_ = get_criterion(criterion)
 50 |         del inputs, criterion, kwargs
 51 | 
 52 |         verify_input_bounds(x, model)
 53 | 
 54 |         N = len(x)
 55 | 
 56 |         if isinstance(criterion_, Misclassification):
 57 |             classes = criterion_.labels
 58 |         else:
 59 |             raise ValueError("unsupported criterion")
 60 | 
 61 |         if classes.shape != (N,):
 62 |             raise ValueError(
 63 |                 f"expected labels to have shape ({N},), got {classes.shape}"
 64 |             )
 65 | 
 66 |         min_, max_ = model.bounds
 67 | 
 68 |         x_l2_norm = flatten(x.square()).sum(1)
 69 | 
 70 |         def loss_fun(x: ep.Tensor) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
 71 |             logits = model(x)
 72 |             scores = ep.softmax(logits)
 73 |             pred_scores = scores[range(N), classes]
 74 |             loss = pred_scores.sum()
 75 |             return loss, (scores, pred_scores)
 76 | 
 77 |         for i in range(self.steps):
 78 |             # (1) get the scores and gradients
 79 |             _, (scores, pred_scores), gradients = ep.value_aux_and_grad(loss_fun, x)
 80 | 
 81 |             pred = scores.argmax(-1)
 82 |             num_classes = scores.shape[-1]
 83 | 
 84 |             # (2) calculate gradient norm
 85 |             gradients_l2_norm = flatten(gradients.square()).sum(1)
 86 | 
 87 |             # (3) calculate delta
 88 |             a = self.stepsize * x_l2_norm * gradients_l2_norm
 89 |             b = pred_scores - 1.0 / num_classes
 90 | 
 91 |             delta = ep.minimum(a, b)
 92 | 
 93 |             # (4) stop the attack if an adversarial example has been found
 94 |             # this is not described in the paper but otherwise once the prob. drops
 95 |             # below chance level the likelihood is not decreased but increased
 96 |             is_not_adversarial = (pred == classes).float32()
 97 |             delta *= is_not_adversarial
 98 | 
 99 |             # (5) calculate & apply current perturbation
100 |             a = atleast_kd(delta / gradients_l2_norm.square(), gradients.ndim)
101 |             x -= a * gradients
102 | 
103 |             x = ep.clip(x, min_, max_)
104 | 
105 |         return restore_type(x)
106 | 


--------------------------------------------------------------------------------
/foolbox/attacks/saltandpepper.py:
--------------------------------------------------------------------------------
  1 | from typing import Optional, Any, Union
  2 | import eagerpy as ep
  3 | 
  4 | from ..criteria import Misclassification
  5 | 
  6 | from ..distances import l2
  7 | 
  8 | from ..devutils import flatten
  9 | from ..devutils import atleast_kd
 10 | 
 11 | from .base import MinimizationAttack
 12 | from .base import get_is_adversarial
 13 | from .base import get_channel_axis
 14 | 
 15 | from ..models.base import Model
 16 | from .base import get_criterion
 17 | from .base import T
 18 | from .base import raise_if_kwargs
 19 | from .base import verify_input_bounds
 20 | 
 21 | 
 22 | class SaltAndPepperNoiseAttack(MinimizationAttack):
 23 |     """Increases the amount of salt and pepper noise until the input is misclassified.
 24 | 
 25 |     Args:
 26 |         steps : The number of steps to run.
 27 |         across_channels : Whether the noise should be the same across all channels.
 28 |         channel_axis : The axis across which the noise should be the same
 29 |             (if across_channels is True). If None, will be automatically inferred
 30 |             from the model if possible.
 31 |     """
 32 | 
 33 |     distance = l2
 34 | 
 35 |     def __init__(
 36 |         self,
 37 |         steps: int = 1000,
 38 |         across_channels: bool = True,
 39 |         channel_axis: Optional[int] = None,
 40 |     ):
 41 |         self.steps = steps
 42 |         self.across_channels = across_channels
 43 |         self.channel_axis = channel_axis
 44 | 
 45 |     def run(
 46 |         self,
 47 |         model: Model,
 48 |         inputs: T,
 49 |         criterion: Union[Misclassification, Any],
 50 |         *,
 51 |         early_stop: Optional[float] = None,
 52 |         **kwargs: Any,
 53 |     ) -> T:
 54 |         raise_if_kwargs(kwargs)
 55 |         x0, restore_type = ep.astensor_(inputs)
 56 |         criterion_ = get_criterion(criterion)
 57 |         del inputs, criterion, kwargs
 58 | 
 59 |         verify_input_bounds(x0, model)
 60 | 
 61 |         is_adversarial = get_is_adversarial(criterion_, model)
 62 | 
 63 |         N = len(x0)
 64 |         shape = list(x0.shape)
 65 | 
 66 |         if self.across_channels and x0.ndim > 2:
 67 |             if self.channel_axis is None:
 68 |                 channel_axis = get_channel_axis(model, x0.ndim)
 69 |             else:
 70 |                 channel_axis = self.channel_axis % x0.ndim
 71 |             if channel_axis is not None:
 72 |                 shape[channel_axis] = 1
 73 | 
 74 |         min_, max_ = model.bounds
 75 |         r = max_ - min_
 76 | 
 77 |         result = x0
 78 |         is_adv = is_adversarial(result)
 79 |         best_advs_norms = ep.where(is_adv, ep.zeros(x0, N), ep.full(x0, N, ep.inf))
 80 |         min_probability = ep.zeros(x0, N)
 81 |         max_probability = ep.ones(x0, N)
 82 |         stepsizes = max_probability / self.steps
 83 |         p = stepsizes
 84 | 
 85 |         for step in range(self.steps):
 86 |             # add salt and pepper
 87 |             u = ep.uniform(x0, tuple(shape))
 88 |             p_ = atleast_kd(p, x0.ndim)
 89 |             salt = (u >= 1 - p_ / 2).astype(x0.dtype) * r
 90 |             pepper = -(u < p_ / 2).astype(x0.dtype) * r
 91 |             x = x0 + salt + pepper
 92 |             x = ep.clip(x, min_, max_)
 93 | 
 94 |             # check if we found new best adversarials
 95 |             norms = flatten(x - x0).norms.l2(axis=-1)
 96 |             closer = norms < best_advs_norms
 97 |             is_adv = is_adversarial(x)  # TODO: ignore those that are not closer anyway
 98 |             is_best_adv = ep.logical_and(is_adv, closer)
 99 | 
100 |             # update results and search space
101 |             result = ep.where(atleast_kd(is_best_adv, x.ndim), x, result)
102 |             best_advs_norms = ep.where(is_best_adv, norms, best_advs_norms)
103 |             min_probability = ep.where(is_best_adv, 0.5 * p, min_probability)
104 |             # we set max_probability a bit higher than p because the relationship
105 |             # between p and norms is not strictly monotonic
106 |             max_probability = ep.where(
107 |                 is_best_adv, ep.minimum(p * 1.2, 1.0), max_probability
108 |             )
109 |             remaining = self.steps - step
110 |             stepsizes = ep.where(
111 |                 is_best_adv, (max_probability - min_probability) / remaining, stepsizes
112 |             )
113 |             reset = p == max_probability
114 |             p = ep.where(ep.logical_or(is_best_adv, reset), min_probability, p)
115 |             p = ep.minimum(p + stepsizes, max_probability)
116 | 
117 |         return restore_type(result)
118 | 


--------------------------------------------------------------------------------
/foolbox/attacks/sparse_l1_descent_attack.py:
--------------------------------------------------------------------------------
 1 | from typing import Optional
 2 | import eagerpy as ep
 3 | import numpy as np
 4 | 
 5 | from ..devutils import flatten
 6 | from ..devutils import atleast_kd
 7 | 
 8 | from ..types import Bounds
 9 | 
10 | from .gradient_descent_base import L1BaseGradientDescent
11 | from .gradient_descent_base import normalize_lp_norms
12 | 
13 | 
14 | class SparseL1DescentAttack(L1BaseGradientDescent):
15 |     """Sparse L1 Descent Attack [#Tra19]_.
16 | 
17 |     Args:
18 |         rel_stepsize: Stepsize relative to epsilon.
19 |         abs_stepsize: If given, it takes precedence over rel_stepsize.
20 |         steps : Number of update steps.
21 |         random_start : Controls whether to randomly start within allowed epsilon ball.
22 | 
23 |     References:
24 |         .. [#Tra19] Florian Tramèr, Dan Boneh, "Adversarial Training and
25 |             Robustness for Multiple Perturbations",
26 |             https://arxiv.org/abs/1904.13000
27 |     """
28 | 
29 |     def normalize(
30 |         self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
31 |     ) -> ep.Tensor:
32 |         bad_pos = ep.logical_or(
33 |             ep.logical_and(x == bounds.lower, gradients < 0),
34 |             ep.logical_and(x == bounds.upper, gradients > 0),
35 |         )
36 |         gradients = ep.where(bad_pos, ep.zeros_like(gradients), gradients)
37 | 
38 |         abs_gradients = gradients.abs()
39 |         quantiles = np.quantile(
40 |             flatten(abs_gradients).numpy(), q=self.quantile, axis=-1
41 |         )
42 |         keep = abs_gradients >= atleast_kd(
43 |             ep.from_numpy(gradients, quantiles), gradients.ndim
44 |         )
45 |         e = ep.where(keep, gradients.sign(), ep.zeros_like(gradients))
46 |         return normalize_lp_norms(e, p=1)
47 | 
48 |     def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
49 |         # based on https://github.com/ftramer/MultiRobustness/blob/ad41b63235d13b1b2a177c5f270ab9afa74eee69/pgd_attack.py#L110
50 |         delta = flatten(x - x0)
51 |         norms = delta.norms.l1(axis=-1)
52 |         if (norms <= epsilon).all():
53 |             return x
54 | 
55 |         n, d = delta.shape
56 |         abs_delta = abs(delta)
57 |         mu = -ep.sort(-abs_delta, axis=-1)
58 |         cumsums = mu.cumsum(axis=-1)
59 |         js = 1.0 / ep.arange(x, 1, d + 1).astype(x.dtype)
60 |         temp = mu - js * (cumsums - epsilon)
61 |         guarantee_first = ep.arange(x, d).astype(x.dtype) / d
62 |         # guarantee_first are small values (< 1) that we add to the boolean
63 |         # tensor (only 0 and 1) to break the ties and always return the first
64 |         # argmin, i.e. the first value where the boolean tensor is 0
65 |         # (otherwise, this is not guaranteed on GPUs, see e.g. PyTorch)
66 |         rho = ep.argmin((temp > 0).astype(x.dtype) + guarantee_first, axis=-1)
67 |         theta = 1.0 / (1 + rho.astype(x.dtype)) * (cumsums[range(n), rho] - epsilon)
68 |         delta = delta.sign() * ep.maximum(abs_delta - theta[..., ep.newaxis], 0)
69 |         delta = delta.reshape(x.shape)
70 |         return x0 + delta
71 | 
72 |     def __init__(
73 |         self,
74 |         *,
75 |         quantile: float = 0.99,
76 |         rel_stepsize: float = 0.2,
77 |         abs_stepsize: Optional[float] = None,
78 |         steps: int = 10,
79 |         random_start: bool = False,
80 |     ):
81 |         super().__init__(
82 |             rel_stepsize=rel_stepsize,
83 |             abs_stepsize=abs_stepsize,
84 |             steps=steps,
85 |             random_start=random_start,
86 |         )
87 |         if not 0 <= quantile <= 1:
88 |             raise ValueError(f"quantile needs to be between 0 and 1, got {quantile}")
89 |         self.quantile = quantile
90 | 
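
For reference, project() above implements the sorting-based projection onto the L1 ball (cf. Duchi et al., "Efficient Projections onto the l1-Ball", 2008). A NumPy sketch of the same projection for a single flattened perturbation vector:

import numpy as np

def project_l1_ball(delta: np.ndarray, epsilon: float) -> np.ndarray:
    # returns the closest point to delta (in L2) whose L1 norm is <= epsilon
    if np.abs(delta).sum() <= epsilon:
        return delta
    mu = np.sort(np.abs(delta))[::-1]          # |delta| sorted in descending order
    cumsums = np.cumsum(mu)
    js = np.arange(1, delta.size + 1)
    rho = np.nonzero(mu - (cumsums - epsilon) / js > 0)[0][-1]
    theta = (cumsums[rho] - epsilon) / (rho + 1)
    return np.sign(delta) * np.maximum(np.abs(delta) - theta, 0.0)

# e.g. project_l1_ball(np.array([0.5, -0.8, 0.1]), epsilon=1.0) -> [0.35, -0.65, 0.0]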


--------------------------------------------------------------------------------
/foolbox/attacks/virtual_adversarial_attack.py:
--------------------------------------------------------------------------------
  1 | from typing import Union, Any
  2 | import eagerpy as ep
  3 | 
  4 | from ..models import Model
  5 | 
  6 | from ..criteria import Misclassification
  7 | 
  8 | from ..distances import l2
  9 | 
 10 | from ..devutils import flatten, atleast_kd
 11 | 
 12 | from .base import FixedEpsilonAttack
 13 | from .base import get_criterion
 14 | from .base import T
 15 | from .base import raise_if_kwargs
 16 | from .base import verify_input_bounds
 17 | 
 18 | 
 19 | class VirtualAdversarialAttack(FixedEpsilonAttack):
 20 |     """Second-order gradient-based attack on the logits. [Miy15]_
 21 |     The attack calculates an untargeted adversarial perturbation by performing an
 22 |     approximated second-order optimization step on the KL divergence between
 23 |     the unperturbed predictions and the predictions for the adversarial
 24 |     perturbation. This attack was originally introduced as the
 25 |     Virtual Adversarial Training [Miy15]_ method.
 26 | 
 27 |     Args:
 28 |         steps : Number of update steps.
 29 |         xi : L2 distance between original image and first adversarial proposal.
 30 | 
 31 | 
 32 |     References:
 33 |         .. [Miy15] Takeru Miyato, Shin-ichi Maeda, Masanori Koyama, Ken Nakae,
 34 |             Shin Ishii, "Distributional Smoothing with Virtual Adversarial Training",
 35 |             https://arxiv.org/abs/1507.00677
 36 |     """
 37 | 
 38 |     distance = l2
 39 | 
 40 |     def __init__(self, steps: int, xi: float = 1e-6):
 41 |         self.steps = steps
 42 |         self.xi = xi
 43 | 
 44 |     def run(
 45 |         self,
 46 |         model: Model,
 47 |         inputs: T,
 48 |         criterion: Union[Misclassification, T],
 49 |         *,
 50 |         epsilon: float,
 51 |         **kwargs: Any,
 52 |     ) -> T:
 53 |         raise_if_kwargs(kwargs)
 54 |         x, restore_type = ep.astensor_(inputs)
 55 |         criterion_ = get_criterion(criterion)
 56 |         del inputs, criterion, kwargs
 57 | 
 58 |         verify_input_bounds(x, model)
 59 | 
 60 |         N = len(x)
 61 | 
 62 |         if isinstance(criterion_, Misclassification):
 63 |             classes = criterion_.labels
 64 |         else:
 65 |             raise ValueError("unsupported criterion")
 66 | 
 67 |         if classes.shape != (N,):
 68 |             raise ValueError(
 69 |                 f"expected labels to have shape ({N},), got {classes.shape}"
 70 |             )
 71 | 
 72 |         bounds = model.bounds
 73 | 
 74 |         def loss_fun(delta: ep.Tensor, logits: ep.Tensor) -> ep.Tensor:
 75 |             assert x.shape[0] == logits.shape[0]
 76 |             assert delta.shape == x.shape
 77 | 
 78 |             x_hat = x + delta
 79 |             logits_hat = model(x_hat)
 80 |             loss = ep.kl_div_with_logits(logits, logits_hat).sum()
 81 | 
 82 |             return loss
 83 | 
 84 |         value_and_grad = ep.value_and_grad_fn(x, loss_fun, has_aux=False)
 85 | 
 86 |         clean_logits = model(x)
 87 | 
 88 |         # start with random vector as search vector
 89 |         d = ep.normal(x, shape=x.shape, mean=0, stddev=1)
 90 |         for it in range(self.steps):
 91 |             # rescale the proposal to have L2 norm xi
 92 |             d = d * self.xi / atleast_kd(ep.norms.l2(flatten(d), axis=-1), x.ndim)
 93 | 
 94 |             # use gradient of KL divergence as new search vector
 95 |             _, grad = value_and_grad(d, clean_logits)
 96 |             d = grad
 97 | 
 98 |             # rescale search vector
 99 |             d = (bounds[1] - bounds[0]) * d
100 | 
101 |             if ep.any(ep.norms.l2(flatten(d), axis=-1) < 1e-64):
102 |                 raise RuntimeError(  # pragma: no cover
103 |                     "Gradient vanished; this can happen if xi is too small."
104 |                 )
105 | 
106 |         final_delta = epsilon / atleast_kd(ep.norms.l2(flatten(d), axis=-1), d.ndim) * d
107 |         x_adv = ep.clip(x + final_delta, *bounds)
108 |         return restore_type(x_adv)
109 | 
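A minimal usage sketch for this attack, assuming torchvision is installed, the pretrained ResNet-18 weights can be downloaded, and the sample images bundled with foolbox can be loaded; the steps, xi and epsilons values are illustrative, not tuned:

    import foolbox as fb
    import torchvision.models as models

    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = fb.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=4)

    attack = fb.attacks.VirtualAdversarialAttack(steps=100, xi=1e-6)
    # epsilons bounds the L2 norm of the final perturbation (distance = l2)
    raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=10.0)
    print("success rate:", is_adv.float().mean().item())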


--------------------------------------------------------------------------------
/foolbox/data/cifar100_00_49.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_00_49.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_01_33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_01_33.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_02_72.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_02_72.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_03_51.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_03_51.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_04_71.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_04_71.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_05_92.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_05_92.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_06_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_06_15.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_07_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_07_14.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_08_23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_08_23.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_09_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_09_0.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_10_71.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_10_71.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_11_75.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_11_75.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_12_81.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_12_81.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_13_69.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_13_69.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_14_40.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_14_40.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_15_43.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_15_43.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_16_92.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_16_92.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_17_97.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_17_97.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_18_70.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_18_70.png


--------------------------------------------------------------------------------
/foolbox/data/cifar100_19_53.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar100_19_53.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_00_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_00_3.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_01_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_01_8.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_02_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_02_8.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_03_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_03_0.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_04_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_04_6.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_05_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_05_6.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_06_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_06_1.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_07_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_07_6.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_08_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_08_3.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_09_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_09_1.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_10_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_10_0.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_11_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_11_9.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_12_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_12_5.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_13_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_13_7.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_14_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_14_9.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_15_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_15_8.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_16_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_16_5.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_17_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_17_7.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_18_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_18_8.png


--------------------------------------------------------------------------------
/foolbox/data/cifar10_19_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/cifar10_19_6.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_00_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_00_9.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_01_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_01_2.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_02_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_02_1.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_03_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_03_1.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_04_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_04_6.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_05_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_05_1.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_06_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_06_4.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_07_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_07_6.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_08_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_08_5.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_09_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_09_7.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_10_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_10_4.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_11_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_11_5.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_12_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_12_7.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_13_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_13_3.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_14_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_14_4.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_15_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_15_1.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_16_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_16_2.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_17_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_17_4.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_18_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_18_8.png


--------------------------------------------------------------------------------
/foolbox/data/fashionMNIST_19_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/fashionMNIST_19_0.png


--------------------------------------------------------------------------------
/foolbox/data/imagenet_00_243.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_00_243.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_01_559.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_01_559.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_02_438.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_02_438.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_03_990.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_03_990.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_04_949.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_04_949.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_05_853.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_05_853.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_06_609.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_06_609.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_07_609.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_07_609.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_08_915.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_08_915.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_09_455.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_09_455.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_10_541.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_10_541.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_11_630.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_11_630.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_12_741.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_12_741.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_13_471.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_13_471.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_14_129.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_14_129.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_15_99.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_15_99.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_16_251.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_16_251.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_17_22.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_17_22.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_18_317.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_18_317.jpg


--------------------------------------------------------------------------------
/foolbox/data/imagenet_19_305.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/imagenet_19_305.jpg


--------------------------------------------------------------------------------
/foolbox/data/mnist_00_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_00_7.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_01_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_01_2.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_02_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_02_1.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_03_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_03_0.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_04_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_04_4.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_05_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_05_1.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_06_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_06_4.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_07_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_07_9.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_08_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_08_5.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_09_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_09_9.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_10_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_10_0.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_11_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_11_6.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_12_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_12_9.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_13_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_13_0.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_14_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_14_1.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_15_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_15_5.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_16_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_16_9.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_17_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_17_7.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_18_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_18_3.png


--------------------------------------------------------------------------------
/foolbox/data/mnist_19_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/data/mnist_19_4.png


--------------------------------------------------------------------------------
/foolbox/devutils.py:
--------------------------------------------------------------------------------
 1 | """Internal module with utility functions"""
 2 | import eagerpy as ep
 3 | 
 4 | 
 5 | def flatten(x: ep.Tensor, keep: int = 1) -> ep.Tensor:
 6 |     return x.flatten(start=keep)
 7 | 
 8 | 
 9 | def atleast_kd(x: ep.Tensor, k: int) -> ep.Tensor:
10 |     shape = x.shape + (1,) * (k - x.ndim)
11 |     return x.reshape(shape)
12 | 
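A small sketch of what these two helpers do on an EagerPy-wrapped NumPy array (the shapes are arbitrary):

    import numpy as np
    import eagerpy as ep
    from foolbox.devutils import flatten, atleast_kd

    x = ep.astensor(np.zeros((8, 3, 32, 32)))

    # flatten keeps the first `keep` axes and collapses the rest into one
    print(flatten(x).shape)  # (8, 3072)

    # atleast_kd appends trailing singleton axes until the tensor has k dimensions
    norms = ep.astensor(np.ones(8))
    print(atleast_kd(norms, x.ndim).shape)  # (8, 1, 1, 1)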


--------------------------------------------------------------------------------
/foolbox/distances.py:
--------------------------------------------------------------------------------
 1 | from abc import ABC, abstractmethod
 2 | from typing import TypeVar
 3 | import eagerpy as ep
 4 | 
 5 | from .devutils import flatten
 6 | from .devutils import atleast_kd
 7 | 
 8 | 
 9 | T = TypeVar("T")
10 | 
11 | 
12 | class Distance(ABC):
13 |     @abstractmethod
14 |     def __call__(self, reference: T, perturbed: T) -> T:
15 |         ...
16 | 
17 |     @abstractmethod
18 |     def clip_perturbation(self, references: T, perturbed: T, epsilon: float) -> T:
19 |         ...
20 | 
21 | 
22 | class LpDistance(Distance):
23 |     def __init__(self, p: float):
24 |         self.p = p
25 | 
26 |     def __repr__(self) -> str:
27 |         return f"LpDistance({self.p})"
28 | 
29 |     def __str__(self) -> str:
30 |         return f"L{self.p} distance"
31 | 
32 |     def __call__(self, references: T, perturbed: T) -> T:
33 |         """Calculates the distances from references to perturbed using the Lp norm.
34 | 
35 |         Args:
36 |             references: A batch of reference inputs.
37 |             perturbed: A batch of perturbed inputs.
38 | 
39 |         Returns:
40 |             A 1D tensor with the distances from references to perturbed.
41 |         """
42 |         (x, y), restore_type = ep.astensors_(references, perturbed)
43 |         norms = ep.norms.lp(flatten(y - x), self.p, axis=-1)
44 |         return restore_type(norms)
45 | 
46 |     def clip_perturbation(self, references: T, perturbed: T, epsilon: float) -> T:
47 |         """Clips the perturbations to epsilon and returns the new perturbed
48 | 
49 |         Args:
50 |             references: A batch of reference inputs.
51 |             perturbed: A batch of perturbed inputs.
52 | 
53 |         Returns:
 54 |             A tensor like perturbed but with the perturbation clipped to epsilon.
55 |         """
56 |         (x, y), restore_type = ep.astensors_(references, perturbed)
57 |         p = y - x
58 |         if self.p == ep.inf:
59 |             clipped_perturbation = ep.clip(p, -epsilon, epsilon)
60 |             return restore_type(x + clipped_perturbation)
61 |         norms = ep.norms.lp(flatten(p), self.p, axis=-1)
 62 |         norms = ep.maximum(norms, 1e-12)  # avoid division by zero
63 |         factor = epsilon / norms
64 |         factor = ep.minimum(1, factor)  # clipping -> decreasing but not increasing
65 |         if self.p == 0:
66 |             if (factor == 1).all():
67 |                 return perturbed
68 |             raise NotImplementedError("reducing L0 norms not yet supported")
69 |         factor = atleast_kd(factor, x.ndim)
70 |         clipped_perturbation = factor * p
71 |         return restore_type(x + clipped_perturbation)
72 | 
73 | 
74 | l0 = LpDistance(0)
75 | l1 = LpDistance(1)
76 | l2 = LpDistance(2)
77 | linf = LpDistance(ep.inf)
78 | 
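A small sketch of the L2 distance object defined above, on NumPy inputs with an arbitrary epsilon:

    import numpy as np
    from foolbox.distances import l2

    x = np.zeros((4, 3, 8, 8), dtype=np.float32)
    xp = x + 0.1  # perturbed inputs

    print(l2(x, xp))  # per-sample L2 distances, shape (4,)

    # project the perturbations back onto an L2 ball of radius 0.5 around x
    clipped = l2.clip_perturbation(x, xp, epsilon=0.5)
    print(l2(x, clipped))  # all values <= 0.5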


--------------------------------------------------------------------------------
/foolbox/external/LICENSE:
--------------------------------------------------------------------------------
1 | The code in this subfolder might be under a different license than the rest of the project.
2 | 


--------------------------------------------------------------------------------
/foolbox/external/README.rst:
--------------------------------------------------------------------------------
 1 | License
 2 | -------
 3 | 
 4 | The code in this subfolder might be under a different license than the rest of the project.
 5 | 
 6 | Sources
 7 | -------
 8 | 
 9 | * `clipping_aware_rescaling.py <https://github.com/jonasrauber/clipping-aware-rescaling>`_
10 | 


--------------------------------------------------------------------------------
/foolbox/external/__init__.py:
--------------------------------------------------------------------------------
1 | from . import clipping_aware_rescaling  # noqa: F401
2 | 


--------------------------------------------------------------------------------
/foolbox/external/clipping_aware_rescaling.py:
--------------------------------------------------------------------------------
 1 | # Copyright (c) 2020, Jonas Rauber
 2 | #
 3 | # Licensed under the BSD 3-Clause License
 4 | #
 5 | # Last changed:
 6 | # * 2020-07-15
 7 | # * 2020-01-08
 8 | # * 2019-04-18
 9 | 
10 | import eagerpy as ep
11 | 
12 | 
13 | def l2_clipping_aware_rescaling(x, delta, eps: float, a: float = 0.0, b: float = 1.0):  # type: ignore
14 |     """Calculates eta such that norm(clip(x + eta * delta, a, b) - x) == eps.
15 | 
 16 |     Assumes x and delta have a batch dimension and eps, a, and b are
17 |     scalars. If the equation cannot be solved because eps is too large, the
18 |     left hand side is maximized.
19 | 
20 |     Args:
21 |         x: A batch of inputs (PyTorch Tensor, TensorFlow Eager Tensor, NumPy
22 |             Array, JAX Array, or EagerPy Tensor).
23 |         delta: A batch of perturbation directions (same shape and type as x).
24 |         eps: The target norm (non-negative float).
25 |         a: The lower bound of the data domain (float).
26 |         b: The upper bound of the data domain (float).
27 | 
28 |     Returns:
 29 |         eta: A batch of scales with the same number of dimensions as x, but with
 30 |             all axes of size 1 except for the batch dimension.
31 |     """
32 |     (x, delta), restore_fn = ep.astensors_(x, delta)
33 |     N = x.shape[0]
34 |     assert delta.shape[0] == N
35 |     rows = ep.arange(x, N)
36 | 
37 |     delta2 = delta.square().reshape((N, -1))
38 |     space = ep.where(delta >= 0, b - x, x - a).reshape((N, -1))
39 |     f2 = space.square() / ep.maximum(delta2, 1e-20)
40 |     ks = ep.argsort(f2, axis=-1)
41 |     f2_sorted = f2[rows[:, ep.newaxis], ks]
42 |     m = ep.cumsum(delta2[rows[:, ep.newaxis], ks.flip(axis=1)], axis=-1).flip(axis=1)
43 |     dx = f2_sorted[:, 1:] - f2_sorted[:, :-1]
44 |     dx = ep.concatenate((f2_sorted[:, :1], dx), axis=-1)
45 |     dy = m * dx
46 |     y = ep.cumsum(dy, axis=-1)
47 |     c = y >= eps**2
48 | 
49 |     # work-around to get first nonzero element in each row
50 |     f = ep.arange(x, c.shape[-1], 0, -1)
51 |     j = ep.argmax(c.astype(f.dtype) * f, axis=-1)
52 | 
53 |     eta2 = f2_sorted[rows, j] - (y[rows, j] - eps**2) / m[rows, j]
54 |     # it can happen that for certain rows even the largest j is not large enough
55 |     # (i.e. c[:, -1] is False), then we will just use it (without any correction) as it's
 56 |     # the best we can do (these should also be the only cases where m[j] can be
 57 |     # 0, and they are thus not a problem)
58 |     eta2 = ep.where(c[:, -1], eta2, f2_sorted[:, -1])
59 |     eta = ep.sqrt(eta2)
60 |     eta = eta.reshape((-1,) + (1,) * (x.ndim - 1))
61 | 
62 |     # xp = ep.clip(x + eta * delta, a, b)
63 |     # l2 = (xp - x).reshape((N, -1)).square().sum(axis=-1).sqrt()
64 |     return restore_fn(eta)
65 | 
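A sketch of calling this helper on NumPy inputs in [0, 1]; the eps value is an arbitrary target norm:

    import numpy as np
    from foolbox.external.clipping_aware_rescaling import l2_clipping_aware_rescaling

    rng = np.random.RandomState(0)
    x = rng.uniform(0, 1, size=(4, 3, 8, 8)).astype(np.float32)
    delta = rng.normal(size=x.shape).astype(np.float32)

    eta = l2_clipping_aware_rescaling(x, delta, eps=1.0)  # shape (4, 1, 1, 1)
    x_perturbed = np.clip(x + eta * delta, 0, 1)

    # the clipped perturbation should have (at most) L2 norm eps per sample
    print(np.linalg.norm((x_perturbed - x).reshape(4, -1), axis=-1))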


--------------------------------------------------------------------------------
/foolbox/gradient_estimators.py:
--------------------------------------------------------------------------------
 1 | from typing import Callable, Tuple, Type
 2 | import eagerpy as ep
 3 | from .types import BoundsInput, Bounds
 4 | from .attacks.base import Attack
 5 | 
 6 | 
 7 | def evolutionary_strategies_gradient_estimator(
 8 |     AttackCls: Type[Attack],
 9 |     *,
10 |     samples: int,
11 |     sigma: float,
12 |     bounds: BoundsInput,
13 |     clip: bool,
14 | ) -> Type[Attack]:
15 | 
16 |     if not hasattr(AttackCls, "value_and_grad"):
17 |         raise ValueError(
18 |             "This attack does not support gradient estimators."
19 |         )  # pragma: no cover
20 | 
21 |     bounds = Bounds(*bounds)
22 | 
23 |     class GradientEstimator(AttackCls):  # type: ignore
24 |         def value_and_grad(
25 |             self,
26 |             loss_fn: Callable[[ep.Tensor], ep.Tensor],
27 |             x: ep.Tensor,
28 |         ) -> Tuple[ep.Tensor, ep.Tensor]:
29 |             value = loss_fn(x)
30 | 
31 |             gradient = ep.zeros_like(x)
32 |             for k in range(samples // 2):
33 |                 noise = ep.normal(x, shape=x.shape)
34 | 
35 |                 pos_theta = x + sigma * noise
36 |                 neg_theta = x - sigma * noise
37 | 
38 |                 if clip:
39 |                     pos_theta = pos_theta.clip(*bounds)
40 |                     neg_theta = neg_theta.clip(*bounds)
41 | 
42 |                 pos_loss = loss_fn(pos_theta)
43 |                 neg_loss = loss_fn(neg_theta)
44 | 
45 |                 gradient += (pos_loss - neg_loss) * noise
46 | 
47 |             gradient /= 2 * sigma * 2 * samples
48 | 
49 |             return value, gradient
50 | 
51 |     GradientEstimator.__name__ = AttackCls.__name__ + "WithESGradientEstimator"
52 |     GradientEstimator.__qualname__ = AttackCls.__qualname__ + "WithESGradientEstimator"
53 |     return GradientEstimator
54 | 
55 | 
56 | es_gradient_estimator = evolutionary_strategies_gradient_estimator
57 | 
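A sketch of wrapping a white-box attack so that its gradients are estimated with evolutionary strategies; it assumes the wrapped attack exposes value_and_grad (the gradient-descent-based attacks such as LinfFastGradientAttack do), and the samples/sigma values are illustrative:

    import foolbox as fb
    from foolbox.gradient_estimators import es_gradient_estimator

    # build a new attack class whose gradients are estimated with finite differences
    FGSMWithES = es_gradient_estimator(
        fb.attacks.LinfFastGradientAttack,
        samples=100,
        sigma=0.02,
        bounds=(0, 1),
        clip=True,
    )
    attack = FGSMWithES()
    # used like any other attack:
    # raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.03)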


--------------------------------------------------------------------------------
/foolbox/models/__init__.py:
--------------------------------------------------------------------------------
 1 | from .base import Model  # noqa: F401
 2 | from .base import TransformBoundsWrapper  # noqa: F401
 3 | 
 4 | from .pytorch import PyTorchModel  # noqa: F401
 5 | from .tensorflow import TensorFlowModel  # noqa: F401
 6 | from .jax import JAXModel  # noqa: F401
 7 | from .numpy import NumPyModel  # noqa: F401
 8 | 
 9 | from .wrappers import ThresholdingWrapper  # noqa: F401
10 | from .wrappers import ExpectationOverTransformationWrapper  # noqa: F401
11 | 


--------------------------------------------------------------------------------
/foolbox/models/jax.py:
--------------------------------------------------------------------------------
 1 | from typing import Any, Optional
 2 | import eagerpy as ep
 3 | 
 4 | from ..types import BoundsInput, Preprocessing
 5 | 
 6 | from .base import ModelWithPreprocessing
 7 | 
 8 | 
 9 | class JAXModel(ModelWithPreprocessing):
10 |     def __init__(
11 |         self,
12 |         model: Any,
13 |         bounds: BoundsInput,
14 |         preprocessing: Preprocessing = None,
15 |         data_format: Optional[str] = "channels_last",
16 |     ):
17 |         dummy = ep.jax.numpy.zeros(0)
18 |         super().__init__(model, bounds=bounds, dummy=dummy, preprocessing=preprocessing)
19 |         self._data_format = data_format
20 | 
21 |     @property
22 |     def data_format(self) -> str:
23 |         if self._data_format is None:
24 |             raise AttributeError(  # AttributeError -> hasattr returns False
25 |                 "please specify data_format when initializing the JaxModel"
26 |             )
27 |         return self._data_format
28 | 


--------------------------------------------------------------------------------
/foolbox/models/numpy.py:
--------------------------------------------------------------------------------
 1 | from typing import TypeVar, Callable, Optional
 2 | import eagerpy as ep
 3 | 
 4 | from ..types import Bounds
 5 | from ..types import BoundsInput
 6 | 
 7 | from .base import Model
 8 | 
 9 | 
10 | T = TypeVar("T")
11 | 
12 | 
13 | class NumPyModel(Model):
14 |     def __init__(
15 |         self, model: Callable, bounds: BoundsInput, data_format: Optional[str] = None
16 |     ):
17 |         self._model = model
18 |         self._bounds = Bounds(*bounds)
19 |         if data_format is not None:
20 |             if data_format not in ["channels_first", "channels_last"]:
21 |                 raise ValueError(
22 |                     f"expected data_format to be 'channels_first' or 'channels_last', got {data_format}"
23 |                 )
24 |         self._data_format = data_format
25 | 
26 |     @property
27 |     def bounds(self) -> Bounds:
28 |         return self._bounds
29 | 
30 |     def __call__(self, inputs: T) -> T:
31 |         x, restore_type = ep.astensor_(inputs)
32 |         y = self._model(x.numpy())
33 |         z = ep.from_numpy(x, y)
34 |         return restore_type(z)
35 | 
36 |     @property
37 |     def data_format(self) -> str:
38 |         if self._data_format is None:
39 |             raise AttributeError(  # AttributeError -> hasattr returns False
40 |                 "please specify data_format when initializing the NumPyModel"
41 |             )
42 |         return self._data_format
43 | 
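A minimal sketch of wrapping a plain NumPy callable; the linear "network" below is a made-up stand-in, not a trained classifier:

    import numpy as np
    from foolbox.models import NumPyModel

    rng = np.random.RandomState(0)
    weights = rng.normal(size=(28 * 28, 10)).astype(np.float32)

    def net(x: np.ndarray) -> np.ndarray:
        # x: (N, 28, 28, 1) -> logits: (N, 10)
        return x.reshape(len(x), -1) @ weights

    fmodel = NumPyModel(net, bounds=(0, 1), data_format="channels_last")
    logits = fmodel(np.zeros((2, 28, 28, 1), dtype=np.float32))
    print(logits.shape)  # (2, 10)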


--------------------------------------------------------------------------------
/foolbox/models/pytorch.py:
--------------------------------------------------------------------------------
 1 | from typing import Any, cast
 2 | import warnings
 3 | import eagerpy as ep
 4 | 
 5 | from ..types import BoundsInput, Preprocessing
 6 | 
 7 | from .base import ModelWithPreprocessing
 8 | 
 9 | 
10 | def get_device(device: Any) -> Any:
11 |     import torch
12 | 
13 |     if device is None:
14 |         return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
15 |     if isinstance(device, str):
16 |         return torch.device(device)
17 |     return device
18 | 
19 | 
20 | class PyTorchModel(ModelWithPreprocessing):
21 |     def __init__(
22 |         self,
23 |         model: Any,
24 |         bounds: BoundsInput,
25 |         device: Any = None,
26 |         preprocessing: Preprocessing = None,
27 |     ):
28 |         import torch
29 | 
30 |         if not isinstance(model, torch.nn.Module):
31 |             raise ValueError("expected model to be a torch.nn.Module instance")
32 | 
33 |         if model.training:
34 |             with warnings.catch_warnings():
35 |                 warnings.simplefilter("always")
36 |                 warnings.warn(
37 |                     "The PyTorch model is in training mode and therefore might"
38 |                     " not be deterministic. Call the eval() method to set it in"
39 |                     " evaluation mode if this is not intended."
40 |                 )
41 | 
42 |         device = get_device(device)
43 |         model = model.to(device)
44 |         dummy = ep.torch.zeros(0, device=device)
45 | 
46 |         # we need to make sure the output only requires_grad if the input does
47 |         def _model(x: torch.Tensor) -> torch.Tensor:
48 |             with torch.set_grad_enabled(x.requires_grad):
49 |                 result = cast(torch.Tensor, model(x))
50 |             return result
51 | 
52 |         super().__init__(
53 |             _model, bounds=bounds, dummy=dummy, preprocessing=preprocessing
54 |         )
55 | 
56 |         self.data_format = "channels_first"
57 |         self.device = device
58 | 
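A minimal usage sketch, assuming torchvision is installed and the pretrained weights can be downloaded; calling eval() avoids the training-mode warning above:

    import foolbox as fb
    import torchvision.models as models

    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = fb.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=4)
    print("clean accuracy:", fb.utils.accuracy(fmodel, images, labels))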


--------------------------------------------------------------------------------
/foolbox/models/tensorflow.py:
--------------------------------------------------------------------------------
 1 | from typing import cast, Any
 2 | import eagerpy as ep
 3 | 
 4 | from ..types import BoundsInput, Preprocessing
 5 | 
 6 | from .base import ModelWithPreprocessing
 7 | 
 8 | 
 9 | def get_device(device: Any) -> Any:
10 |     import tensorflow as tf
11 | 
12 |     if device is None:
13 |         device = tf.device("/GPU:0" if tf.test.is_gpu_available() else "/CPU:0")
14 |     if isinstance(device, str):
15 |         device = tf.device(device)
16 |     return device
17 | 
18 | 
19 | class TensorFlowModel(ModelWithPreprocessing):
20 |     def __init__(
21 |         self,
22 |         model: Any,
23 |         bounds: BoundsInput,
24 |         device: Any = None,
25 |         preprocessing: Preprocessing = None,
26 |     ):
27 |         import tensorflow as tf
28 | 
29 |         if not tf.executing_eagerly():
30 |             raise ValueError(
31 |                 "TensorFlowModel requires TensorFlow Eager Mode"
32 |             )  # pragma: no cover
33 | 
34 |         device = get_device(device)
35 |         with device:
36 |             dummy = ep.tensorflow.zeros(0)
37 |         super().__init__(model, bounds, dummy, preprocessing=preprocessing)
38 | 
39 |         self.device = device
40 | 
41 |     @property
42 |     def data_format(self) -> str:
43 |         import tensorflow as tf
44 | 
45 |         return cast(str, tf.keras.backend.image_data_format())
46 | 


--------------------------------------------------------------------------------
/foolbox/models/wrappers.py:
--------------------------------------------------------------------------------
 1 | import eagerpy as ep
 2 | 
 3 | from ..types import Bounds
 4 | 
 5 | from .base import Model
 6 | from .base import T
 7 | 
 8 | 
 9 | class ThresholdingWrapper(Model):
10 |     def __init__(self, model: Model, threshold: float):
11 |         self._model = model
12 |         self._threshold = threshold
13 | 
14 |     @property
15 |     def bounds(self) -> Bounds:
16 |         return self._model.bounds
17 | 
18 |     def __call__(self, inputs: T) -> T:
19 |         min_, max_ = self._model.bounds
20 |         x, restore_type = ep.astensor_(inputs)
21 |         y = ep.where(x < self._threshold, min_, max_).astype(x.dtype)
22 |         z = self._model(y)
23 |         return restore_type(z)
24 | 
25 | 
26 | class ExpectationOverTransformationWrapper(Model):
27 |     def __init__(self, model: Model, n_steps: int = 16):
28 |         self._model = model
29 |         self._n_steps = n_steps
30 | 
31 |     @property
32 |     def bounds(self) -> Bounds:
33 |         return self._model.bounds
34 | 
35 |     def __call__(self, inputs: T) -> T:
36 | 
37 |         x, restore_type = ep.astensor_(inputs)
38 | 
39 |         for i in range(self._n_steps):
40 |             z_t = self._model(x)
41 | 
42 |             if i == 0:
43 |                 z = z_t.expand_dims(0)
44 |             else:
45 |                 z = ep.concatenate([z, z_t.expand_dims(0)], axis=0)
46 | 
47 |         z = z.mean(0)
48 | 
49 |         return restore_type(z)
50 | 
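A sketch of composing these wrappers with an existing foolbox model; a NumPyModel with a made-up callable keeps the example self-contained, and the threshold and step count are arbitrary:

    import numpy as np
    from foolbox.models import (
        NumPyModel,
        ThresholdingWrapper,
        ExpectationOverTransformationWrapper,
    )

    def net(x: np.ndarray) -> np.ndarray:
        return x.reshape(len(x), -1)[:, :10]  # dummy "logits"

    fmodel = NumPyModel(net, bounds=(0, 1))

    # binarize inputs at 0.5 before the forward pass
    binarized = ThresholdingWrapper(fmodel, threshold=0.5)

    # average logits over repeated forward passes (useful for stochastic models)
    eot = ExpectationOverTransformationWrapper(fmodel, n_steps=4)

    x = np.random.uniform(0, 1, size=(2, 28, 28, 1)).astype(np.float32)
    print(binarized(x).shape, eot(x).shape)  # (2, 10) (2, 10)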


--------------------------------------------------------------------------------
/foolbox/plot.py:
--------------------------------------------------------------------------------
 1 | from typing import Tuple, Any, Optional
 2 | import numpy as np
 3 | import eagerpy as ep
 4 | 
 5 | 
 6 | def images(
 7 |     images: Any,
 8 |     *,
 9 |     n: Optional[int] = None,
10 |     data_format: Optional[str] = None,
11 |     bounds: Tuple[float, float] = (0, 1),
12 |     ncols: Optional[int] = None,
13 |     nrows: Optional[int] = None,
14 |     figsize: Optional[Tuple[float, float]] = None,
15 |     scale: float = 1,
16 |     **kwargs: Any,
17 | ) -> None:
18 |     import matplotlib.pyplot as plt
19 | 
20 |     x: ep.Tensor = ep.astensor(images)
21 |     if x.ndim != 4:
22 |         raise ValueError(
23 |             "expected images to have four dimensions: (N, C, H, W) or (N, H, W, C)"
24 |         )
25 |     if n is not None:
26 |         x = x[:n]
27 |     if data_format is None:
28 |         channels_first = x.shape[1] == 1 or x.shape[1] == 3
29 |         channels_last = x.shape[-1] == 1 or x.shape[-1] == 3
30 |         if channels_first == channels_last:
31 |             raise ValueError("data_format ambiguous, please specify it explicitly")
32 |     else:
33 |         channels_first = data_format == "channels_first"
34 |         channels_last = data_format == "channels_last"
35 |         if not channels_first and not channels_last:
36 |             raise ValueError(
37 |                 "expected data_format to be 'channels_first' or 'channels_last'"
38 |             )
39 |     assert channels_first != channels_last
40 |     x_np = x.numpy()
41 |     if channels_first:
42 |         x_np = np.transpose(x_np, axes=(0, 2, 3, 1))
43 |     min_, max_ = bounds
44 |     x_np = (x_np - min_) / (max_ - min_)
45 | 
46 |     if nrows is None and ncols is None:
47 |         nrows = 1
48 |     if ncols is None:
49 |         assert nrows is not None
50 |         ncols = (len(x_np) + nrows - 1) // nrows
51 |     elif nrows is None:
52 |         nrows = (len(x_np) + ncols - 1) // ncols
53 |     if figsize is None:
54 |         figsize = (ncols * scale, nrows * scale)
55 |     fig, axes = plt.subplots(
56 |         ncols=ncols,
57 |         nrows=nrows,
58 |         figsize=figsize,
59 |         squeeze=False,
60 |         constrained_layout=True,
61 |         **kwargs,
62 |     )
63 | 
64 |     for row in range(nrows):
65 |         for col in range(ncols):
66 |             ax = axes[row][col]
67 |             ax.set_xticks([])
68 |             ax.set_yticks([])
69 |             ax.axis("off")
70 |             i = row * ncols + col
71 |             if i < len(x):
72 |                 if x_np.shape[-1] == 1:
73 |                     ax.imshow(x_np[i][:, :, 0])
74 |                 else:
75 |                     ax.imshow(x_np[i])
76 | 
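A sketch of plotting a small batch with this function (matplotlib required); the images are random noise just to have something to display:

    import numpy as np
    import matplotlib.pyplot as plt
    from foolbox import plot

    images = np.random.uniform(0, 1, size=(8, 32, 32, 3)).astype(np.float32)
    plot.images(images, nrows=2, bounds=(0, 1), scale=2.0)
    plt.show()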


--------------------------------------------------------------------------------
/foolbox/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/foolbox/py.typed


--------------------------------------------------------------------------------
/foolbox/tensorboard.py:
--------------------------------------------------------------------------------
 1 | """Internal module for attacks that support logging to TensorBoard"""
 2 | from typing import Union, Callable, TypeVar, Any, cast
 3 | from typing_extensions import Literal
 4 | import eagerpy as ep
 5 | from functools import wraps
 6 | 
 7 | 
 8 | FuncType = Callable[..., None]
 9 | F = TypeVar("F", bound=FuncType)
10 | 
11 | 
12 | def maybenoop(f: F) -> F:
13 |     @wraps(f)
14 |     def wrapper(self: "TensorBoard", *args: Any, **kwds: Any) -> None:
15 |         if self.writer is None:
16 |             return
17 |         return f(self, *args, **kwds)
18 | 
19 |     return cast(F, wrapper)
20 | 
21 | 
22 | class TensorBoard:
23 |     """A custom TensorBoard class that accepts EagerPy tensors and that
24 |     can be disabled (turned into a noop) by passing logdir=False.
25 | 
26 |     This makes it possible to add tensorboard logging without any if
27 |     statements and without any computational overhead if it's disabled.
28 |     """
29 | 
30 |     def __init__(self, logdir: Union[Literal[False], None, str]):
31 |         if logdir or (logdir is None):
32 |             from tensorboardX import SummaryWriter
33 | 
34 |             self.writer = SummaryWriter(logdir=logdir)
35 |         else:
36 |             self.writer = None
37 | 
38 |     @maybenoop
39 |     def close(self) -> None:
40 |         self.writer.close()
41 | 
42 |     @maybenoop
43 |     def scalar(self, tag: str, x: Union[int, float], step: int) -> None:
44 |         self.writer.add_scalar(tag, x, step)
45 | 
46 |     @maybenoop
47 |     def mean(self, tag: str, x: ep.Tensor, step: int) -> None:
48 |         self.writer.add_scalar(tag, x.mean(axis=0).item(), step)
49 | 
50 |     @maybenoop
51 |     def probability(self, tag: str, x: ep.Tensor, step: int) -> None:
52 |         self.writer.add_scalar(tag, x.float32().mean(axis=0).item(), step)
53 | 
54 |     @maybenoop
55 |     def conditional_mean(
56 |         self, tag: str, x: ep.Tensor, cond: ep.Tensor, step: int
57 |     ) -> None:
58 |         cond_ = cond.numpy()
59 |         if ~cond_.any():
60 |             return
61 |         x_ = x.numpy()
62 |         x_ = x_[cond_]
63 |         self.writer.add_scalar(tag, x_.mean(axis=0).item(), step)
64 | 
65 |     @maybenoop
66 |     def probability_ratio(
67 |         self, tag: str, x: ep.Tensor, y: ep.Tensor, step: int
68 |     ) -> None:
69 |         x_ = x.float32().mean(axis=0).item()
70 |         y_ = y.float32().mean(axis=0).item()
71 |         if y_ == 0:
72 |             return
73 |         self.writer.add_scalar(tag, x_ / y_, step)
74 | 
75 |     @maybenoop
76 |     def histogram(
77 |         self, tag: str, x: ep.Tensor, step: int, *, first: bool = True
78 |     ) -> None:
79 |         x = x.numpy()
80 |         self.writer.add_histogram(tag, x, step)
81 |         if first:
82 |             self.writer.add_scalar(tag + "/0", x[0].item(), step)
83 | 
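For orientation, a minimal usage sketch of this wrapper; the tag names and statistics below are hypothetical and not taken from any particular attack. With `logdir=False` every call is a noop, while a directory string makes the calls write real event files via tensorboardX.

```python
import numpy as np
import eagerpy as ep
from foolbox.tensorboard import TensorBoard

tb = TensorBoard(logdir=False)  # pass e.g. "runs/my_attack" to actually log

for step in range(5):
    # hypothetical per-sample statistics an attack might track
    norms = ep.astensor(np.random.uniform(size=(8,)).astype(np.float32))
    is_adv = ep.astensor(np.random.uniform(size=(8,)) < 0.5)
    tb.mean("norms", norms, step)           # logs the batch mean as a scalar
    tb.probability("is_adv", is_adv, step)  # logs the fraction of True entries
tb.close()
```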


--------------------------------------------------------------------------------
/foolbox/types.py:
--------------------------------------------------------------------------------
 1 | from typing import NewType, NamedTuple, Union, Tuple, Optional, Dict, Any
 2 | 
 3 | 
 4 | class Bounds(NamedTuple):
 5 |     lower: float
 6 |     upper: float
 7 | 
 8 | 
 9 | BoundsInput = Union[Bounds, Tuple[float, float]]
10 | 
11 | L0 = NewType("L0", float)
12 | L1 = NewType("L1", float)
13 | L2 = NewType("L2", float)
14 | Linf = NewType("Linf", float)
15 | 
16 | Preprocessing = Optional[Dict[str, Any]]
17 | 


--------------------------------------------------------------------------------
/foolbox/utils.py:
--------------------------------------------------------------------------------
  1 | from typing import Optional, Tuple, Any
  2 | import eagerpy as ep
  3 | import warnings
  4 | import os
  5 | import numpy as np
  6 | 
  7 | from .types import Bounds
  8 | from .models import Model
  9 | 
 10 | 
 11 | def accuracy(fmodel: Model, inputs: Any, labels: Any) -> float:
 12 |     inputs_, labels_ = ep.astensors(inputs, labels)
 13 |     del inputs, labels
 14 | 
 15 |     predictions = fmodel(inputs_).argmax(axis=-1)
 16 |     accuracy = (predictions == labels_).float32().mean()
 17 |     return accuracy.item()
 18 | 
 19 | 
 20 | def samples(
 21 |     fmodel: Model,
 22 |     dataset: str = "imagenet",
 23 |     index: int = 0,
 24 |     batchsize: int = 1,
 25 |     shape: Tuple[int, int] = (224, 224),
 26 |     data_format: Optional[str] = None,
 27 |     bounds: Optional[Bounds] = None,
 28 | ) -> Any:
 29 |     if hasattr(fmodel, "data_format"):
 30 |         if data_format is None:
 31 |             data_format = fmodel.data_format
 32 |         elif data_format != fmodel.data_format:
 33 |             raise ValueError(
 34 |                 f"data_format ({data_format}) does not match model.data_format ({fmodel.data_format})"
 35 |             )
 36 |     elif data_format is None:
 37 |         raise ValueError(
 38 |             "data_format could not be inferred, please specify it explicitly"
 39 |         )
 40 | 
 41 |     if bounds is None:
 42 |         bounds = fmodel.bounds
 43 | 
 44 |     images, labels = _samples(
 45 |         dataset=dataset,
 46 |         index=index,
 47 |         batchsize=batchsize,
 48 |         shape=shape,
 49 |         data_format=data_format,
 50 |         bounds=bounds,
 51 |     )
 52 | 
 53 |     if hasattr(fmodel, "dummy") and fmodel.dummy is not None:
 54 |         images = ep.from_numpy(fmodel.dummy, images).raw
 55 |         labels = ep.from_numpy(fmodel.dummy, labels).raw
 56 |     else:
 57 |         warnings.warn(f"unknown model type {type(fmodel)}, returning NumPy arrays")
 58 | 
 59 |     return images, labels
 60 | 
 61 | 
 62 | def _samples(
 63 |     dataset: str,
 64 |     index: int,
 65 |     batchsize: int,
 66 |     shape: Tuple[int, int],
 67 |     data_format: str,
 68 |     bounds: Bounds,
 69 | ) -> Tuple[Any, Any]:
 70 |     # TODO: this was copied from foolbox v2
 71 | 
 72 |     from PIL import Image
 73 | 
 74 |     images, labels = [], []
 75 |     basepath = os.path.dirname(__file__)
 76 |     samplepath = os.path.join(basepath, "data")
 77 |     files = os.listdir(samplepath)
 78 | 
 79 |     if batchsize > 20:
 80 |         warnings.warn(
 81 |             "samples() has only 20 samples and repeats itself if batchsize > 20"
 82 |         )
 83 | 
 84 |     for idx in range(index, index + batchsize):
 85 |         i = idx % 20
 86 | 
 87 |         # get filename and label
 88 |         file = [n for n in files if f"{dataset}_{i:02d}_" in n][0]
 89 |         label = int(file.split(".")[0].split("_")[-1])
 90 | 
 91 |         # open file
 92 |         path = os.path.join(samplepath, file)
 93 |         image = Image.open(path)
 94 | 
 95 |         if dataset == "imagenet":
 96 |             image = image.resize(shape)
 97 | 
 98 |         image = np.asarray(image, dtype=np.float32)
 99 | 
100 |         if image.ndim == 2:
101 |             image = image[..., np.newaxis]
102 | 
103 |         assert image.ndim == 3
104 | 
105 |         if data_format == "channels_first":
106 |             image = np.transpose(image, (2, 0, 1))
107 | 
108 |         images.append(image)
109 |         labels.append(label)
110 | 
111 |     images_ = np.stack(images)
112 |     labels_ = np.array(labels).astype(np.int64)
113 | 
114 |     if bounds != (0, 255):
115 |         images_ = images_ / 255 * (bounds[1] - bounds[0]) + bounds[0]
116 |     return images_, labels_
117 | 
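For orientation, a minimal sketch of how `samples` and `accuracy` are typically used together; the torchvision model and the ImageNet preprocessing values are assumptions for the example, not part of this module.

```python
import foolbox as fb
import torchvision.models as models

# hypothetical model; any foolbox-wrapped model with data_format and bounds works
model = models.resnet18(pretrained=True).eval()
preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
fmodel = fb.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

# 16 of the bundled ImageNet samples, scaled to the model's bounds
images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=16)
print(fb.utils.accuracy(fmodel, images, labels))
```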


--------------------------------------------------------------------------------
/foolbox/zoo/__init__.py:
--------------------------------------------------------------------------------
1 | from .zoo import get_model  # noqa: F401
2 | from .weights_fetcher import fetch_weights  # noqa: F401
3 | from .git_cloner import GitCloneError  # noqa: F401
4 | from .model_loader import ModelLoader  # noqa: F401
5 | 


--------------------------------------------------------------------------------
/foolbox/zoo/common.py:
--------------------------------------------------------------------------------
 1 | import hashlib
 2 | import os
 3 | 
 4 | 
 5 | def sha256_hash(git_uri: str) -> str:
 6 |     m = hashlib.sha256()
 7 |     m.update(git_uri.encode())
 8 |     return m.hexdigest()
 9 | 
10 | 
11 | def home_directory_path(folder: str, hash_digest: str) -> str:
12 |     # does this work on all operating systems?
13 |     home = os.path.expanduser("~")
14 |     return os.path.join(home, folder, hash_digest)
15 | 


--------------------------------------------------------------------------------
/foolbox/zoo/git_cloner.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import shutil
 3 | from git import Repo
 4 | import logging
 5 | from .common import sha256_hash, home_directory_path
 6 | 
 7 | FOLDER = ".foolbox_zoo"
 8 | 
 9 | 
10 | class GitCloneError(RuntimeError):
11 |     pass
12 | 
13 | 
14 | def clone(git_uri: str, overwrite: bool = False) -> str:
15 |     """Clones a remote git repository to a local path.
16 | 
17 |     Args:
18 |         git_uri: The URI to the git repository to be cloned.
19 |         overwrite: Whether or not to overwrite the local path.
20 | 
21 |     Returns:
22 |         The generated local path where the repository has been cloned to.
23 |     """
24 |     hash_digest = sha256_hash(git_uri)
25 |     local_path = home_directory_path(FOLDER, hash_digest)
26 |     exists_locally = os.path.exists(local_path)
27 | 
28 |     if exists_locally and overwrite:
29 |         # TODO: ideally we would just pull the latest changes instead of cloning again
30 |         shutil.rmtree(local_path, ignore_errors=True)
31 |         exists_locally = False
32 | 
33 |     if not exists_locally:
34 |         _clone_repo(git_uri, local_path)
35 |     else:
36 |         logging.info(  # pragma: no cover
37 |             "Git repository already exists locally."
38 |         )  # pragma: no cover
39 | 
40 |     return local_path
41 | 
42 | 
43 | def _clone_repo(git_uri: str, local_path: str) -> None:
44 |     logging.info("Cloning repo %s to %s", git_uri, local_path)
45 |     try:
46 |         Repo.clone_from(git_uri, local_path)
47 |     except Exception as e:
48 |         logging.exception("Failed to clone repository", exc_info=e)
49 |         raise GitCloneError("Failed to clone repository")
50 |     logging.info("Cloned repo successfully.")
51 | 


--------------------------------------------------------------------------------
/foolbox/zoo/model_loader.py:
--------------------------------------------------------------------------------
 1 | from typing import Any, cast, Optional
 2 | from types import ModuleType
 3 | import sys
 4 | import importlib
 5 | import abc
 6 | from abc import abstractmethod
 7 | 
 8 | from ..models import Model
 9 | 
10 | 
11 | class ModelLoader(abc.ABC):
12 |     @abstractmethod
13 |     def load(
14 |         self, path: str, module_name: str = "foolbox_model", **kwargs: Any
15 |     ) -> Model:
16 |         """Loads a model from a local path to which a git repository has previously been cloned.
17 | 
18 |         Args:
19 |             path: The path to the local repository containing the code.
20 |             module_name: The name of the module to import.
21 |             kwargs: Additional parameters for the loaded model.
22 | 
23 |         Returns:
24 |             A foolbox-wrapped model.
25 |         """
26 |         ...
27 | 
28 |     @staticmethod
29 |     def get(key: Optional[str] = None) -> "ModelLoader":
30 |         if key is None:
31 |             return DefaultLoader()
32 |         else:
33 |             raise ValueError(f"No model loader for: {key}")
34 | 
35 |     @staticmethod
36 |     def _import_module(path: str, module_name: str = "foolbox_model") -> ModuleType:
37 |         sys.path.insert(0, path)
38 |         module = importlib.import_module(module_name)
39 |         print("imported module: {}".format(module))
40 |         return module
41 | 
42 | 
43 | class DefaultLoader(ModelLoader):
44 |     def load(
45 |         self, path: str, module_name: str = "foolbox_model", **kwargs: Any
46 |     ) -> Model:
47 |         module = super()._import_module(path, module_name=module_name)
48 |         model = module.create(**kwargs)
49 |         return cast(Model, model)
50 | 


--------------------------------------------------------------------------------
/foolbox/zoo/weights_fetcher.py:
--------------------------------------------------------------------------------
 1 | import requests
 2 | import shutil
 3 | import zipfile
 4 | import tarfile
 5 | import os
 6 | import logging
 7 | 
 8 | from .common import sha256_hash, home_directory_path
 9 | 
10 | FOLDER = ".foolbox_zoo/weights"
11 | 
12 | 
13 | def fetch_weights(weights_uri: str, unzip: bool = False) -> str:
14 |     """Provides utilities to download and extract packages
15 |     containing model weights when creating foolbox-zoo compatible
16 |     repositories, if the weights are not part of the repository itself.
17 | 
18 |     Examples
19 |     --------
20 | 
21 |     Download and unzip weights:
22 | 
23 |     >>> from foolbox import zoo
24 |     >>> url = 'https://github.com/MadryLab/mnist_challenge_models/raw/master/secret.zip'  # noqa F501
25 |     >>> weights_path = zoo.fetch_weights(url, unzip=True)
26 | 
27 |     Args:
28 |         weights_uri: The URI to fetch the weights from.
29 |         unzip: Should be `True` if the file to be downloaded is a zipped package.
30 | 
31 |     Returns:
32 |         Local path where the weights have been downloaded and potentially unzipped to.
33 |     """
34 |     assert weights_uri is not None
35 |     hash_digest = sha256_hash(weights_uri)
36 |     local_path = home_directory_path(FOLDER, hash_digest)
37 |     exists_locally = os.path.exists(local_path)
38 | 
39 |     filename = _filename_from_uri(weights_uri)
40 |     file_path = os.path.join(local_path, filename)
41 | 
42 |     if exists_locally:
43 |         logging.info("Weights already stored locally.")  # pragma: no cover
44 |     else:
45 |         _download(file_path, weights_uri, local_path)
46 | 
47 |     if unzip:
48 |         file_path = _extract(local_path, filename)
49 | 
50 |     return file_path
51 | 
52 | 
53 | def _filename_from_uri(url: str) -> str:
54 |     # get last part of the URI, i.e. file-name
55 |     filename = url.split("/")[-1]
56 |     # remove query params if exist
57 |     filename = filename.split("?")[0]
58 |     return filename
59 | 
60 | 
61 | def _download(file_path: str, url: str, directory: str) -> None:
62 |     logging.info("Downloading weights: %s to %s", url, file_path)
63 |     if not os.path.exists(directory):
64 |         os.makedirs(directory)
65 |     # first check ETag or If-Modified-Since header or similar
66 |     # to check whether updated weights are available?
67 |     r = requests.get(url, stream=True)
68 |     if r.status_code == 200:
69 |         with open(file_path, "wb") as f:
70 |             r.raw.decode_content = True
71 |             shutil.copyfileobj(r.raw, f)
72 |     else:
73 |         raise RuntimeError(f"Failed to fetch weights from {url}")
74 | 
75 | 
76 | def _extract(directory: str, filename: str) -> str:
77 |     file_path = os.path.join(directory, filename)
78 |     extracted_folder = filename.rsplit(".", 1)[0]
79 |     extracted_folder = os.path.join(directory, extracted_folder)
80 | 
81 |     if not os.path.exists(extracted_folder):
82 |         logging.info("Extracting weights package to %s", extracted_folder)
83 |         os.makedirs(extracted_folder)
84 |         if ".zip" in file_path:
85 |             zip_ref = zipfile.ZipFile(file_path, "r")
86 |             zip_ref.extractall(extracted_folder)
87 |             zip_ref.close()
88 |         elif ".tar.gz" in file_path:  # pragma: no cover
89 |             tar_ref = tarfile.TarFile.open(file_path, "r")
90 |             tar_ref.extractall(extracted_folder)
91 |             tar_ref.close()
92 |     else:
93 |         logging.info(
94 |             "Extracted folder already exists: %s", extracted_folder
95 |         )  # pragma: no cover
96 | 
97 |     return extracted_folder
98 | 


--------------------------------------------------------------------------------
/foolbox/zoo/zoo.py:
--------------------------------------------------------------------------------
 1 | from typing import Any
 2 | 
 3 | from ..models import Model
 4 | 
 5 | from .git_cloner import clone
 6 | from .model_loader import ModelLoader
 7 | 
 8 | 
 9 | def get_model(
10 |     url: str, module_name: str = "foolbox_model", overwrite: bool = False, **kwargs: Any
11 | ) -> Model:
12 |     """Download a Foolbox-compatible model from the given Git repository URL.
13 | 
14 |     Examples
15 |     --------
16 | 
17 |     Instantiate a model:
18 | 
19 |     >>> from foolbox import zoo
20 |     >>> url = "https://github.com/bveliqi/foolbox-zoo-dummy.git"
21 |     >>> model = zoo.get_model(url)  # doctest: +SKIP
22 | 
23 |     This only works with foolbox-zoo compatible repositories, i.e. the
24 |     repository needs to contain a `foolbox_model.py` file
25 |     with a `create()` function that returns a foolbox-wrapped model.
26 | 
27 |     Arbitrary keyword arguments can be passed via kwargs; they are
28 |     forwarded to the repository's `create()` function when the model
29 |     is instantiated.
30 | 
31 |     Example repositories:
32 | 
33 |         - https://github.com/jonasrauber/foolbox-tensorflow-keras-applications
34 |         - https://github.com/bethgelab/AnalysisBySynthesis
35 |         - https://github.com/bethgelab/mnist_challenge
36 |         - https://github.com/bethgelab/cifar10_challenge
37 |         - https://github.com/bethgelab/convex_adversarial
38 |         - https://github.com/wielandbrendel/logit-pairing-foolbox.git
39 |         - https://github.com/bethgelab/defensive-distillation.git
40 | 
41 |     Args:
42 |         url: URL to the git repository.
43 |         module_name: The name of the module to import.
44 |         kwargs: Optional keyword arguments forwarded to the model's `create()` function.
45 | 
46 |     Returns:
47 |         A Foolbox-wrapped model instance.
48 |     """
49 |     repo_path = clone(url, overwrite=overwrite)
50 |     loader = ModelLoader.get()
51 |     model = loader.load(repo_path, module_name=module_name, **kwargs)
52 |     return model
53 | 
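For orientation, a minimal sketch of what such a repository's `foolbox_model.py` could look like; the network architecture is hypothetical and weight loading is omitted.

```python
# foolbox_model.py (at the root of the zoo repository)
import torch
import foolbox as fb


def create() -> fb.PyTorchModel:
    # hypothetical architecture; a real repository would also load trained weights here
    net = torch.nn.Sequential(
        torch.nn.Flatten(),
        torch.nn.Linear(28 * 28, 10),
    )
    net.eval()
    return fb.PyTorchModel(net, bounds=(0, 1))
```

`zoo.get_model(url)` clones the repository, imports this module, and returns whatever `create()` returns.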


--------------------------------------------------------------------------------
/guide/.vuepress/config.js:
--------------------------------------------------------------------------------
 1 | module.exports = {
 2 |   title: 'Foolbox',
 3 |   description: 'A Python toolbox to create adversarial examples that fool neural networks in PyTorch, TensorFlow, and JAX',
 4 |   themeConfig: {
 5 |     nav: [
 6 |       { text: 'Guide', link: '/guide/' },
 7 |       { text: 'API', link: 'https://foolbox.readthedocs.io/en/stable/' },
 8 |       { text: 'GitHub', link: 'https://github.com/bethgelab/foolbox' },
 9 |     ],
10 |     sidebar: [
11 |       {
12 |         title: 'Guide',
13 |         collapsable: false,
14 |         children: [
15 |           '/guide/',
16 |           '/guide/getting-started',
17 |           '/guide/examples',
18 |           '/guide/development',
19 |           '/guide/adding_attacks',
20 |         ],
21 |       },
22 |     ],
23 |   },
24 | }
25 | 


--------------------------------------------------------------------------------
/guide/.vuepress/public/CNAME:
--------------------------------------------------------------------------------
1 | foolbox.jonasrauber.de
2 | 


--------------------------------------------------------------------------------
/guide/.vuepress/public/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/guide/.vuepress/public/logo.png


--------------------------------------------------------------------------------
/guide/.vuepress/public/logo_alpha.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/guide/.vuepress/public/logo_alpha.png


--------------------------------------------------------------------------------
/guide/.vuepress/public/logo_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/guide/.vuepress/public/logo_small.png


--------------------------------------------------------------------------------
/guide/README.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | home: true
 3 | heroImage: /logo.png
 4 | heroText: Foolbox
 5 | tagline: "Foolbox: Fast adversarial attacks to benchmark the robustness of machine learning models in PyTorch, TensorFlow, and JAX"
 6 | actionText: Get Started →
 7 | actionLink: /guide/
 8 | features:
 9 | - title: Native Performance
10 |   details: Foolbox 3 is built on top of EagerPy and runs natively in PyTorch, TensorFlow, and JAX.
11 | - title: State-of-the-art attacks
12 |   details: Foolbox provides a large collection of state-of-the-art gradient-based and decision-based adversarial attacks.
13 | - title: Type Checking
14 |   details: Catch bugs before running your code thanks to extensive type annotations in Foolbox.
15 | footer: Copyright © 2022 Jonas Rauber, Roland S. Zimmermann
16 | 
17 | ---
18 | 
19 | ### What is Foolbox?
20 | 
21 | **Foolbox** is a **Python library** that lets you easily run adversarial attacks against machine learning models like deep neural networks. It is built on top of [**EagerPy**](https://eagerpy.jonasrauber.de) and works natively with models in [**PyTorch**](https://pytorch.org), [**TensorFlow**](https://www.tensorflow.org), and [**JAX**](https://github.com/google/jax).
22 | 
23 | ```python
24 | import foolbox as fb
25 | 
26 | model = ...
27 | fmodel = fb.PyTorchModel(model, bounds=(0, 1))
28 | 
29 | attack = fb.attacks.LinfPGD()
30 | epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]
31 | advs, _, success = attack(fmodel, images, labels, epsilons=epsilons)
32 | ```
33 | 
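If `images` and `labels` are EagerPy-wrapped tensors (e.g. obtained via `ep.astensors`), `success` is a boolean tensor with one row per epsilon, and the robust accuracy per epsilon can be read off directly; a short sketch:

```python
# success has shape (len(epsilons), len(images))
robust_accuracy = 1 - success.float32().mean(axis=-1)
for eps, acc in zip(epsilons, robust_accuracy):
    print(f"Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
```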


--------------------------------------------------------------------------------
/guide/guide/README.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 | 
3 | ## What is Foolbox?
4 | 
5 | **Foolbox** is a **Python library** that lets you easily run adversarial attacks against machine learning models like deep neural networks. It is built on top of [**EagerPy**](https://eagerpy.jonasrauber.de) and works natively with models in [**PyTorch**](https://pytorch.org), [**TensorFlow**](https://www.tensorflow.org), and [**JAX**](https://github.com/google/jax).
6 | 


--------------------------------------------------------------------------------
/guide/guide/adding_attacks.md:
--------------------------------------------------------------------------------
 1 | # Adding Adversarial Attacks
 2 | 
 3 | ::: tip NOTE
 4 | The [development guidelines](./development) explain how to get started
 5 | with developing features and adversarial attacks for Foolbox.
 6 | :::
 7 | 
 8 | ## The `Attack` base class
 9 | 
10 | Adversarial attacks in Foolbox should either directly or indirectly subclass
11 | the `Attack` base class in `foolbox/attacks/base.py`.
12 | 
13 | An attack in Foolbox needs to implement two methods, `__call__` and `repeat`.
14 | 
15 | Both methods need to be implemented with the same signature as the base class.
16 | The type annotation for the `criterion` argument of `__call__` can be made
17 | more precise, see `foolbox/attacks/carlini_wagner.py` for an example.
18 | 
19 | The `__call__` method should return three values: a list of raw tensors (one
20 | for each epsilon) with the internal raw attack results, a list of tensors
21 | corresponding to the raw tensors but with perturbation sizes guaranteed to
22 | be clipped to the given epsilons, and a boolean tensor with `len(epsilons)`
23 | rows and `len(inputs)` columns indicating for each returned sample whether
24 | it is a successful adversarial example given the respective epsilon and
25 | criterion. If `epsilons` is a single scalar epsilon (and not a list with
26 | one element), then the first and second return value should be a tensor
27 | rather than a list and the third return value should be a 1-D tensor.
28 | 
29 | All returned tensors must have the same type as the input tensors. In
30 | particular, native tensors should be returned as native tensors and
31 | EagerPy-wrapped tensors should be returned as EagerPy-wrapped tensors.
32 | Use `astensor_` or `astensors_` and `restore_type`.
33 | 
34 | The `repeat` method should return a version of the attack that repeats itself
35 | n times and returns the best result.
36 | 
37 | ::: warning NOTE
38 | In practice, it is usually not necessary to subclass `Attack` directly.
39 | Instead, for most attacks it is easiest to subclass either `FixedEpsilonAttack`
40 | or `MinimizationAttack`.
41 | :::
42 | 
43 | ## The `FixedEpsilonAttack` base class
44 | 
45 | Attacks that require a fixed epsilon and try to find an adversarial
46 | perturbation given this perturbation budget (e.g. `FGSM` and `PGD`) should
47 | be implemented by subclassing `FixedEpsilonAttack`. It already provides
48 | implementations of `__call__` and `repeat`. The attack just needs
49 | to specify the `distance` property (simply assign a class variable) and
50 | implement the `run` method that gets a single `epsilon` and returns a batch
51 | of perturbed inputs, ideally adversarial and ideally with a perturbation
52 | size smaller or equal to `epsilon`.
53 | The `distance` is used by `__call__` to determine which perturbed inputs
54 | are actually adversarials given `epsilon` and by `repeat` to determine the
55 | best run.
56 | 
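A minimal sketch of such a subclass; the attack itself is only a toy (similar in spirit to the additive-noise attacks in `foolbox/attacks/additive_noise.py`) and its name is hypothetical.

```python
from typing import Any

import eagerpy as ep

from foolbox.attacks.base import FixedEpsilonAttack, T, raise_if_kwargs
from foolbox.distances import linf
from foolbox.models import Model


class UniformNoiseSketchAttack(FixedEpsilonAttack):
    """Toy attack: perturbs the inputs with uniform noise of size epsilon."""

    # used by __call__ to check results and by repeat to pick the best run
    distance = linf

    def run(
        self, model: Model, inputs: T, criterion: Any, *, epsilon: float, **kwargs: Any
    ) -> T:
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        min_, max_ = model.bounds
        noise = ep.uniform(x, x.shape, -epsilon, epsilon)
        x = (x + noise).clip(min_, max_)
        return restore_type(x)  # return the same tensor type that was passed in
```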
57 | ## The `MinimizationAttack` base class
58 | 
59 | Attacks that try to find adversarial examples with minimal perturbation size
60 | (e.g. the `Carlini & Wagner` attack or the `Boundary Attack`) should
61 | be implemented by subclassing `MinimizationAttack`. It already provides
62 | implementations of `__call__` and `repeat`. The attack just needs
63 | to specify the `distance` property (simply assign a class variable) and
64 | implement the `run` method that returns a batch of minimally perturbed
65 | adversarials. For `MinimizationAttack` subclasses, `run` gets called only once
66 | by `__call__` independent of how many `epsilons` are given. The `__call__`
67 | method then compares the minimal adversarial perturbation to the different
68 | epsilons.
69 | 
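A brief usage sketch of this behaviour; `fmodel`, `images`, and `labels` are assumed to be set up as in the getting-started examples.

```python
import foolbox as fb

# a MinimizationAttack subclass: run() is executed once, independent of epsilons
attack = fb.attacks.L2CarliniWagnerAttack(steps=1000)
epsilons = [0.1, 0.5, 1.0]

# __call__ compares the minimal perturbations found by run() to each epsilon,
# so success has shape (len(epsilons), len(images))
raw, clipped, success = attack(fmodel, images, labels, epsilons=epsilons)
```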
70 | ::: tip
71 | You should have a look at the implementation of existing attacks
72 | to get an impression of the best practices and conventions used in Foolbox.
73 | :::
74 | 


--------------------------------------------------------------------------------
/guide/guide/development.md:
--------------------------------------------------------------------------------
 1 | # Development
 2 | 
 3 | ::: tip NOTE
 4 | The following is only necessary if you want to contribute features or
 5 | adversarial attacks to Foolbox. As a user of Foolbox, you can just do a normal
 6 | [installation](./getting-started).
 7 | :::
 8 | 
 9 | ## Installation
10 | 
11 | First, clone the repository using `git`:
12 | 
13 | ```bash
14 | git clone https://github.com/bethgelab/foolbox
15 | ```
16 | 
17 | You can then do an editable installation using `pip install -e`:
18 | 
19 | ```bash
20 | cd foolbox
21 | pip3 install -e .
22 | ```
23 | 
24 | ::: tip
25 | Create a new branch for each new feature or contribution.
26 | This will be necessary to open a pull request later.
27 | :::
28 | 
29 | ## Coding Style
30 | 
31 | We follow the [PEP 8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/).
32 | We use [black](https://github.com/psf/black) for automatic code formatting.
33 | In addition, we use [flake8](https://flake8.pycqa.org/en/latest/) to detect
34 | certain PEP 8 violations.
35 | 
36 | ::: tip
37 | Have a look at the `Makefile`. It contains many useful commands, e.g. `make black` or `make flake8`.
38 | :::
39 | 
40 | ## Type annotations and MyPy
41 | 
42 | Foolbox uses Python type annotations introduced in [PEP 484](https://www.python.org/dev/peps/pep-0484/).
43 | We use [mypy](http://mypy-lang.org) for static type checking with relatively
44 | strict settings. All code in Foolbox has to be type annotated.
45 | 
46 | We recommend running MyPy or a comparable type checker automatically in your
47 | editor (e.g. VIM) or IDE (e.g. PyCharm). You can also run MyPy from the
48 | command line:
49 | 
50 | ```bash
51 | make mypy  # run this in the root folder that contains the Makefile
52 | ```
53 | 
54 | ::: tip NOTE
55 | `__init__` methods in Foolbox should not have return type annotations unless
56 | they have no type annotated arguments (i.e. only `self`), in which case
57 | the return type of `__init__` should be specified as `None`.
58 | :::
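A tiny illustration of this convention (the class names are made up for the example):

```python
class WithAnnotatedArguments:
    # arguments are type annotated, so mypy already checks this method;
    # no return type annotation is needed
    def __init__(self, steps: int, stepsize: float):
        self.steps = steps
        self.stepsize = stepsize


class WithoutAnnotatedArguments:
    # only `self`, so the return type must be specified explicitly
    def __init__(self) -> None:
        self.steps = 1000
```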
59 | 
60 | ## Creating a pull request on GitHub
61 | 
62 | First, fork the [Foolbox repository on GitHub](https://github.com/bethgelab/foolbox).
63 | Then, add the fork as a remote to your local Git repository:
64 | 
65 | ```bash
66 | git remote add fork https://github.com/YOUR-USERNAME/foolbox
67 | ```
68 | 
69 | Finally, push your new branch to GitHub and open a pull request.
70 | 


--------------------------------------------------------------------------------
/guide/guide/examples.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | title: Examples
 3 | 
 4 | ---
 5 | 
 6 | # Examples :tada:
 7 | 
 8 | ::: tip
 9 | More examples can be found in the [examples folder](https://github.com/bethgelab/foolbox/tree/master/examples).
10 | :::
11 | 
12 | <<< @/../examples/single_attack_pytorch_resnet18.py
13 | 


--------------------------------------------------------------------------------
/paper/paper.bib:
--------------------------------------------------------------------------------
  1 | @inproceedings{pytorch,
  2 |   title={{PyTorch}: An imperative style, high-performance deep learning library},
  3 |   author={Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and others},
  4 |   booktitle={Advances in neural information processing systems},
  5 |   pages={8026--8037},
  6 |   year={2019}
  7 | }
  8 | 
  9 | @inproceedings{tensorflow,
 10 |   title={{TensorFlow}: A system for large-scale machine learning},
 11 |   author={Abadi, Mart{\'\i}n and Barham, Paul and Chen, Jianmin and Chen, Zhifeng and Davis, Andy and Dean, Jeffrey and Devin, Matthieu and Ghemawat, Sanjay and Irving, Geoffrey and Isard, Michael and others},
 12 |   booktitle={12th {USENIX} symposium on operating systems design and implementation ({OSDI} 16)},
 13 |   pages={265--283},
 14 |   year={2016}
 15 | }
 16 | 
 17 | @misc{jax,
 18 |   author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James Johnson and Chris Leary and Dougal Maclaurin and Skye Wanderman-Milne},
 19 |   title = {{JAX}: composable transformations of {P}ython+{N}um{P}y programs},
 20 |   url = {http://github.com/google/jax},
 21 |   year = {2018},
 22 | }
 23 | 
 24 | @inproceedings{rauber2017foolbox,
 25 |   title={Foolbox: A {Python} toolbox to benchmark the robustness of machine learning models},
 26 |   author={Rauber, Jonas and Brendel, Wieland and Bethge, Matthias},
 27 |   booktitle={Reliable Machine Learning in the Wild Workshop, 34th International Conference on Machine Learning},
 28 |   year={2017},
 29 |   url={https://arxiv.org/abs/1707.04131},
 30 | }
 31 | 
 32 | @article{rauber2020eagerpy,
 33 |   title={{EagerPy}: Writing Code That Works Natively with {PyTorch}, {TensorFlow}, {JAX}, and {NumPy}},
 34 |   author={Rauber, Jonas and Bethge, Matthias and Brendel, Wieland},
 35 |   journal={arXiv preprint arXiv:2008.04175},
 36 |   year={2020},
 37 |   url={https://eagerpy.jonasrauber.de},
 38 | }
 39 | 
 40 | @misc{numpy,
 41 |   author = {Travis Oliphant},
 42 |   title = {{NumPy}: A guide to {NumPy}},
 43 |   year = {2006},
 44 |   howpublished = {USA: Trelgol Publishing},
 45 |   url = "http://www.numpy.org/",
 46 |  }
 47 | 
 48 | @techreport{pep484,
 49 |   author  = {Guido van Rossum and Jukka Lehtosalo and Łukasz Langa},
 50 |   title   = {Type Hints},
 51 |   year    = {2015},
 52 |   type    = {PEP},
 53 |   number  = {484},
 54 |   institution = {Python Software Foundation},
 55 |   url     = {https://www.python.org/dev/peps/pep-0484/},
 56 | }
 57 | 
 58 | @inproceedings{brendel2018decisionbased,
 59 |   title={Decision-Based Adversarial Attacks: Reliable Attacks Against Black-Box Machine Learning Models},
 60 |   author={Wieland Brendel and Jonas Rauber and Matthias Bethge},
 61 |   booktitle={International Conference on Learning Representations},
 62 |   year={2018},
 63 |   url={https://openreview.net/forum?id=SyZI0GWCZ},
 64 | }
 65 | 
 66 | @inproceedings{schott2018towards,
 67 |   title={Towards the first adversarially robust neural network model on {MNIST}},
 68 |   author={Lukas Schott and Jonas Rauber and Matthias Bethge and Wieland Brendel},
 69 |   booktitle={International Conference on Learning Representations},
 70 |   year={2019},
 71 |   url={https://openreview.net/forum?id=S1EHOsC9tX},
 72 | }
 73 | 
 74 | @article{rauber2020fast,
 75 |   title={Fast Differentiable Clipping-Aware Normalization and Rescaling},
 76 |   author={Rauber, Jonas and Bethge, Matthias},
 77 |   journal={arXiv preprint arXiv:2007.07677},
 78 |   year={2020},
 79 |   url={https://github.com/jonasrauber/clipping-aware-rescaling},
 80 | }
 81 | 
 82 | @inproceedings{brendel2019accurate,
 83 |   title={Accurate, reliable and fast robustness evaluation},
 84 |   author={Wieland Brendel and Jonas Rauber and Matthias K{\"u}mmerer and Ivan Ustyuzhaninov and Matthias Bethge},
 85 |   booktitle={Advances in Neural Information Processing Systems 32},
 86 |   year={2019},
 87 | }
 88 | 
 89 | @inproceedings{chen2020hopskipjumpattack,
 90 |   title={{HopSkipJumpAttack}: A query-efficient decision-based attack},
 91 |   author={Chen, Jianbo and Jordan, Michael I and Wainwright, Martin J},
 92 |   booktitle={2020 IEEE Symposium on Security and Privacy (SP)},
 93 |   pages={1277--1294},
 94 |   year={2020},
 95 |   organization={IEEE},
 96 |   doi={10.1109/SP40000.2020.00045},
 97 |   url={http://dx.doi.org/10.1109/SP40000.2020.00045}
 98 | }
 99 | 
100 | 


--------------------------------------------------------------------------------
/performance/README.md:
--------------------------------------------------------------------------------
 1 | ## Performance comparison between Foolbox versions
 2 | 
 3 | |                                        |   Foolbox `1.8.0`   |   Foolbox `2.4.0`  | Foolbox `3.1.1`<br>(aka Native) |
 4 | |----------------------------------------|:-------------------:|:------------------:|:-------------------------------:|
 5 | | accuracy (single image)                |  `5.02 ms ± 338 µs` | `4.99 ms ± 378 µs` |      **`3.99 ms ± 131 µs`**     |
 6 | | accuracy (16 images)                   | `88.9 ms ± 8.24 ms` |  `12 ms ± 1.34 ms` |     **`8.21 ms ± 54.4 µs`**     |
 7 | | PGD attack (16 images, single epsilon) |      `161.8 s`      |      `37.5 s`      |           **`1.1 s`**           |
 8 | | PGD attack (16 images, 8 epsilons)     |      `164.6 s`      |      `36.9 s`      |           **`9.0 s`**           |
 9 | 
10 | 
11 | All experiments were done on an Nvidia GeForce GTX 1080 using the PGD attack.
12 | 
13 | Note that Foolbox 3 is faster because **1)** it avoids memory copies between GPU
14 | and CPU by using EagerPy instead of NumPy, **2)** it fully supports batches
15 | of inputs, and **3)** it currently uses a different approach for fixed-epsilon attacks
16 | like PGD: instead of minimizing the perturbation, the attack is run for
17 | each epsilon, which is more in line with what is generally expected.
18 | For these attacks the duration therefore now scales with the
19 | number of epsilons, but they are still faster and produce better results.
20 | 


--------------------------------------------------------------------------------
/performance/images.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/performance/images.npy


--------------------------------------------------------------------------------
/performance/labels.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bethgelab/foolbox/2513a9a8675d7017e5266d3b0ed89124cb436ec5/performance/labels.npy


--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
 1 | [tool.black]
 2 | line-length = 88
 3 | target-version = ['py36', 'py37', 'py38']
 4 | include = '\.pyi?'
 5 | exclude = '''
 6 | /(
 7 |     \.eggs
 8 |   | \.git
 9 |   | \.hg
10 |   | \.mypy_cache
11 |   | \.tox
12 |   | \.venv
13 |   | _build
14 |   | buck-out
15 |   | build
16 |   | dist
17 | 
18 |   # specific to Foolbox
19 |   | .pytest_cache
20 | )/
21 | '''
22 | 


--------------------------------------------------------------------------------
/readthedocs.yml:
--------------------------------------------------------------------------------
 1 | version: 2
 2 | sphinx:
 3 |   configuration: docs/conf.py
 4 | formats: all
 5 | python:
 6 |   version: 3.6
 7 |   install:
 8 |     - requirements: docs/requirements.txt
 9 |     - method: pip
10 |       path: .
11 | 


--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | flake8==6.1.0
 2 | black==22.3.0
 3 | pytest==7.4.3
 4 | pytest-cov==3.0.0
 5 | coverage==6.3.2
 6 | codecov==2.1.13
 7 | coveralls==3.3.1
 8 | mypy==1.6.1
 9 | pre-commit==2.17.0
10 | 


--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
 1 | [flake8]
 2 | ignore = E203, E266, E501, W503
 3 | max-line-length = 80
 4 | max-complexity = 18
 5 | select = B,C,E,F,W,T4,B9
 6 | 
 7 | [mypy]
 8 | python_version = 3.9
 9 | warn_unused_ignores = True
10 | warn_unused_configs = True
11 | warn_return_any = True
12 | warn_redundant_casts = True
13 | warn_unreachable = True
14 | ignore_missing_imports = False
15 | disallow_any_unimported = True
16 | namespace_packages = True
17 | disallow_untyped_calls = True
18 | no_implicit_optional = True
19 | disallow_untyped_defs = True
20 | 
21 | [mypy-numpy]
22 | ignore_missing_imports = True
23 | 
24 | [mypy-tensorflow]
25 | ignore_missing_imports = True
26 | 
27 | [mypy-tensorboardX]
28 | ignore_missing_imports = True
29 | 
30 | [mypy-jax.*]
31 | ignore_missing_imports = True
32 | 
33 | [mypy-matplotlib.*]
34 | ignore_missing_imports = True
35 | 
36 | [mypy-pytest]
37 | ignore_missing_imports = True
38 | 
39 | [mypy-torchvision.*]
40 | ignore_missing_imports = True
41 | 
42 | [mypy-PIL.*]
43 | ignore_missing_imports = True
44 | 
45 | [mypy-git.*]
46 | ignore_missing_imports = True
47 | 
48 | [mypy-scipy.*]
49 | ignore_missing_imports = True
50 | 
51 | [mypy-responses.*]
52 | ignore_missing_imports = True
53 | 
54 | [mypy-numba]
55 | ignore_missing_imports = True
56 | 
57 | [isort]
58 | profile=black
59 | 
60 | [tool:pytest]
61 | addopts =
62 |     --verbose
63 |     --cov=foolbox
64 |     --cov-report term-missing
65 | filterwarnings =
66 |     ignore::DeprecationWarning
67 |     ignore::PendingDeprecationWarning
68 |     # produced by TensorFlow:
69 |     ignore:.*can't resolve package from __spec__ or __package__.*:ImportWarning
70 | 
71 | [coverage:run]
72 | omit =
73 |     tests/*
74 | 
75 | [coverage:report]
76 | exclude_lines =
77 |     # see: http://coverage.readthedocs.io/en/latest/config.html
78 | 
79 |     # Have to re-enable the standard pragma
80 |     pragma: no cover
81 | 
82 |     # Don't complain if tests don't hit defensive assertion code:
83 |     @abstractmethod
84 |     @overload
85 |     TYPE_CHECKING
86 |     raise AssertionError
87 |     raise NotImplementedError
88 |     @jitclass
89 | 


--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
 1 | from os.path import dirname, join
 2 | 
 3 | from setuptools import find_packages, setup
 4 | 
 5 | with open(join(dirname(__file__), "foolbox/VERSION")) as f:
 6 |     version = f.read().strip()
 7 | 
 8 | try:
 9 |     # obtain long description from README
10 |     readme_path = join(dirname(__file__), "README.rst")
11 |     with open(readme_path, encoding="utf-8") as f:
12 |         README = f.read()
13 |         # remove raw html not supported by PyPI
14 |         README = "\n".join(README.split("\n")[3:])
15 | except IOError:
16 |     README = ""
17 | 
18 | 
19 | install_requires = [
20 |     "numpy",
21 |     "scipy",
22 |     "setuptools",
23 |     "eagerpy>=0.30.0",
24 |     "GitPython>=3.0.7",
25 |     "typing-extensions>=3.7.4.1",
26 |     "requests>=2.24.0",
27 | ]
28 | tests_require = ["pytest>=7.1.1", "pytest-cov>=3.0.0"]
29 | 
30 | 
31 | setup(
32 |     name="foolbox",
33 |     version=version,
34 |     description="Foolbox is an adversarial attacks library that works natively with PyTorch, TensorFlow and JAX",
35 |     long_description=README,
36 |     long_description_content_type="text/x-rst",
37 |     classifiers=[
38 |         "Development Status :: 3 - Alpha",
39 |         "Intended Audience :: Developers",
40 |         "Intended Audience :: Science/Research",
41 |         "License :: OSI Approved :: MIT License",
42 |         "Programming Language :: Python :: 3",
43 |         "Programming Language :: Python :: 3.6",
44 |         "Programming Language :: Python :: 3.7",
45 |         "Topic :: Scientific/Engineering :: Artificial Intelligence",
46 |     ],
47 |     keywords="",
48 |     author="Jonas Rauber, Roland S. Zimmermann",
49 |     author_email="foolbox+rzrolandzimmermann@gmail.com",
50 |     url="https://github.com/bethgelab/foolbox",
51 |     license="MIT License",
52 |     packages=find_packages(),
53 |     include_package_data=True,
54 |     zip_safe=False,
55 |     install_requires=install_requires,
56 |     extras_require={"testing": tests_require},
57 | )
58 | 


--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | torch==2.1.0
2 | torchvision==0.16.0
3 | jax[cpu]==0.4.13
4 | tensorflow==2.13.1
5 | numba==0.58.1
6 | matplotlib==3.7.3
7 | pillow==10.2.0
8 | tensorboardX==2.6.2.2
9 | responses==0.23.3


--------------------------------------------------------------------------------
/tests/test_attacks_base.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | import eagerpy as ep
 3 | import foolbox as fbn
 4 | 
 5 | from conftest import ModeAndDataAndDescription
 6 | 
 7 | 
 8 | attacks = [
 9 |     fbn.attacks.InversionAttack(distance=fbn.distances.l2),
10 |     fbn.attacks.InversionAttack(distance=fbn.distances.l2).repeat(3),
11 |     fbn.attacks.L2ContrastReductionAttack(),
12 |     fbn.attacks.L2ContrastReductionAttack().repeat(3),
13 | ]
14 | 
15 | 
16 | @pytest.mark.parametrize("attack", attacks)
17 | def test_call_one_epsilon(
18 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
19 |     attack: fbn.Attack,
20 | ) -> None:
21 |     (fmodel, x, y), _, _ = fmodel_and_data_ext_for_attacks
22 | 
23 |     assert ep.istensor(x)
24 |     assert ep.istensor(y)
25 | 
26 |     raw, clipped, success = attack(fmodel, x, y, epsilons=1.0)
27 |     assert ep.istensor(raw)
28 |     assert ep.istensor(clipped)
29 |     assert ep.istensor(success)
30 |     assert raw.shape == x.shape
31 |     assert clipped.shape == x.shape
32 |     assert success.shape == (len(x),)
33 | 
34 | 
35 | def test_get_channel_axis() -> None:
36 |     class Model:
37 |         data_format = None
38 | 
39 |     model = Model()
40 |     model.data_format = "channels_first"  # type: ignore
41 |     assert fbn.attacks.base.get_channel_axis(model, 3) == 1  # type: ignore
42 |     model.data_format = "channels_last"  # type: ignore
43 |     assert fbn.attacks.base.get_channel_axis(model, 3) == 2  # type: ignore
44 |     model.data_format = "invalid"  # type: ignore
45 |     with pytest.raises(ValueError):
46 |         assert fbn.attacks.base.get_channel_axis(model, 3)  # type: ignore
47 | 
48 | 
49 | def test_model_bounds(
50 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
51 | ) -> None:
52 |     (fmodel, x, y), _, _ = fmodel_and_data_ext_for_attacks
53 |     attack = fbn.attacks.InversionAttack()
54 | 
55 |     with pytest.raises(AssertionError):
56 |         attack.run(fmodel, x * 0.0 - fmodel.bounds.lower - 0.1, y)
57 |     with pytest.raises(AssertionError):
58 |         attack.run(fmodel, x * 0.0 + fmodel.bounds.upper + 0.1, y)
59 | 


--------------------------------------------------------------------------------
/tests/test_binarization_attack.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | 
 3 | from foolbox import accuracy
 4 | from foolbox.models import ThresholdingWrapper
 5 | from foolbox.devutils import flatten
 6 | from foolbox.attacks import BinarySearchContrastReductionAttack
 7 | from foolbox.attacks import BinarizationRefinementAttack
 8 | 
 9 | from conftest import ModeAndDataAndDescription
10 | 
11 | 
12 | def test_binarization_attack(
13 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
14 | ) -> None:
15 | 
16 |     # get a model with thresholding
17 |     (fmodel, x, y), _, low_dimensional_input = fmodel_and_data_ext_for_attacks
18 | 
19 |     # binarization doesn't work well for imagenet models
20 |     if not low_dimensional_input:
21 |         pytest.skip()
22 | 
23 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
24 |     fmodel = fmodel.transform_bounds((0, 1))
25 |     fmodel = ThresholdingWrapper(fmodel, threshold=0.5)
26 |     acc = accuracy(fmodel, x, y)
27 |     assert acc > 0
28 | 
29 |     # find some adversarials and check that they are non-trivial
30 |     attack = BinarySearchContrastReductionAttack(target=0)
31 |     advs, _, _ = attack(fmodel, x, y, epsilons=None)
32 |     assert accuracy(fmodel, advs, y) < acc
33 | 
34 |     # run the refinement attack
35 |     attack2 = BinarizationRefinementAttack(threshold=0.5, included_in="upper")
36 |     advs2, _, _ = attack2(fmodel, x, y, starting_points=advs, epsilons=None)
37 | 
38 |     # make sure the predicted classes didn't change
39 |     assert (fmodel(advs).argmax(axis=-1) == fmodel(advs2).argmax(axis=-1)).all()
40 | 
41 |     # make sure the perturbations didn't get larger and some got smaller
42 |     norms1 = flatten(advs - x).norms.l2(axis=-1)
43 |     norms2 = flatten(advs2 - x).norms.l2(axis=-1)
44 |     assert (norms2 <= norms1).all()
45 |     assert (norms2 < norms1).any()
46 | 
47 |     # run the refinement attack
48 |     attack2 = BinarizationRefinementAttack(included_in="upper")
49 |     advs2, _, _ = attack2(fmodel, x, y, starting_points=advs, epsilons=None)
50 | 
51 |     # make sure the predicted classes didn't change
52 |     assert (fmodel(advs).argmax(axis=-1) == fmodel(advs2).argmax(axis=-1)).all()
53 | 
54 |     # make sure the perturbations didn't get larger and some got smaller
55 |     norms1 = flatten(advs - x).norms.l2(axis=-1)
56 |     norms2 = flatten(advs2 - x).norms.l2(axis=-1)
57 |     assert (norms2 <= norms1).all()
58 |     assert (norms2 < norms1).any()
59 | 
60 |     with pytest.raises(ValueError, match="starting_points"):
61 |         attack2(fmodel, x, y, epsilons=None)
62 | 
63 |     attack2 = BinarizationRefinementAttack(included_in="lower")
64 |     with pytest.raises(ValueError, match="does not match"):
65 |         attack2(fmodel, x, y, starting_points=advs, epsilons=None)
66 | 
67 |     attack2 = BinarizationRefinementAttack(included_in="invalid")  # type: ignore
68 |     with pytest.raises(ValueError, match="expected included_in"):
69 |         attack2(fmodel, x, y, starting_points=advs, epsilons=None)
70 | 


--------------------------------------------------------------------------------
/tests/test_brendel_bethge_attack.py:
--------------------------------------------------------------------------------
 1 | from typing import Tuple, Union, List, Any
 2 | import eagerpy as ep
 3 | 
 4 | import foolbox as fbn
 5 | import foolbox.attacks as fa
 6 | from foolbox.devutils import flatten
 7 | from foolbox.attacks.brendel_bethge import BrendelBethgeAttack
 8 | import pytest
 9 | 
10 | from conftest import ModeAndDataAndDescription
11 | 
12 | 
13 | def get_attack_id(x: Tuple[BrendelBethgeAttack, Union[int, float]]) -> str:
14 |     return repr(x[0])
15 | 
16 | 
17 | attacks: List[Tuple[fa.Attack, Union[int, float]]] = [
18 |     (fa.L0BrendelBethgeAttack(steps=20), 0),
19 |     (fa.L1BrendelBethgeAttack(steps=20), 1),
20 |     (fa.L2BrendelBethgeAttack(steps=20), 2),
21 |     (fa.LinfinityBrendelBethgeAttack(steps=20), ep.inf),
22 | ]
23 | 
24 | 
25 | @pytest.mark.parametrize("attack_and_p", attacks, ids=get_attack_id)
26 | def test_brendel_bethge_untargeted_attack(
27 |     request: Any,
28 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
29 |     attack_and_p: Tuple[BrendelBethgeAttack, Union[int, float]],
30 | ) -> None:
31 |     if request.config.option.skipslow:
32 |         pytest.skip()
33 | 
34 |     (fmodel, x, y), real, low_dimensional_input = fmodel_and_data_ext_for_attacks
35 | 
36 |     if isinstance(x, ep.NumPyTensor):
37 |         pytest.skip()
38 | 
39 |     if low_dimensional_input:
40 |         pytest.skip()
41 | 
42 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
43 |     fmodel = fmodel.transform_bounds((0, 1))
44 | 
45 |     init_attack = fa.DatasetAttack()
46 |     init_attack.feed(fmodel, x)
47 |     init_advs = init_attack.run(fmodel, x, y)
48 | 
49 |     attack, p = attack_and_p
50 |     advs = attack.run(fmodel, x, y, starting_points=init_advs)
51 | 
52 |     init_norms = ep.norms.lp(flatten(init_advs - x), p=p, axis=-1)
53 |     norms = ep.norms.lp(flatten(advs - x), p=p, axis=-1)
54 | 
55 |     is_smaller = norms < init_norms
56 | 
57 |     assert fbn.accuracy(fmodel, advs, y) < fbn.accuracy(fmodel, x, y)
58 |     assert fbn.accuracy(fmodel, advs, y) <= fbn.accuracy(fmodel, init_advs, y)
59 |     assert is_smaller.any()
60 | 


--------------------------------------------------------------------------------
/tests/test_criteria.py:
--------------------------------------------------------------------------------
 1 | from typing import Tuple
 2 | import foolbox as fbn
 3 | import eagerpy as ep
 4 | 
 5 | 
 6 | def test_correct_unperturbed(
 7 |     fmodel_and_data: Tuple[fbn.Model, ep.Tensor, ep.Tensor]
 8 | ) -> None:
 9 |     fmodel, inputs, _ = fmodel_and_data
10 |     perturbed = inputs
11 |     logits = fmodel(perturbed)
12 |     labels = logits.argmax(axis=-1)
13 | 
14 |     is_adv = fbn.Misclassification(labels)(perturbed, logits)
15 |     assert not is_adv.any()
16 | 
17 |     _, num_classes = logits.shape
18 |     target_classes = (labels + 1) % num_classes
19 |     is_adv = fbn.TargetedMisclassification(target_classes)(perturbed, logits)
20 |     assert not is_adv.any()
21 | 
22 |     combined = fbn.Misclassification(labels) & fbn.Misclassification(labels)
23 |     is_adv = combined(perturbed, logits)
24 |     assert not is_adv.any()
25 | 
26 | 
27 | def test_wrong_unperturbed(
28 |     fmodel_and_data: Tuple[fbn.Model, ep.Tensor, ep.Tensor]
29 | ) -> None:
30 |     fmodel, inputs, _ = fmodel_and_data
31 |     perturbed = inputs
32 |     logits = fmodel(perturbed)
33 |     _, num_classes = logits.shape
34 |     labels = logits.argmax(axis=-1)
35 |     labels = (labels + 1) % num_classes
36 | 
37 |     is_adv = fbn.Misclassification(labels)(perturbed, logits)
38 |     assert is_adv.all()
39 | 
40 |     target_classes = (labels + 1) % num_classes
41 |     is_adv = fbn.TargetedMisclassification(target_classes)(perturbed, logits)
42 |     if num_classes > 2:
43 |         assert not is_adv.any()
44 |     else:
45 |         assert is_adv.all()
46 | 
47 |     is_adv = (fbn.Misclassification(labels) & fbn.Misclassification(labels))(
48 |         perturbed, logits
49 |     )
50 |     assert is_adv.all()
51 | 
52 |     combined = fbn.TargetedMisclassification(labels) & fbn.TargetedMisclassification(
53 |         target_classes
54 |     )
55 |     is_adv = combined(perturbed, logits)
56 |     assert not is_adv.any()
57 | 
58 | 
59 | def test_repr_object() -> None:
60 |     assert repr(object()).startswith("<")
61 | 
62 | 
63 | def test_repr_misclassification(dummy: ep.Tensor) -> None:
64 |     labels = ep.arange(dummy, 10)
65 |     assert not repr(fbn.Misclassification(labels)).startswith("<")
66 | 
67 | 
68 | def test_repr_and(dummy: ep.Tensor) -> None:
69 |     labels = ep.arange(dummy, 10)
70 |     assert not repr(
71 |         fbn.Misclassification(labels) & fbn.Misclassification(labels)
72 |     ).startswith("<")
73 | 
74 | 
75 | def test_repr_targeted_misclassification(dummy: ep.Tensor) -> None:
76 |     target_classes = ep.arange(dummy, 10)
77 |     assert not repr(fbn.TargetedMisclassification(target_classes)).startswith("<")
78 | 


--------------------------------------------------------------------------------
/tests/test_dataset_attack.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | 
 3 | import foolbox as fbn
 4 | 
 5 | from conftest import ModeAndDataAndDescription
 6 | 
 7 | 
 8 | def test_dataset_attack(
 9 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
10 | ) -> None:
11 | 
12 |     (fmodel, x, y), _, _ = fmodel_and_data_ext_for_attacks
13 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
14 |     fmodel = fmodel.transform_bounds((0, 1))
15 | 
16 |     attack = fbn.attacks.DatasetAttack()
17 |     attack.feed(fmodel, x)
18 | 
19 |     assert fbn.accuracy(fmodel, x, y) > 0
20 | 
21 |     advs, _, success = attack(fmodel, x, y, epsilons=None)
22 |     assert success.shape == (len(x),)
23 |     assert success.all()
24 |     assert fbn.accuracy(fmodel, advs, y) == 0
25 | 
26 |     with pytest.raises(ValueError, match="unknown distance"):
27 |         attack(fmodel, x, y, epsilons=[500.0, 1000.0])
28 |     attack = fbn.attacks.DatasetAttack(distance=fbn.distances.l2)
29 |     attack.feed(fmodel, x)
30 |     advss, _, success = attack(fmodel, x, y, epsilons=[500.0, 1000.0])
31 |     assert success.shape == (2, len(x))
32 |     assert success.all()
33 |     assert fbn.accuracy(fmodel, advss[0], y) == 0
34 |     assert fbn.accuracy(fmodel, advss[1], y) == 0
35 | 
36 |     with pytest.raises(TypeError, match="unexpected keyword argument"):
37 |         attack(fmodel, x, y, epsilons=None, invalid=True)
38 | 


--------------------------------------------------------------------------------
/tests/test_devutils.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | import foolbox as fbn
 3 | import eagerpy as ep
 4 | 
 5 | 
 6 | @pytest.mark.parametrize("k", [1, 2, 3, 4])
 7 | def test_atleast_kd_1d(dummy: ep.Tensor, k: int) -> None:
 8 |     x = ep.zeros(dummy, (10,))
 9 |     x = fbn.devutils.atleast_kd(x, k)
10 |     assert x.shape[0] == 10
11 |     assert x.ndim == k
12 | 
13 | 
14 | @pytest.mark.parametrize("k", [1, 2, 3, 4])
15 | def test_atleast_kd_3d(dummy: ep.Tensor, k: int) -> None:
16 |     x = ep.zeros(dummy, (10, 5, 3))
17 |     x = fbn.devutils.atleast_kd(x, k)
18 |     assert x.shape[:3] == (10, 5, 3)
19 |     assert x.ndim == max(k, 3)
20 | 
21 | 
22 | def test_flatten_2d(dummy: ep.Tensor) -> None:
23 |     x = ep.zeros(dummy, (4, 5))
24 |     x = fbn.devutils.flatten(x)
25 |     assert x.shape == (4, 5)
26 | 
27 | 
28 | def test_flatten_3d(dummy: ep.Tensor) -> None:
29 |     x = ep.zeros(dummy, (4, 5, 6))
30 |     x = fbn.devutils.flatten(x)
31 |     assert x.shape == (4, 30)
32 | 
33 | 
34 | def test_flatten_4d(dummy: ep.Tensor) -> None:
35 |     x = ep.zeros(dummy, (4, 5, 6, 7))
36 |     x = fbn.devutils.flatten(x)
37 |     assert x.shape == (4, 210)
38 | 


--------------------------------------------------------------------------------
/tests/test_distances.py:
--------------------------------------------------------------------------------
 1 | from typing import Tuple, Any, Dict, Callable, TypeVar
 2 | import numpy as np
 3 | import pytest
 4 | import foolbox as fbn
 5 | import eagerpy as ep
 6 | 
 7 | distances = {
 8 |     0: fbn.distances.l0,
 9 |     1: fbn.distances.l1,
10 |     2: fbn.distances.l2,
11 |     ep.inf: fbn.distances.linf,
12 | }
13 | 
14 | data: Dict[str, Callable[..., Tuple[ep.Tensor, ep.Tensor]]] = {}
15 | 
16 | FuncType = Callable[..., Tuple[ep.Tensor, ep.Tensor]]
17 | F = TypeVar("F", bound=FuncType)
18 | 
19 | 
20 | def register(f: F) -> F:
21 |     data[f.__name__] = f
22 |     return f
23 | 
24 | 
25 | @register
26 | def example_4d(dummy: ep.Tensor) -> Tuple[ep.Tensor, ep.Tensor]:
27 |     reference = ep.full(dummy, (10, 3, 32, 32), 0.2)
28 |     perturbed = reference + 0.6
29 |     return reference, perturbed
30 | 
31 | 
32 | @register
33 | def example_batch(dummy: ep.Tensor) -> Tuple[ep.Tensor, ep.Tensor]:
34 |     x = ep.arange(dummy, 6).float32().reshape((2, 3))
35 |     x = x / x.max()
36 |     reference = x
37 |     perturbed = 1 - x
38 |     return reference, perturbed
39 | 
40 | 
41 | @pytest.fixture(scope="session", params=list(data.keys()))
42 | def reference_perturbed(request: Any, dummy: ep.Tensor) -> Tuple[ep.Tensor, ep.Tensor]:
43 |     return data[request.param](dummy)
44 | 
45 | 
46 | @pytest.mark.parametrize("p", [0, 1, 2, ep.inf])
47 | def test_distance(reference_perturbed: Tuple[ep.Tensor, ep.Tensor], p: float) -> None:
48 |     reference, perturbed = reference_perturbed
49 | 
50 |     actual = distances[p](reference, perturbed).numpy()
51 | 
52 |     diff = perturbed.numpy() - reference.numpy()
53 |     diff = diff.reshape((len(diff), -1))
54 |     desired = np.linalg.norm(diff, ord=p, axis=-1)
55 | 
56 |     np.testing.assert_allclose(actual, desired, rtol=1e-5)
57 | 
58 | 
59 | @pytest.mark.parametrize("p", [0, 1, 2, ep.inf])
60 | def test_distance_repr_str(p: float) -> None:
61 |     assert str(p) in repr(distances[p])
62 |     assert str(p) in str(distances[p])
63 | 
64 | 
65 | @pytest.mark.parametrize("p", [0, 1, 2, ep.inf])
66 | def test_distance_clip(
67 |     reference_perturbed: Tuple[ep.Tensor, ep.Tensor], p: float
68 | ) -> None:
69 |     reference, perturbed = reference_perturbed
70 | 
71 |     ds = distances[p](reference, perturbed).numpy()
72 |     epsilon = np.median(ds)
73 |     too_large = ds > epsilon
74 | 
75 |     desired = np.where(too_large, epsilon, ds)
76 | 
77 |     perturbed = distances[p].clip_perturbation(reference, perturbed, epsilon)
78 |     actual = distances[p](reference, perturbed).numpy()
79 | 
80 |     np.testing.assert_allclose(actual, desired)
81 | 
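
A small self-contained sketch of the distance objects tested above (a sketch only; it assumes numpy, eagerpy, and foolbox, and the tensors are arbitrary):

import numpy as np
import eagerpy as ep
import foolbox as fbn

reference = ep.astensor(np.zeros((4, 3, 8, 8), dtype=np.float32))
perturbed = reference + 0.5

# each distance returns one value per batch element, computed on the flattened
# difference (matching np.linalg.norm with the corresponding ord, as tested above)
print(fbn.distances.l2(reference, perturbed).numpy())

# clip_perturbation projects perturbed back so that no sample exceeds epsilon
clipped = fbn.distances.l2.clip_perturbation(reference, perturbed, 1.0)
print(fbn.distances.l2(reference, clipped).numpy())  # all values <= 1.0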


--------------------------------------------------------------------------------
/tests/test_eot_wrapper.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | 
 3 | import eagerpy as ep
 4 | 
 5 | from foolbox import accuracy
 6 | from foolbox.attacks import (
 7 |     LinfBasicIterativeAttack,
 8 |     L1BasicIterativeAttack,
 9 |     L2BasicIterativeAttack,
10 | )
11 | from foolbox.models import ExpectationOverTransformationWrapper
12 | from foolbox.types import L2, Linf
13 | 
14 | from conftest import ModeAndDataAndDescription
15 | 
16 | 
17 | def test_eot_wrapper(
18 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
19 | ) -> None:
20 | 
21 |     (fmodel, x, y), real, low_dimensional_input = fmodel_and_data_ext_for_attacks
22 | 
23 |     if isinstance(x, ep.NumPyTensor):
24 |         pytest.skip()
25 | 
26 |     # test clean accuracy when wrapping EoT
27 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
28 |     fmodel = fmodel.transform_bounds((0, 1))
29 |     acc = accuracy(fmodel, x, y)
30 | 
31 |     rand_model = ExpectationOverTransformationWrapper(fmodel, n_steps=4)
32 |     rand_acc = accuracy(rand_model, x, y)
33 |     assert acc == rand_acc
34 | 
35 |     # test with base attacks
36 |     # (accuracy should not change, since fmodel is not random)
37 |     attacks = (
38 |         L1BasicIterativeAttack(),
39 |         L2BasicIterativeAttack(),
40 |         LinfBasicIterativeAttack(),
41 |     )
42 |     epsilons = (5000.0, L2(50.0), Linf(1.0))
43 | 
44 |     for attack, eps in zip(attacks, epsilons):
45 | 
46 |         # acc on standard model
47 |         advs, _, _ = attack(fmodel, x, y, epsilons=eps)
48 |         adv_acc = accuracy(fmodel, advs, y)
49 | 
50 |         # acc on eot model
51 |         advs, _, _ = attack(rand_model, x, y, epsilons=eps)
52 |         r_adv_acc = accuracy(rand_model, advs, y)
53 |         assert adv_acc == r_adv_acc
54 | 
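
A minimal sketch of the wrapper usage checked above (assuming PyTorch is available; the tiny linear net is a hypothetical stand-in, not a model from this repo):

import torch
import foolbox as fbn
from foolbox.models import ExpectationOverTransformationWrapper

net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10)).eval()
fmodel = fbn.PyTorchModel(net, bounds=(0, 1), device="cpu")

# the wrapper evaluates the wrapped model n_steps times per call; for a
# deterministic model this leaves predictions (and accuracy) unchanged,
# which is exactly what the test above asserts
rand_model = ExpectationOverTransformationWrapper(fmodel, n_steps=4)

x = torch.rand(8, 3, 32, 32)
y = fmodel(x).argmax(-1)
print(fbn.accuracy(fmodel, x, y), fbn.accuracy(rand_model, x, y))  # both 1.0 here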


--------------------------------------------------------------------------------
/tests/test_fast_minimum_norm_attack.py:
--------------------------------------------------------------------------------
 1 | from typing import Tuple, Union, List
 2 | import eagerpy as ep
 3 | 
 4 | import foolbox as fbn
 5 | import foolbox.attacks as fa
 6 | from foolbox.devutils import flatten
 7 | from foolbox.attacks.fast_minimum_norm import FMNAttackLp
 8 | import pytest
 9 | import numpy as np
10 | from conftest import ModeAndDataAndDescription
11 | 
12 | 
13 | def get_attack_id(x: Tuple[FMNAttackLp, Union[int, float]]) -> str:
14 |     return repr(x[0])
15 | 
16 | 
17 | attacks: List[Tuple[fa.Attack, Union[int, float]]] = [
18 |     (fa.L0FMNAttack(steps=20), 0),
19 |     (fa.L1FMNAttack(steps=20), 1),
20 |     (fa.L2FMNAttack(steps=20), 2),
21 |     (fa.LInfFMNAttack(steps=20), ep.inf),
22 |     (fa.LInfFMNAttack(steps=20, min_stepsize=1.0 / 100), ep.inf),
23 | ]
24 | 
25 | 
26 | @pytest.mark.parametrize("attack_and_p", attacks, ids=get_attack_id)
27 | def test_fast_minimum_norm_untargeted_attack(
28 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
29 |     attack_and_p: Tuple[FMNAttackLp, Union[int, float]],
30 | ) -> None:
31 | 
32 |     (fmodel, x, y), real, low_dimensional_input = fmodel_and_data_ext_for_attacks
33 | 
34 |     if isinstance(x, ep.NumPyTensor):
35 |         pytest.skip()
36 | 
37 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
38 |     fmodel = fmodel.transform_bounds((0, 1))
39 | 
40 |     init_attack = fa.DatasetAttack()
41 |     init_attack.feed(fmodel, x)
42 |     init_advs = init_attack.run(fmodel, x, y)
43 | 
44 |     attack, p = attack_and_p
45 |     advs = attack.run(fmodel, x, y, starting_points=init_advs)
46 | 
47 |     init_norms = ep.norms.lp(flatten(init_advs - x), p=p, axis=-1)
48 |     norms = ep.norms.lp(flatten(advs - x), p=p, axis=-1)
49 | 
50 |     is_smaller = norms < init_norms
51 | 
52 |     assert fbn.accuracy(fmodel, advs, y) < fbn.accuracy(fmodel, x, y)
53 |     assert fbn.accuracy(fmodel, advs, y) <= fbn.accuracy(fmodel, init_advs, y)
54 |     assert is_smaller.any()
55 | 
56 | 
57 | @pytest.mark.parametrize("attack_and_p", attacks, ids=get_attack_id)
58 | def test_fast_minimum_norm_targeted_attack(
59 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
60 |     attack_and_p: Tuple[FMNAttackLp, Union[int, float]],
61 | ) -> None:
62 | 
63 |     (fmodel, x, y), real, low_dimensional_input = fmodel_and_data_ext_for_attacks
64 | 
65 |     if isinstance(x, ep.NumPyTensor):
66 |         pytest.skip()
67 | 
68 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
69 |     fmodel = fmodel.transform_bounds((0, 1))
70 | 
71 |     unique_preds = np.unique(fmodel(x).argmax(-1).numpy())
72 |     target_classes = ep.from_numpy(
73 |         y,
74 |         np.array(
75 |             [
76 |                 unique_preds[(np.argmax(y_it == unique_preds) + 1) % len(unique_preds)]
77 |                 for y_it in y.numpy()
78 |             ]
79 |         ),
80 |     )
81 |     criterion = fbn.TargetedMisclassification(target_classes)
82 |     adv_before_attack = criterion(x, fmodel(x))
83 |     assert not adv_before_attack.all()
84 | 
85 |     init_attack = fa.DatasetAttack()
86 |     init_attack.feed(fmodel, x)
87 |     init_advs = init_attack.run(fmodel, x, criterion)
88 | 
89 |     attack, p = attack_and_p
90 |     advs = attack.run(fmodel, x, criterion, starting_points=init_advs)
91 | 
92 |     init_norms = ep.norms.lp(flatten(init_advs - x), p=p, axis=-1)
93 |     norms = ep.norms.lp(flatten(advs - x), p=p, axis=-1)
94 | 
95 |     is_smaller = norms < init_norms
96 | 
97 |     assert fbn.accuracy(fmodel, advs, target_classes) == 1.0
98 |     assert is_smaller.any()
99 | 
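
For completeness, a compact sketch of the minimum-norm run() pattern used in these tests, again on a hypothetical toy PyTorch model (starting_points is optional; the tests above additionally seed the attack with DatasetAttack results and check that the norms shrink):

import torch
import foolbox as fbn
import foolbox.attacks as fa

net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10)).eval()
fmodel = fbn.PyTorchModel(net, bounds=(0, 1), device="cpu")
x = torch.rand(8, 3, 32, 32)
y = fmodel(x).argmax(-1)

# minimum-norm attacks return the perturbed inputs directly from run();
# there is no fixed epsilon to pass
advs = fa.L2FMNAttack(steps=20).run(fmodel, x, y)
print(fbn.accuracy(fmodel, advs, y))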


--------------------------------------------------------------------------------
/tests/test_fetch_weights.py:
--------------------------------------------------------------------------------
 1 | from foolbox.zoo import fetch_weights
 2 | from foolbox.zoo.common import home_directory_path, sha256_hash
 3 | from foolbox.zoo.weights_fetcher import FOLDER
 4 | 
 5 | import os
 6 | import pytest
 7 | import shutil
 8 | 
 9 | import responses
10 | import io
11 | import zipfile
12 | 
13 | 
14 | @responses.activate
15 | def test_fetch_weights_unzipped() -> None:
16 |     weights_uri = "http://localhost:8080/weights.zip"
17 |     raw_body = _random_body(zipped=False)
18 | 
19 |     # mock server
20 |     responses.add(responses.GET, weights_uri, body=raw_body, status=200, stream=True)
21 | 
22 |     expected_path = _expected_path(weights_uri)
23 | 
24 |     if os.path.exists(expected_path):
25 |         shutil.rmtree(expected_path)  # make sure path does not exist already
26 | 
27 |     file_path = fetch_weights(weights_uri)
28 | 
29 |     exists_locally = os.path.exists(expected_path)
30 |     assert exists_locally
31 |     assert expected_path in file_path
32 | 
33 | 
34 | @responses.activate
35 | def test_fetch_weights_zipped() -> None:
36 |     weights_uri = "http://localhost:8080/weights.zip"
37 | 
38 |     # mock server
39 |     raw_body = _random_body(zipped=True)
40 |     responses.add(
41 |         responses.GET,
42 |         weights_uri,
43 |         body=raw_body,
44 |         status=200,
45 |         stream=True,
46 |         content_type="application/zip",
47 |         headers={"Accept-Encoding": "gzip, deflate"},
48 |     )
49 | 
50 |     expected_path = _expected_path(weights_uri)
51 | 
52 |     if os.path.exists(expected_path):
53 |         shutil.rmtree(expected_path)  # make sure path does not exist already
54 | 
55 |     file_path = fetch_weights(weights_uri, unzip=True)
56 | 
57 |     exists_locally = os.path.exists(expected_path)
58 |     assert exists_locally
59 |     assert expected_path in file_path
60 | 
61 | 
62 | @responses.activate
63 | def test_fetch_weights_returns_404() -> None:
64 |     weights_uri = "http://down:8080/weights.zip"
65 | 
66 |     # mock server
67 |     responses.add(responses.GET, weights_uri, status=404)
68 | 
69 |     expected_path = _expected_path(weights_uri)
70 | 
71 |     if os.path.exists(expected_path):
72 |         shutil.rmtree(expected_path)  # make sure path does not exist already
73 | 
74 |     with pytest.raises(RuntimeError):
75 |         fetch_weights(weights_uri, unzip=False)
76 | 
77 | 
78 | def _random_body(zipped: bool = False) -> bytes:
79 |     if zipped:
80 |         data = io.BytesIO()
81 |         with zipfile.ZipFile(data, mode="w") as z:
82 |             z.writestr("test.txt", "no real weights in here :)")
83 |         data.seek(0)
84 |         return data.getvalue()
85 |     else:
86 |         raw_body = os.urandom(1024)
87 |         return raw_body
88 | 
89 | 
90 | def _expected_path(weights_uri: str) -> str:
91 |     hash_digest = sha256_hash(weights_uri)
92 |     local_path = home_directory_path(FOLDER, hash_digest)
93 |     return local_path
94 | 


--------------------------------------------------------------------------------
/tests/test_gen_attack_utils.py:
--------------------------------------------------------------------------------
 1 | import eagerpy as ep
 2 | import numpy as np
 3 | import pytest
 4 | from typing import Any
 5 | 
 6 | from foolbox.attacks.gen_attack_utils import rescale_images
 7 | 
 8 | 
 9 | def test_rescale_axis(request: Any, dummy: ep.Tensor) -> None:
10 |     backend = request.config.option.backend
11 |     if backend == "numpy":
12 |         pytest.skip()
13 | 
14 |     x_np = np.random.uniform(0.0, 1.0, size=(16, 3, 64, 64))
15 |     x_np_ep = ep.astensor(x_np)
16 |     x_up_np_ep = rescale_images(x_np_ep, (16, 3, 128, 128), 1)
17 |     x_up_np = x_up_np_ep.numpy()
18 | 
19 |     x = ep.from_numpy(dummy, x_np)
20 |     x_ep = ep.astensor(x)
21 |     x_up_ep = rescale_images(x_ep, (16, 3, 128, 128), 1)
22 |     x_up = x_up_ep.numpy()
23 | 
24 |     assert np.allclose(x_up_np, x_up, atol=1e-5)
25 | 
26 | 
27 | def test_rescale_axis_nhwc(request: Any, dummy: ep.Tensor) -> None:
28 |     backend = request.config.option.backend
29 |     if backend == "numpy":
30 |         pytest.skip()
31 | 
32 |     x_np = np.random.uniform(0.0, 1.0, size=(16, 64, 64, 3))
33 |     x_np_ep = ep.astensor(x_np)
34 |     x_up_np_ep = rescale_images(x_np_ep, (16, 128, 128, 3), -1)
35 |     x_up_np = x_up_np_ep.numpy()
36 | 
37 |     x = ep.from_numpy(dummy, x_np)
38 |     x_ep = ep.astensor(x)
39 |     x_up_ep = rescale_images(x_ep, (16, 128, 128, 3), -1)
40 |     x_up = x_up_ep.numpy()
41 | 
42 |     assert np.allclose(x_up_np, x_up, atol=1e-5)
43 | 


--------------------------------------------------------------------------------
/tests/test_plot.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | import eagerpy as ep
 3 | import foolbox as fbn
 4 | 
 5 | 
 6 | def test_plot(dummy: ep.Tensor) -> None:
 7 |     # just tests that the calls don't throw any errors
 8 |     images = ep.zeros(dummy, (10, 3, 32, 32))
 9 |     fbn.plot.images(images)
10 |     fbn.plot.images(images, n=3)
11 |     fbn.plot.images(images, n=3, data_format="channels_first")
12 |     fbn.plot.images(images, nrows=4)
13 |     fbn.plot.images(images, ncols=3)
14 |     fbn.plot.images(images, nrows=2, ncols=6)
15 |     fbn.plot.images(images, nrows=2, ncols=4)
16 |     # test for single channel images
17 |     images = ep.zeros(dummy, (10, 32, 32, 1))
18 |     fbn.plot.images(images)
19 |     with pytest.raises(ValueError):
20 |         images = ep.zeros(dummy, (10, 3, 3, 3))
21 |         fbn.plot.images(images)
22 |     with pytest.raises(ValueError):
23 |         images = ep.zeros(dummy, (10, 1, 1, 1))
24 |         fbn.plot.images(images)
25 |     with pytest.raises(ValueError):
26 |         images = ep.zeros(dummy, (10, 32, 32))
27 |         fbn.plot.images(images)
28 |     with pytest.raises(ValueError):
29 |         images = ep.zeros(dummy, (10, 3, 32, 32))
30 |         fbn.plot.images(images, data_format="foo")
31 | 
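
A tiny sketch of the plotting helper exercised above (assumes matplotlib is installed; the random images are hypothetical):

import numpy as np
import foolbox as fbn

# eight channels_last images; the channel axis is inferred from the shape,
# or can be forced via data_format as in the test above
images = np.random.uniform(0.0, 1.0, size=(8, 32, 32, 3)).astype(np.float32)
fbn.plot.images(images, n=4)
fbn.plot.images(images, nrows=2, ncols=4)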


--------------------------------------------------------------------------------
/tests/test_pointwise_attack.py:
--------------------------------------------------------------------------------
  1 | from typing import List, Any
  2 | import eagerpy as ep
  3 | 
  4 | import foolbox as fbn
  5 | import foolbox.attacks as fa
  6 | from foolbox.devutils import flatten
  7 | import pytest
  8 | 
  9 | from conftest import ModeAndDataAndDescription
 10 | 
 11 | 
 12 | def get_attack_id(x: fa.Attack) -> str:
 13 |     return repr(x)
 14 | 
 15 | 
 16 | attacks: List[fa.Attack] = [
 17 |     fa.PointwiseAttack(),
 18 |     fa.PointwiseAttack(l2_binary_search=False),
 19 | ]
 20 | 
 21 | 
 22 | @pytest.mark.parametrize("attack", attacks, ids=get_attack_id)
 23 | def test_pointwise_untargeted_attack(
 24 |     request: Any,
 25 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
 26 |     attack: fa.PointwiseAttack,
 27 | ) -> None:
 28 |     (fmodel, x, y), real, low_dimensional_input = fmodel_and_data_ext_for_attacks
 29 | 
 30 |     if not low_dimensional_input or not real:
 31 |         pytest.skip()
 32 | 
 33 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
 34 |     fmodel = fmodel.transform_bounds((0, 1))
 35 | 
 36 |     init_attack = fa.SaltAndPepperNoiseAttack(steps=50)
 37 |     init_advs = init_attack.run(fmodel, x, y)
 38 | 
 39 |     advs = attack.run(fmodel, x, y, starting_points=init_advs)
 40 | 
 41 |     init_norms_l0 = ep.norms.lp(flatten(init_advs - x), p=0, axis=-1)
 42 |     norms_l0 = ep.norms.lp(flatten(advs - x), p=0, axis=-1)
 43 | 
 44 |     init_norms_l2 = ep.norms.lp(flatten(init_advs - x), p=2, axis=-1)
 45 |     norms_l2 = ep.norms.lp(flatten(advs - x), p=2, axis=-1)
 46 | 
 47 |     is_smaller_l0 = norms_l0 < init_norms_l0
 48 |     is_smaller_l2 = norms_l2 < init_norms_l2
 49 | 
 50 |     assert fbn.accuracy(fmodel, advs, y) < fbn.accuracy(fmodel, x, y)
 51 |     assert fbn.accuracy(fmodel, advs, y) <= fbn.accuracy(fmodel, init_advs, y)
 52 |     assert is_smaller_l2.any()
 53 |     assert is_smaller_l0.any()
 54 | 
 55 | 
 56 | @pytest.mark.parametrize("attack", attacks, ids=get_attack_id)
 57 | def test_pointwise_targeted_attack(
 58 |     request: Any,
 59 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
 60 |     attack: fa.PointwiseAttack,
 61 | ) -> None:
 62 |     (fmodel, x, y), real, low_dimensional_input = fmodel_and_data_ext_for_attacks
 63 | 
 64 |     if not low_dimensional_input or not real:
 65 |         pytest.skip()
 66 | 
 67 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
 68 |     fmodel = fmodel.transform_bounds((0, 1))
 69 | 
 70 |     init_attack = fa.SaltAndPepperNoiseAttack(steps=50)
 71 |     init_advs = init_attack.run(fmodel, x, y)
 72 | 
 73 |     logits = fmodel(init_advs)
 74 |     num_classes = logits.shape[-1]
 75 |     target_classes = logits.argmax(-1)
 76 |     target_classes = ep.where(
 77 |         target_classes == y, (target_classes + 1) % num_classes, target_classes
 78 |     )
 79 |     criterion = fbn.TargetedMisclassification(target_classes)
 80 | 
 81 |     advs = attack.run(fmodel, x, criterion, starting_points=init_advs)
 82 | 
 83 |     init_norms_l0 = ep.norms.lp(flatten(init_advs - x), p=0, axis=-1)
 84 |     norms_l0 = ep.norms.lp(flatten(advs - x), p=0, axis=-1)
 85 | 
 86 |     init_norms_l2 = ep.norms.lp(flatten(init_advs - x), p=2, axis=-1)
 87 |     norms_l2 = ep.norms.lp(flatten(advs - x), p=2, axis=-1)
 88 | 
 89 |     is_smaller_l0 = norms_l0 < init_norms_l0
 90 |     is_smaller_l2 = norms_l2 < init_norms_l2
 91 | 
 92 |     assert fbn.accuracy(fmodel, advs, y) < fbn.accuracy(fmodel, x, y)
 93 |     assert fbn.accuracy(fmodel, advs, y) <= fbn.accuracy(fmodel, init_advs, y)
 94 |     assert fbn.accuracy(fmodel, advs, target_classes) > fbn.accuracy(
 95 |         fmodel, x, target_classes
 96 |     )
 97 |     assert fbn.accuracy(fmodel, advs, target_classes) >= fbn.accuracy(
 98 |         fmodel, init_advs, target_classes
 99 |     )
100 |     assert is_smaller_l2.any()
101 |     assert is_smaller_l0.any()
102 | 


--------------------------------------------------------------------------------
/tests/test_spatial_attack.py:
--------------------------------------------------------------------------------
 1 | from typing import List, Tuple
 2 | import pytest
 3 | import foolbox as fbn
 4 | import foolbox.attacks as fa
 5 | 
 6 | from conftest import ModeAndDataAndDescription
 7 | 
 8 | 
 9 | def get_attack_id(x: fbn.Attack) -> str:
10 |     return repr(x)
11 | 
12 | 
13 | # attack
14 | attacks: List[Tuple[fbn.Attack, bool]] = [
15 |     (fa.SpatialAttack(), False),
16 |     (fa.SpatialAttack(grid_search=False), False),
17 |     (fa.SpatialAttack(grid_search=False), True),
18 | ]
19 | 
20 | 
21 | @pytest.mark.parametrize("attack_grad_real", attacks, ids=get_attack_id)
22 | def test_spatial_attacks(
23 |     fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
24 |     attack_grad_real: Tuple[fbn.Attack, bool],
25 | ) -> None:
26 | 
27 |     attack, repeated = attack_grad_real
28 |     if repeated:
29 |         attack = attack.repeat(2)
30 |     (fmodel, x, y), real, _ = fmodel_and_data_ext_for_attacks
31 |     if not real:
32 |         pytest.skip()
33 | 
34 |     x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
35 |     fmodel = fmodel.transform_bounds((0, 1))
36 |     acc = fbn.accuracy(fmodel, x, y)
37 |     assert acc > 0
38 |     advs, _, _ = attack(fmodel, x, y)  # type: ignore
39 |     assert fbn.accuracy(fmodel, advs, y) < acc
40 | 


--------------------------------------------------------------------------------
/tests/test_tensorboard.py:
--------------------------------------------------------------------------------
 1 | from typing import Union, Any
 2 | from typing_extensions import Literal
 3 | import pytest
 4 | import eagerpy as ep
 5 | import foolbox as fbn
 6 | 
 7 | 
 8 | @pytest.mark.parametrize("logdir", [False, "temp"])
 9 | def test_tensorboard(
10 |     logdir: Union[Literal[False], None, str], tmp_path: Any, dummy: ep.Tensor
11 | ) -> None:
12 |     if logdir == "temp":
13 |         logdir = tmp_path
14 | 
15 |     if logdir:
16 |         before = len(list(tmp_path.iterdir()))
17 | 
18 |     tb = fbn.tensorboard.TensorBoard(logdir)
19 | 
20 |     tb.scalar("a_scalar", 5, step=1)
21 | 
22 |     x = ep.ones(dummy, 10)
23 |     tb.mean("a_mean", x, step=2)
24 | 
25 |     x = ep.ones(dummy, 10) == ep.arange(dummy, 10)
26 |     tb.probability("a_probability", x, step=2)
27 | 
28 |     x = ep.arange(dummy, 10).float32()
29 |     cond = ep.ones(dummy, 10) == (ep.arange(dummy, 10) % 2)
30 |     tb.conditional_mean("a_conditional_mean", x, cond, step=2)
31 | 
32 |     x = ep.arange(dummy, 10).float32()
33 |     cond = ep.ones(dummy, 10) == ep.zeros(dummy, 10)
34 |     tb.conditional_mean("a_conditional_mean_false", x, cond, step=2)
35 | 
36 |     x = ep.ones(dummy, 10) == ep.arange(dummy, 10)
37 |     y = ep.ones(dummy, 10) == (ep.arange(dummy, 10) % 2)
38 |     tb.probability_ratio("a_probability_ratio", x, y, step=5)
39 | 
40 |     x = ep.ones(dummy, 10) == (ep.arange(dummy, 10) % 2)
41 |     y = ep.ones(dummy, 10) == ep.zeros(dummy, 10)
42 |     tb.probability_ratio("a_probability_ratio_y_zero", x, y, step=5)
43 | 
44 |     x = ep.arange(dummy, 10).float32()
45 |     tb.histogram("a_histogram", x, step=9, first=False)
46 |     tb.histogram("a_histogram", x, step=10, first=True)
47 | 
48 |     tb.close()
49 | 
50 |     if logdir:
51 |         after = len(list(tmp_path.iterdir()))
52 |         assert after > before  # make sure something has been written
53 | 


--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
 1 | from typing import Tuple
 2 | import foolbox as fbn
 3 | import eagerpy as ep
 4 | import pytest
 5 | 
 6 | ModelAndData = Tuple[fbn.Model, ep.Tensor, ep.Tensor]
 7 | 
 8 | 
 9 | def test_accuracy(fmodel_and_data: ModelAndData) -> None:
10 |     fmodel, x, y = fmodel_and_data
11 |     accuracy = fbn.accuracy(fmodel, x, y)
12 |     assert 0 <= accuracy <= 1
13 |     assert accuracy > 0.5
14 |     y = fmodel(x).argmax(axis=-1)
15 |     accuracy = fbn.accuracy(fmodel, x, y)
16 |     assert accuracy == 1
17 | 
18 | 
19 | @pytest.mark.parametrize("batchsize", [1, 8])
20 | @pytest.mark.parametrize(
21 |     "dataset", ["imagenet", "cifar10", "cifar100", "mnist", "fashionMNIST"]
22 | )
23 | def test_samples(fmodel_and_data: ModelAndData, batchsize: int, dataset: str) -> None:
24 |     fmodel, _, _ = fmodel_and_data
25 |     if hasattr(fmodel, "data_format"):
26 |         data_format = fmodel.data_format
27 |         x, y = fbn.samples(fmodel, dataset=dataset, batchsize=batchsize)
28 |         assert len(x) == len(y) == batchsize
29 |         assert not ep.istensor(x)
30 |         assert not ep.istensor(y)
31 |         x, y = fbn.samples(fmodel, batchsize=batchsize, data_format=data_format)
32 |         assert len(x) == len(y) == batchsize
33 |         assert not ep.istensor(x)
34 |         assert not ep.istensor(y)
35 |         with pytest.raises(ValueError):
36 |             data_format = {
37 |                 "channels_first": "channels_last",
38 |                 "channels_last": "channels_first",
39 |             }[data_format]
40 |             fbn.samples(fmodel, batchsize=batchsize, data_format=data_format)
41 |     else:
42 |         x, y = fbn.samples(fmodel, batchsize=batchsize, data_format="channels_first")
43 |         assert len(x) == len(y) == batchsize
44 |         assert not ep.istensor(x)
45 |         assert not ep.istensor(y)
46 |         with pytest.raises(ValueError):
47 |             fbn.samples(fmodel, batchsize=batchsize)
48 | 
49 | 
50 | @pytest.mark.parametrize("batchsize", [42])
51 | @pytest.mark.parametrize("dataset", ["imagenet"])
52 | def test_samples_large_batch(
53 |     fmodel_and_data: ModelAndData, batchsize: int, dataset: str
54 | ) -> None:
55 |     fmodel, _, _ = fmodel_and_data
56 |     data_format = getattr(fmodel, "data_format", "channels_first")
57 |     with pytest.warns(UserWarning, match="only 20 samples"):
58 |         x, y = fbn.samples(
59 |             fmodel, dataset=dataset, batchsize=batchsize, data_format=data_format
60 |         )
61 |     assert len(x) == len(y) == batchsize
62 |     assert not ep.istensor(x)
63 |     assert not ep.istensor(y)
64 | 


--------------------------------------------------------------------------------
/tests/test_zoo.py:
--------------------------------------------------------------------------------
 1 | from typing import Any
 2 | import sys
 3 | import pytest
 4 | 
 5 | import foolbox as fbn
 6 | from foolbox.zoo.model_loader import ModelLoader
 7 | 
 8 | 
 9 | @pytest.fixture(autouse=True)
10 | def unload_foolbox_model_module() -> None:
11 |     # reload foolbox_model from scratch for every run
12 |     # to ensure atomic tests without side effects
13 |     module_names = ["foolbox_model", "model"]
14 |     for module_name in module_names:
15 |         if module_name in sys.modules:
16 |             del sys.modules[module_name]
17 | 
18 | 
19 | # test_data = [
20 | #     # private repo won't work on travis
21 | #     ("https://github.com/bethgelab/AnalysisBySynthesis.git", (1, 28, 28)),
22 | #     ("https://github.com/bethgelab/convex_adversarial.git", (1, 28, 28)),
23 | #     ("https://github.com/bethgelab/mnist_challenge.git", 784),
24 | #     (join("file://", dirname(__file__), "data/model_repo"), (3, 224, 224)),
25 | # ]
26 | 
27 | urls = [
28 |     "https://github.com/jonasrauber/foolbox-tensorflow-keras-applications",
29 |     "git@github.com:jonasrauber/foolbox-tensorflow-keras-applications.git",
30 | ]
31 | 
32 | 
33 | # @pytest.mark.parametrize("url, dim", test_data)
34 | @pytest.mark.parametrize("url", urls)
35 | def test_loading_model(request: Any, url: str) -> None:
36 |     backend = request.config.option.backend
37 |     if backend != "tensorflow":
38 |         pytest.skip()
39 | 
40 |     # download model
41 |     try:
42 |         fmodel = fbn.zoo.get_model(url, name="MobileNetV2", overwrite=True)
43 |     except fbn.zoo.GitCloneError:
44 |         pytest.skip()
45 | 
46 |     # download again (test overwriting)
47 |     try:
48 |         fmodel = fbn.zoo.get_model(url, name="MobileNetV2", overwrite=True)
49 |     except fbn.zoo.GitCloneError:
50 |         pytest.skip()
51 | 
52 |     # create a dummy image
53 |     # x = np.zeros(dim, dtype=np.float32)
54 |     # x[:] = np.random.randn(*x.shape)
55 |     x, y = fbn.samples(fmodel, dataset="imagenet", batchsize=16)
56 | 
57 |     # run the model
58 |     # logits = model(x)
59 |     # probabilities = ep.softmax(logits)
60 |     # predicted_class = np.argmax(logits)
61 |     assert fbn.accuracy(fmodel, x, y) > 0.9
62 | 
63 |     # sanity check
64 |     # assert predicted_class >= 0
65 |     # assert np.sum(probabilities) >= 0.9999
66 | 
67 |     # TODO: delete fmodel
68 | 
69 | 
70 | # @pytest.mark.parametrize("url, dim", test_data)
71 | def test_loading_invalid_model(request: Any) -> None:
72 |     url = "https://github.com/jonasrauber/invalid-url"
73 |     with pytest.raises(fbn.zoo.GitCloneError):
74 |         fbn.zoo.get_model(url, name="MobileNetV2", overwrite=True)
75 | 
76 | 
77 | def test_non_default_module_throws_error() -> None:
78 |     with pytest.raises(ValueError):
79 |         ModelLoader.get(key="other")
80 | 


--------------------------------------------------------------------------------