├── .codecov.yml ├── .coveragerc ├── .github └── workflows │ ├── codecov.yml │ ├── lint.yml │ ├── mypy.yml │ └── pytest.yml ├── .gitignore ├── .pre-commit-config.yaml ├── AUTHORS.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── REPO_STRUCTURE.md ├── Tutorials ├── ch5 │ ├── GAN │ │ └── GAN_Tutorial.ipynb │ └── readme.md └── readme.md ├── code_soup ├── __init__.py ├── ch5 │ ├── __init__.py │ └── algorithms │ │ ├── __init__.py │ │ ├── atn.py │ │ ├── configs │ │ └── zoo_attack.json │ │ ├── gan.py │ │ ├── one_pixel_attack.py │ │ └── zoo_attack.py ├── ch8 │ └── __init__.py └── common │ ├── __init__.py │ ├── text │ ├── __init__.py │ └── utils │ │ ├── json │ │ ├── homoglyph.json │ │ └── keys_in_proximity.json │ │ └── perturbations.py │ ├── utils │ ├── __init__.py │ ├── checkpoints.py │ └── seeding.py │ └── vision │ ├── __init__.py │ ├── datasets │ ├── __init__.py │ ├── image_classification.py │ ├── readme.md │ └── vision_dataset.py │ ├── models │ ├── __init__.py │ ├── allconvnet.py │ ├── nin.py │ ├── readme.md │ └── simple_cnn_classifier.py │ └── utils │ └── __init__.py ├── mypy.ini ├── requirements.txt ├── setup.cfg └── tests ├── __init__.py ├── test_ch5 ├── __init__.py └── test_algorithms │ ├── __init__.py │ ├── test_atn.py │ ├── test_gan.py │ ├── test_one_pixel_attack.py │ └── test_zoo_attack.py └── test_common ├── __init__.py ├── test_text ├── __init__.py └── test_utils │ └── test_perturbations.py ├── test_utils ├── __init__.py ├── test_checkpoints.py └── test_seeding.py └── test_vision ├── __init__.py ├── test_datasets └── test_image_classification.py └── test_models ├── __init__.py ├── test_allconv.py ├── test_nin.py └── test_simple_cnn_classifier.py /.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | require_ci_to_pass: yes 3 | 4 | coverage: 5 | precision: 2 6 | round: down 7 | range: "70...100" 8 | 9 | comment: 10 | layout: "header, diff, changes, tree" 11 | behavior: default 12 | require_changes: no 13 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | # Regexes for lines to exclude from consideration 3 | exclude_lines = 4 | # Have to re-enable the standard pragma 5 | pragma: no cover 6 | 7 | # Abstract Methods and classes 8 | @abstract 9 | 10 | # Don't complain about missing debug-only code: 11 | def __repr__ 12 | if self\.debug 13 | 14 | # Don't complain if tests don't hit defensive assertion code: 15 | raise AssertionError 16 | raise NotImplementedError 17 | 18 | # Don't complain if non-runnable code isn't run: 19 | if 0: 20 | if __name__ == .__main__.: 21 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: Code Coverage 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | coverage: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | # Caching Linux 15 | - uses: actions/cache@v2 16 | if: startsWith(runner.os, 'Linux') 17 | with: 18 | path: ~/.cache/pip 19 | key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} 20 | restore-keys: | 21 | ${{ runner.os }}-pip- 22 | 23 | # Setup 24 | - uses: actions/checkout@v2 25 | - name: Set up Python 3.8 26 | uses: actions/setup-python@v2 27 | with: 28 | python-version: 3.8 29 | 30 | # Install Dependencies 31 | - 
name: Install dependencies 32 | run: | 33 | python -m pip install --upgrade pip 34 | pip install pytest-cov codecov 35 | pip install parameterized 36 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 37 | 38 | - name: Tests 39 | run: pytest --cov-report xml --cov='./code_soup/' --cov-config=.coveragerc 40 | 41 | - name: Code Coverage Report 42 | uses: codecov/codecov-action@v1 43 | if: always() 44 | with: 45 | fail_ci_if_error: false 46 | file: coverage.xml 47 | env_vars: OS,PYTHON 48 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | lint: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Set up Python 3 16 | uses: actions/setup-python@v2 17 | with: 18 | python-version: '3.x' 19 | - name: Install dependencies 20 | run: | 21 | python -m pip install --upgrade pip 22 | - name: Run PreCommit 23 | uses: pre-commit/action@v2.0.2 24 | -------------------------------------------------------------------------------- /.github/workflows/mypy.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Static Typing 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | typing: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: [3.8] 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 30 | - name: Enforce typing 31 | run: | 32 | mypy --config-file mypy.ini 33 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Tests 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | test: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: [3.8] 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | python -m pip install pytest 30 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 31 | - name: Test with pytest 32 | run: | 33 | pytest 34 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | #Hidden Files on Mac 2 | **.DS_Store 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | .vscode/ 9 | input/ 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | pip-wheel-metadata/ 28 | share/python-wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | *.py,cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 99 | __pypackages__/ 100 | 101 | # Celery stuff 102 | celerybeat-schedule 103 | celerybeat.pid 104 | 105 | # SageMath parsed files 106 | *.sage.py 107 | 108 | # Environments 109 | .env 110 | .venv 111 | env/ 112 | venv/ 113 | ENV/ 114 | env.bak/ 115 | venv.bak/ 116 | 117 | # Spyder project settings 118 | .spyderproject 119 | .spyproject 120 | 121 | # Rope project settings 122 | .ropeproject 123 | 124 | # mkdocs documentation 125 | /site 126 | 127 | # mypy 128 | .mypy_cache/ 129 | .dmypy.json 130 | dmypy.json 131 | 132 | # Pyre type checker 133 | .pyre/ 134 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.0.1 4 | hooks: 5 | - id: end-of-file-fixer 6 | - id: trailing-whitespace 7 | 8 | - repo: https://github.com/python/black 9 | rev: 21.7b0 10 | hooks: 11 | - id: black 12 | language_version: python3 13 | 14 | - repo: https://github.com/pycqa/isort 15 | rev: 5.9.3 16 | hooks: 17 | - id: isort 18 | args: ["--profile", "black"] 19 | 20 | - repo: https://gitlab.com/pycqa/flake8 21 | rev: 3.9.2 22 | hooks: 23 | - id: flake8 24 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | Credits 2 | ======= 3 | 4 | Authors of the book: Adversarial Deep Learning 5 | ---------------------------------------------- 6 | [Dr. Di Jin](https://scholar.google.com/citations?user=x5QTK9YAAAAJ&hl=en), [Dr. Yifang Yin](https://yifangyin.github.io/), [Yaman Kumar](https://sites.google.com/view/yaman-kumar/), and [Dr. Rajiv Ratn Shah](https://www.iiitd.ac.in/rajivratn) 7 | 8 | 9 | Chefs of the Code-Soup 10 | ---------------------------------------------- 11 | * [Somesh Singh](https://someshsingh22.github.io/) 12 | * [Abheesht Sharma](https://www.linkedin.com/in/abheesht-sharma-567303156/) 13 | * [Harshit Pandey](https://www.linkedin.com/in/harshit-pandey-a77302173/) 14 | * [Gunjan Chhablani](https://gchhablani.github.io/) 15 | * Mehul Rastogi 16 | 17 | 18 | Contributors 19 | ---------------------------------------------- 20 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at singhksomesh@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Code-Soup 2 | This project is a community effort, and everyone is welcome to contribute! 
Feel free to join the [Slack Workspace](https://join.slack.com/t/ssoc2021/shared_invite/zt-u4eefbut-aX7TYc1WoQWgylPydivUlg) 3 | 4 | If you are interested in contributing to code-soup, there are many ways to help out. Your contributions may fall 5 | into the following categories: 6 | 7 | 1. It helps us very much if you could 8 | - Report issues you’re facing 9 | - Give a :+1: on issues that others reported and that are relevant to you 10 | - Spread a word about the project or simply :star: to say "I use it" 11 | 12 | 2. Answering queries on the issue tracker, investigating bugs and reviewing other developers’ pull requests are 13 | very valuable contributions that decrease the burden on the project maintainers. 14 | 15 | 3. You would like to improve the documentation. This is no less important than improving the library itself! 16 | If you find a typo in the documentation, do not hesitate to submit a GitHub pull request. 17 | 18 | 4. You would like to propose a new feature and implement it 19 | - Post about your intended feature, and we shall discuss the design and 20 | implementation. Once we agree that the plan looks good, go ahead and implement it. 21 | 22 | 5. You would like to implement a feature or bug-fix for an outstanding issue 23 | - Look at the issues labelled as 24 | ["good first issue"](https://github.com/Adversarial-Deep-Learning/code-soup/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22) 25 | - Pick an issue and comment on the task that you want to work on this feature. 26 | - If you need more context on a particular issue, please ask and we shall provide. 27 | 28 | If you modify the code, you will most probably also need to code some tests to ensure the correct behaviour. We are using 29 | `pytest` to write our tests: 30 | - naming convention for files `test_*.py`, e.g. `test_precision.py` 31 | - naming of testing functions `def test_*`, e.g. `def test_precision_on_random_data()` 32 | 33 | New code should be compatible with Python 3.X versions. Once you finish implementing a feature or bugfix and tests, 34 | please run lint checking and tests: 35 | 36 | #### Environment 37 | To make sure all dependencies are accounted for we recommend to use a different environment, If you have not worked on an environment before you can check it out here https://realpython.com/python-virtual-environments-a-primer/, you can use the `environment.yml` file or install the dependencies using 38 | ```python 39 | pip install -r requirements.txt 40 | ``` 41 | In case you have installed a new dependency which is necessary for the issue you are working on (keep in mind we want to keep minimal dependencies) update that in the ```requirements.txt``` file. 42 | 43 | #### Formatting Code 44 | 45 | To ensure the codebase complies with a style guide, we use [flake8](https://flake8.pycqa.org/en/latest/), 46 | [black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/) tools to 47 | format and check codebase for compliance with PEP8. To install those tools with pip, please run 48 | 49 | ```bash 50 | pip install flake8 black isort 51 | ``` 52 | 53 | ##### Formatting without pre-commit 54 | 55 | If you choose not to use pre-commit, you can take advantage of IDE extensions configured to black format or invoke 56 | black manually to format files and commit them. 57 | 58 | ```bash 59 | # This should autoformat the files 60 | isort -rc . 61 | black . 
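# (note: with isort 5.x, as pinned in .pre-commit-config.yaml, the -rc flag is no longer needed; plain `isort .` recurses by default)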
62 | # Run lint checking 63 | flake8 code-soup/ tests/ 64 | # If everything is OK, then commit 65 | git add . 66 | git commit -m "Added awesome feature" 67 | ``` 68 | 69 | ##### Formatting with pre-commit 70 | 71 | To automate the process, we have configured the repo with [pre-commit hooks](https://pre-commit.com/) to use black to autoformat the staged files to ensure every commit complies with a style guide. This requires some setup, which is described below: 72 | 73 | 1. Install pre-commit in your python environment. 74 | 2. Run pre-commit install that configures a virtual environment to invoke black, isort and flake8 on commits. 75 | 76 | ```bash 77 | pip install pre-commit 78 | pre-commit install 79 | ``` 80 | 81 | 3. When files are committed: 82 | - If the stages files are not compliant with black, black will autoformat the staged files. If this were to happen, files should be staged and committed again. See example code below. 83 | - If the staged files are not compliant with flake8, errors will be raised. These errors should be fixed and the files should be committed again. See example code below. 84 | 85 | ```bash 86 | git add . 87 | git commit -m "Added awesome feature" 88 | # DONT'T WORRY IF ERRORS ARE RAISED. 89 | # YOUR CODE IS NOT COMPLIANT WITH flake8, isort or black 90 | # Fix any flake8 errors by following their suggestions 91 | # isort and black will automatically format the files so they might look different, but you'll need to stage the files 92 | # again for committing 93 | # After fixing any flake8 errors 94 | git add . 95 | git commit -m "Added feature" 96 | ``` 97 | 98 | #### Run tests: 99 | 100 | To run all tests with coverage (assuming installed `pytest-cov`): 101 | ```python 102 | pytest --cov-report term-missing --cov='./code_soup/' --cov-config=.coveragerc 103 | ``` 104 | To install `pytest-cov` 105 | ```python 106 | pip install pytest-cov 107 | ``` 108 | 109 | #### Send a PR 110 | 111 | If everything is OK, please send a Pull Request to https://github.com/Adversarial-Deep-Learning/code-soup 112 | 113 | If you are not familiar with creating a Pull Request, here are some guides: 114 | - http://stackoverflow.com/questions/14680711/how-to-do-a-github-pull-request 115 | - https://help.github.com/articles/creating-a-pull-request/ 116 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Adversarial Deep Learning 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # code-soup 2 | [![codecov](https://codecov.io/gh/Adversarial-Deep-Learning/code-soup/branch/main/graph/badge.svg?token=OQIJCADZF0)](https://codecov.io/gh/Adversarial-Deep-Learning/code-soup) 3 | [![Tests](https://github.com/Adversarial-Deep-Learning/code-soup/actions/workflows/pytest.yml/badge.svg)](https://github.com/Adversarial-Deep-Learning/code-soup/actions/workflows/pytest.yml) 4 | [![Lint](https://github.com/Adversarial-Deep-Learning/code-soup/actions/workflows/lint.yml/badge.svg)](https://github.com/Adversarial-Deep-Learning/code-soup/actions/workflows/lint.yml) 5 | 6 | **code-soup is the python code for the book "Adversarial Deep Learning" and its tutorials. You can use this in conjunction with a course on Adversarial Deep Learning, or for study on your own. We're looking for solid contributors to help.** 7 | 8 | Despite the great success of deep neural networks in a wide range of applications, they have been repeatedly shown to be vulnerable to adversarial attacks. *Adversarial Deep Learning* is a book being written by [Dr. Di Jin](https://scholar.google.com/citations?user=x5QTK9YAAAAJ&hl=en), [Dr. Yifang Yin](https://yifangyin.github.io/), [Yaman Kumar](https://sites.google.com/view/yaman-kumar/), and [Dr. Rajiv Ratn Shah](https://www.iiitd.ac.in/rajivratn), which gives the reader an introduction to the progress made in this field. At code-soup we are building the codebase of these algorithms in a *clean, simple and minimal* manner . We strive to give the reader a smooth experience while reading the book and understanding the code in parallel with a minimal set of dependencies and library. Contact of the core developers can be seen in [AUTHORS](./AUTHORS.md). 9 | 10 | ## Hacktoberfest2021 11 | We will be participating in Hacktoberfest 2021! For instructions join our [Slack Channel](https://join.slack.com/t/ssoc2021/shared_invite/zt-winj985x-rOLZx~Yl239qNVnImZKfNw) here! Look at the contribution guidelines for starters! 12 | 13 | ## Structure of the project 14 | When complete, this project will have Python implementations for all the pseudocode algorithms in the book, as well as tests and examples of use. You can check the exact repository structure here [Repository Structure Docs](./REPO_STRUCTURE.md). 15 | The overall idea is to let the user read the algorithm and understand the attack in the `code-soup/ch{ch_num}/models/{topic}.py` and the demonstration in the tutorial. 16 | 17 | ## Requirements 18 | The requirements are stored in `requirements.txt` you can install them using 19 | ``` 20 | pip install -r requirements.txt 21 | ``` 22 | We recommend to use a virtual environment, the exported yaml is available at `environment.yml`. 23 | 24 | ## Tutorials 25 | The tutorial to each algorithm is available in the Tutorials folder. 26 | 27 | ## Index 28 | Index for tutorials and test suite for each algorithm. 
29 | 30 | | Topic, Chapter | Tutorial | 31 | |--|--| 32 | | Generative Adversarial Networks (Chapter 5) | [Tutorial](./Tutorials/ch5/GAN/GAN_Tutorial.ipynb) | 33 | 34 | 35 | ## Contribution 36 | Please take a look the [CONTRIBUTING.md](https://github.com/Adversarial-Deep-Learning/code-soup/blob/main/CONTRIBUTING.md) for details, :star: us if you liked the work. 37 | -------------------------------------------------------------------------------- /REPO_STRUCTURE.md: -------------------------------------------------------------------------------- 1 | # Code Soup Repository Structure 2 | 3 | For every algorithm or ingredient of our code soup we have three targets implementation, unit-tests, tutorials. Pertaining to these three we have three folders **code-soup**, **tests**, **Tutorials** 4 | 5 | ```bash 6 | .code-soup/ 7 | +-- code_soup/ #main package 8 | +-- tests/ #unit-tests 9 | +-- Tutorials/ #tutorials 10 | ``` 11 | Each of these follows a book like structure as shown below 12 | 13 | 14 | Code-Soup, main package 15 | --- 16 | 17 | Main package to be used in fencing and build attacks / defenses 18 | 19 | ```bash 20 | .code-soup/code_soup/ 21 | +-- common/ #Used across the package, parallel to glossary 22 | | +-- vision/ 23 | | | +-- models/ #Commonly used models for eg GPT-2 24 | | | +-- utils/ #Commonly used utils like accuracy metric etc 25 | | | +-- dataset/ #Datasets used in the chapter 26 | | +-- text/ #Same as above 27 | | +-- rl/ #Same as above 28 | # For every chapter -> 29 | +-- ch{Chapter_Number}/ #Code refering to a particular chapter 30 | | +-- algorithms/ #Attackers or Defenders used in the chapter 31 | | | +--{Name_of_Attack/Defense}.py 32 | # There will be exactly one file pertaining to the agents. 33 | # This is supposed to be parallel to the pseudcode in a book. 34 | # Therefore only model states and step functions for attack/defense should be here 35 | ``` 36 | 37 | Tests, Unit tests 38 | --- 39 | 40 | For Unit testing of each module in the package 41 | 42 | ```bash 43 | # Exactly same structure would be followed for the test 44 | .code-soup/tests/ 45 | +-- test_common/ 46 | | +-- test_vision/ 47 | | | +-- test_models/ 48 | | | +-- test_utils/ 49 | | | +-- test_dataset/ 50 | | +-- test_text/ 51 | | +-- test_rl/ 52 | | +-- test_utils/ 53 | 54 | # For every chapter -> 55 | +-- test_ch{Chapter_Number}/ 56 | | +-- test_algorithms/ 57 | | | +--test_{Name_of_Attack/Defense}.py 58 | ``` 59 | 60 | Tutorials 61 | --- 62 | 63 | Tutorial, Demonstration, Success, and Visualisation for each algorithm. 
64 | 65 | ```bash 66 | # Follows a similar structure 67 | .code-soup/Tutorial 68 | # For every chapter -> 69 | +-- ch{Chapter_Number} 70 | | +-- {Name_of_Attack/Defense}/ 71 | | | +--{Name_of_Attack/Defense}_Tutorial.ipynb #Main Tutorial Notebbok 72 | | | +--config.json #For storing hyper parameters etc 73 | | | +--results.md #(optional) for storing the results obtained 74 | ``` 75 | 76 | For detailed information of particular syntax/structure followed check the readme in folders of those chapters 77 | -------------------------------------------------------------------------------- /Tutorials/ch5/readme.md: -------------------------------------------------------------------------------- 1 | # Tutorials for Chapter 5: Attack on Image Classifier 2 | -------------------------------------------------------------------------------- /Tutorials/readme.md: -------------------------------------------------------------------------------- 1 | # Tutorials for the book Adversarial Deep Learning 2 | -------------------------------------------------------------------------------- /code_soup/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/code_soup/__init__.py -------------------------------------------------------------------------------- /code_soup/ch5/__init__.py: -------------------------------------------------------------------------------- 1 | from code_soup.ch5.algorithms.gan import GAN, Discriminator, Generator 2 | from code_soup.ch5.algorithms.one_pixel_attack import OnePixelAttack 3 | from code_soup.ch5.algorithms.zoo_attack import ZooAttack 4 | -------------------------------------------------------------------------------- /code_soup/ch5/algorithms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/code_soup/ch5/algorithms/__init__.py -------------------------------------------------------------------------------- /code_soup/ch5/algorithms/atn.py: -------------------------------------------------------------------------------- 1 | """Implements Adversarial Transformation Networks 2 | 3 | 4 | Assumptions: 5 | - The classifier model outptus softmax logits. 6 | """ 7 | 8 | from typing import Tuple, Union 9 | 10 | import torch 11 | import torch.nn as nn 12 | import torch.nn.functional as F 13 | import torch.optim as optim 14 | 15 | 16 | class ATNBase(nn.Module): 17 | def __init__( 18 | self, 19 | classifier_model: torch.nn.Module, 20 | target_idx: int, 21 | alpha: float = 1.5, 22 | beta: float = 0.010, 23 | device: torch.device = torch.device("cpu"), 24 | lr: float = 0.001, 25 | ): 26 | """ 27 | Initializes ATN Base class. 28 | 29 | Args: 30 | classifier_model (torch.nn.Module): 31 | A pre-trained classification model that outputs logits (without softmax). 32 | target_idx (int): The index of the target class. 33 | alpha (float): The value by which the max logit is multiplied and set at target index. 34 | beta (float): The weight for the perturbation loss. 35 | device (torch.device): The device to use. 36 | lr (float): The learning rate. 
37 | """ 38 | 39 | super(ATNBase, self).__init__() 40 | if alpha <= 1: 41 | raise ValueError("Alpha must be greater than 1") 42 | self.classifier_model = classifier_model 43 | self.alpha = alpha 44 | self.beta = beta 45 | self.target_idx = target_idx 46 | self.device = device 47 | self.lr = lr 48 | self.loss_fn = nn.MSELoss() 49 | self.optimizer = optim.Adam(self.parameters(), lr=lr) 50 | 51 | # TODO: Check if this seems okay 52 | @torch.no_grad() 53 | def rerank(self, softmax_logits: torch.Tensor) -> torch.Tensor: 54 | """ 55 | Re-ranks the softmax logits. 56 | 57 | Args: 58 | softmax_logits (torch.Tensor): The softmax logits to be re-ranked. 59 | Returns: 60 | torch.Tensor: The re-ranked softmax logits. 61 | """ 62 | 63 | # Get the max logit 64 | max_logits = torch.max(softmax_logits, dim=1).values 65 | 66 | # Set the max logit at the target index and multiply by self.alpha 67 | softmax_logits[:, self.target_idx] = max_logits * self.alpha 68 | softmax_logits = softmax_logits / torch.linalg.norm( 69 | softmax_logits, dim=-1 70 | ).view(-1, 1) 71 | return softmax_logits 72 | 73 | def forward(self, x): 74 | """ 75 | Forward pass of the model. Not implemented for this class. 76 | 77 | Args: 78 | x (torch.Tensor): The input to the model. 79 | 80 | Raises: 81 | NotImplementedError: This method is not implemented. 82 | """ 83 | 84 | raise NotImplementedError( 85 | "Forward for ATNBase has not been implemented. Please use child classes for a model." 86 | ) 87 | 88 | def compute_loss( 89 | self, x: torch.Tensor, x_hat: torch.Tensor, y: torch.Tensor, y_hat: torch.Tensor 90 | ) -> torch.Tensor: 91 | """ 92 | Computes the loss for input and output. 93 | 94 | Args: 95 | x (torch.Tensor): The original input to the classification/ATN model. 96 | x_hat (torch.Tensor): The adversarial output from the ATN model. 97 | y (torch.Tensor): The re-ranked logits from the classification model. 98 | y_hat (torch.Tensor): The output logits from the classifier on the adversarial input. 99 | 100 | Returns: 101 | torch.Tensor: A tensor containing loss. 102 | 103 | """ 104 | return self.beta * self.loss_fn(x, x_hat) + self.loss_fn(y, y_hat) 105 | 106 | def step(self, data: torch.Tensor) -> Tuple[Union[torch.Tensor, float]]: 107 | """ 108 | Performs a single optimization step. 109 | 110 | Args: 111 | data (torch.Tensor): The data to be used for the optimization step. 112 | 113 | Returns: 114 | Tuple[Union[torch.Tensor, float]]: 115 | A tuple containing the adversarial image, 116 | the softmax logits from the classifier model on the adversarial image, 117 | and the loss. 
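Example (an illustrative sketch, shown with the concrete subclass ``SimpleAAE``; ``clf`` is assumed to be a pre-trained MNIST classifier returning raw logits, ``train_loader`` a DataLoader yielding ``(images, labels)`` batches, and ``target_idx=3`` a placeholder target class):

>>> atn = SimpleAAE(classifier_model=clf, target_idx=3)  # clf: assumed pre-trained classifier
>>> for batch in train_loader:  # batch = (images, labels)
...     adv_images, adv_softmax, loss = atn.step(batch)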
118 | """ 119 | image, label = data 120 | image = image.to(self.device) 121 | 122 | adv_out, adv_logits = self(image) 123 | 124 | self.zero_grad() 125 | cls_model_out = self.classifier_model(image) 126 | softmax_logits = F.softmax(cls_model_out, dim=1) 127 | 128 | # Rerank the softmax logits 129 | reranked_logits = self.rerank(softmax_logits) 130 | 131 | # Calculate loss on a batch 132 | loss = self.compute_loss(image, adv_out, reranked_logits, adv_logits) 133 | loss.backward() 134 | 135 | self.optimizer.step() 136 | return adv_out, adv_logits, loss.item() 137 | 138 | 139 | class SimpleAAE(ATNBase): 140 | def __init__( 141 | self, 142 | classifier_model: torch.nn.Module, 143 | target_idx: int, 144 | alpha: float = 1.5, 145 | beta: float = 0.010, 146 | device: torch.device = torch.device("cpu"), 147 | lr: float = 0.001, 148 | input_shape: tuple = (1, 28, 28), 149 | num_channels: list = [64, 64], 150 | deconv_num_channels: list = [64, 64], 151 | typ="a", 152 | ): 153 | """ 154 | Initializes the SimpleAAE class. In these type of ATNs adversarial images are produced. 155 | 156 | Args: 157 | classifier_model (torch.nn.Module): 158 | A pre-trained classification model that outputs logits (without softmax). 159 | target_idx (int): The index of the target class. 160 | alpha (float): The value by which the max logit is multiplied and set at target index. 161 | beta (float): The weight for the perturbation loss. 162 | device (torch.device): The device to use. 163 | lr (float): The learning rate. 164 | input_shape (tuple): The shape of the input. 165 | num_channels (list): The number of channels in each convolutional layer. 166 | deconv_num_channels (list): The number of channels in each deconvolutional layer. 167 | typ (str): The type of the model. One of 'a', 'b', 'c'. Based on the paper. 168 | """ 169 | 170 | assert typ in ["a", "b", "c"] 171 | super(SimpleAAE, self).__init__( 172 | classifier_model, target_idx, alpha, beta, device, lr 173 | ) 174 | 175 | self.input_shape = input_shape 176 | 177 | if typ == "a": 178 | layers = [] 179 | sizes = ( 180 | [input_shape[0] * input_shape[1] * input_shape[2]] 181 | + num_channels 182 | + [input_shape[0] * input_shape[1] * input_shape[2]] 183 | ) 184 | layers.append(nn.Flatten()) 185 | for i in range(len(sizes) - 1): 186 | layers.append(nn.Linear(sizes[i], sizes[i + 1])) 187 | if i != len(sizes) - 2: 188 | layers.append(nn.ReLU()) 189 | else: 190 | layers.append(nn.Tanh()) 191 | 192 | elif typ == "b": 193 | layers = [] 194 | sizes = [input_shape[0]] + num_channels 195 | for i in range(len(sizes) - 1): 196 | layers.append( 197 | nn.Conv2d(sizes[i], sizes[i + 1], kernel_size=3, padding=1) 198 | ) 199 | layers.append(nn.ReLU()) 200 | # TODO: Check if Max Pooling is needed here (most probably is). 
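# typ "b": flatten the convolutional features and map them back to a full-size image via a linear layer with a Tanh activation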
201 | 202 | layers.append(nn.Flatten()) 203 | layers.append( 204 | nn.Linear( 205 | sizes[-1] * input_shape[1] * input_shape[2], 206 | input_shape[0] * input_shape[1] * input_shape[2], 207 | ) 208 | ) 209 | layers.append(nn.Tanh()) 210 | 211 | elif typ == "c": 212 | layers = [] 213 | sizes = [input_shape[0]] + num_channels 214 | for i in range(len(sizes) - 1): 215 | layers.append( 216 | nn.Conv2d(sizes[i], sizes[i + 1], kernel_size=3, padding=1) 217 | ) 218 | layers.append(nn.ReLU()) 219 | 220 | deconv_sizes = [num_channels[-1]] + deconv_num_channels 221 | for j in range(len(deconv_sizes) - 1): 222 | layers.append( 223 | nn.ConvTranspose2d( 224 | deconv_sizes[j], 225 | deconv_sizes[j + 1], 226 | kernel_size=3, 227 | stride=1, 228 | padding=1, 229 | ) 230 | ) 231 | 232 | layers.append(nn.ReLU()) 233 | 234 | layers.append(nn.Flatten()) 235 | 236 | layers.append( 237 | nn.Linear( 238 | deconv_sizes[-1] * input_shape[1] * input_shape[2], 239 | input_shape[0] * input_shape[1] * input_shape[2], 240 | ) 241 | ) 242 | layers.append(nn.Tanh()) 243 | 244 | self.atn = nn.Sequential(*layers) 245 | 246 | def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: 247 | """ 248 | Performs a forward pass on the model. 249 | 250 | Args: 251 | x (torch.Tensor): The input to the model. 252 | 253 | Returns: 254 | Tuple[torch.Tensor]: 255 | The adversarial output of the ATN model, 256 | the softmax logits from the classifier model on the adversarial image. 257 | """ 258 | adv_out = self.atn(x) 259 | adv_out = adv_out.view(-1, *self.input_shape) 260 | logits = self.classifier_model(adv_out) 261 | softmax_logits = F.softmax(logits, dim=1) 262 | return adv_out, softmax_logits 263 | 264 | 265 | class SimplePATN(ATNBase): 266 | def __init__( 267 | self, 268 | classifier_model: torch.nn.Module, 269 | target_idx: int, 270 | alpha: float = 1.5, 271 | beta: float = 0.010, 272 | device: torch.device = torch.device("cpu"), 273 | lr: float = 0.001, 274 | input_shape: tuple = (1, 28, 28), 275 | num_channels: list = [64, 64], 276 | ): 277 | 278 | """ 279 | Initializes the SimplePATN class. In these type of ATNs, perturbations are produced. 280 | 281 | Args: 282 | classifier_model (torch.nn.Module): 283 | A pre-trained classification model that outputs logits (without softmax). 284 | target_idx (int): The index of the target class. 285 | alpha (float): The value by which the max logit is multiplied and set at target index. 286 | beta (float): The weight for the perturbation loss. 287 | device (torch.device): The device to use. 288 | lr (float): The learning rate. 289 | input_shape (tuple): The shape of the input. 290 | num_channels (list): The number of channels in each convolutional layer. 291 | """ 292 | super(SimplePATN, self).__init__( 293 | classifier_model, target_idx, alpha, beta, device, lr 294 | ) 295 | 296 | self.input_shape = input_shape 297 | 298 | layers = [] 299 | sizes = [input_shape[0]] + num_channels 300 | for i in range(len(sizes) - 1): 301 | layers.append(nn.Conv2d(sizes[i], sizes[i + 1], kernel_size=3, padding=1)) 302 | layers.append(nn.ReLU()) 303 | 304 | layers.append(nn.Flatten()) 305 | layers.append( 306 | nn.Linear( 307 | sizes[-1] * input_shape[1] * input_shape[2], 308 | input_shape[0] * input_shape[1] * input_shape[2], 309 | ) 310 | ) 311 | layers.append( 312 | nn.Tanh() 313 | ) # TODO: Check if this is the right activation function for PATN 314 | 315 | self.atn = nn.Sequential(*layers) 316 | 317 | def forward(self, x): 318 | """ 319 | Performs a forward pass on the model. 
320 | 321 | Args: 322 | x (torch.Tensor): The input to the model. 323 | 324 | Returns: 325 | Tuple[torch.Tensor]: 326 | The adversarial image for the model, 327 | the softmax logits from the classifier model on the adversarial image. 328 | """ 329 | adv_out = self.atn(x) 330 | adv_out = adv_out.view(-1, *self.input_shape) 331 | logits = self.classifier_model(adv_out + x) 332 | softmax_logits = F.softmax(logits, dim=1) 333 | return adv_out + x, softmax_logits 334 | -------------------------------------------------------------------------------- /code_soup/ch5/algorithms/configs/zoo_attack.json: -------------------------------------------------------------------------------- 1 | { 2 | "binary_search_steps": 1, 3 | "max_iterations": 10000, 4 | "learning_rate": 2e-3, 5 | "abort_early": true, 6 | "targeted": true, 7 | "confidence": 0, 8 | "initial_const": 0.5, 9 | "use_log": false, 10 | "use_tanh": true, 11 | "reset_adam_after_found": false, 12 | "batch_size": 128, 13 | "const": 0.5, 14 | "early_stop_iters": 0, 15 | "adam_beta1": 0.9, 16 | "adam_beta2": 0.999, 17 | "use_importance": true, 18 | "use_resize": false, 19 | "init_size": 32, 20 | "adam_eps": 1e-8, 21 | "resize_iter_1": 2000, 22 | "resize_iter_2": 10000 23 | } 24 | -------------------------------------------------------------------------------- /code_soup/ch5/algorithms/gan.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.optim as optim 6 | 7 | 8 | class Generator(nn.Module): 9 | """ 10 | Simple generator network. 11 | Methods 12 | ------- 13 | forward(x) 14 | - returns a generated sample 15 | """ 16 | 17 | def __init__(self, image_size: int, channels: int, latent_dims: int, lr: float): 18 | """ 19 | Parameters 20 | ---------- 21 | image_size : int 22 | Number of input dimensions aka side length of image 23 | channels: int 24 | Number of channels in image 25 | latent_dims : int 26 | Number of dimensions in the projecting layer 27 | lr : float 28 | Learning rate 29 | """ 30 | super(Generator, self).__init__() 31 | self.image_size = image_size 32 | self.channels = channels 33 | self.latent_dims = latent_dims 34 | self.main = nn.Sequential( 35 | nn.Linear(self.latent_dims, 256), 36 | nn.LeakyReLU(0.2), 37 | nn.Linear(256, 512), 38 | nn.LeakyReLU(0.2), 39 | nn.Linear(512, 1024), 40 | nn.LeakyReLU(0.2), 41 | nn.Linear(1024, self.image_size * self.image_size * self.channels), 42 | nn.Tanh(), 43 | ) 44 | self.optimizer = optim.Adam(self.parameters(), lr=lr) 45 | 46 | def forward(self, x: torch.Tensor) -> torch.Tensor: 47 | """ 48 | Parameters 49 | ---------- 50 | x : torch.Tensor 51 | Input tensor 52 | Returns 53 | ------- 54 | output : torch.Tensor 55 | Generated sample 56 | """ 57 | output = self.main(x) 58 | return output.view(-1, self.channels, self.image_size, self.image_size) 59 | 60 | 61 | class Discriminator(nn.Module): 62 | """ 63 | Simple discriminator network. 
64 | Methods 65 | ------- 66 | forward(x) 67 | - returns a probability that the input is real 68 | """ 69 | 70 | def __init__(self, image_size: int, channels: int, lr: float): 71 | """ 72 | Parameters 73 | ---------- 74 | image_size : int 75 | Number of input dimensions aka side length of image 76 | channels: int 77 | Number of channels in image 78 | lr : float 79 | Learning rate 80 | """ 81 | super(Discriminator, self).__init__() 82 | self.image_size = image_size 83 | self.channels = channels 84 | self.main = nn.Sequential( 85 | nn.Linear(self.image_size * self.image_size * self.channels, 1024), 86 | nn.LeakyReLU(0.2), 87 | nn.Dropout(0.3), 88 | nn.Linear(1024, 512), 89 | nn.LeakyReLU(0.2), 90 | nn.Dropout(0.3), 91 | nn.Linear(512, 256), 92 | nn.LeakyReLU(0.2), 93 | nn.Dropout(0.3), 94 | nn.Linear(256, 1), 95 | nn.Sigmoid(), 96 | ) 97 | self.optimizer = optim.Adam(self.parameters(), lr=lr) 98 | 99 | def forward(self, x): 100 | """ 101 | Parameters 102 | ---------- 103 | x : torch.Tensor 104 | Input tensor 105 | Returns 106 | ------- 107 | output : torch.Tensor 108 | Probability that the input is real 109 | """ 110 | x = x.view(-1, self.image_size * self.image_size * self.channels) 111 | return self.main(x) 112 | 113 | 114 | class GAN: 115 | """ 116 | Generative Adversarial Network Model Class. 117 | Refer to the paper for more details: `Generative Adversarial Nets `_ 118 | Methods 119 | ------- 120 | step(self, i, data) 121 | Iterates the model for a single batch of data 122 | """ 123 | 124 | def __init__( 125 | self, 126 | image_size: int, 127 | channels: int, 128 | latent_dims: int, 129 | device: torch.device, 130 | lr: float, 131 | ): 132 | """ 133 | Parameters 134 | ---------- 135 | image_size : int 136 | Number of input dimensions aka side length of image 137 | channels: int 138 | Number of channels in image 139 | latent_dims : int 140 | Number of dimensions in the projecting layer 141 | device : torch.device 142 | Device to run the model on 143 | lr : float 144 | Learning rate 145 | """ 146 | self.image_size = image_size 147 | self.channels = channels 148 | self.latent_dims = latent_dims 149 | self.device = device 150 | self.generator = Generator(image_size, channels, latent_dims, lr).to(device) 151 | self.discriminator = Discriminator(image_size, channels, lr).to(device) 152 | self.criterion = torch.nn.BCELoss() 153 | self.real_label, self.fake_label = 1.0, 0.0 154 | 155 | def step(self, data: torch.Tensor) -> Tuple: 156 | """ 157 | Iterates the model for a single batch of data, calculates the loss and updates the model parameters. 
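Example (an illustrative sketch; ``loader`` is assumed to be a DataLoader of MNIST-like ``(images, labels)`` batches, and the hyperparameter values are placeholders):

>>> gan = GAN(image_size=28, channels=1, latent_dims=128, device=torch.device("cpu"), lr=2e-4)  # placeholder hyperparameters
>>> for batch in loader:
...     D_x, D_G_z1, errD, D_G_z2, errG = gan.step(batch)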
158 | Parameters 159 | ---------- 160 | data : torch.Tensor 161 | Batch of data 162 | Returns 163 | ------- 164 | D_x: 165 | The average output (across the batch) of the discriminator for the all real batch 166 | D_G_z1: 167 | Average discriminator outputs for the all fake batch before updating discriminator 168 | errD: 169 | Discriminator loss 170 | D_G_z2: 171 | Average discriminator outputs for the all fake batch after updating discriminator 172 | errG: 173 | Generator loss 174 | """ 175 | real_image, _ = data 176 | real_image = real_image.to(self.device) 177 | batch_size = real_image.shape[0] 178 | label = torch.full( 179 | (batch_size,), self.real_label, dtype=torch.float, device=self.device 180 | ) 181 | self.discriminator.zero_grad() 182 | # Forward pass real batch through D 183 | output = self.discriminator(real_image).view(-1) 184 | # Calculate loss on all-real batch 185 | errD_real = self.criterion(output, label) 186 | errD_real.backward() 187 | 188 | D_x = output.mean().item() 189 | # Train with all-fake batch 190 | # Generate batch of latent vectors 191 | noise = torch.randn(batch_size, self.latent_dims, device=self.device) 192 | # Generate fake image batch with G 193 | fake = self.generator(noise) 194 | label.fill_(self.fake_label) 195 | # Classify all fake batch with D 196 | output = self.discriminator(fake.detach()).view(-1) 197 | # Calculate D's loss on the all-fake batch 198 | errD_fake = self.criterion(output, label) 199 | errD_fake.backward() 200 | D_G_z1 = output.mean().item() 201 | # Compute error of D as sum over the fake and the real batches 202 | errD = errD_real + errD_fake 203 | # Update D 204 | self.discriminator.optimizer.step() 205 | 206 | self.generator.zero_grad() 207 | label.fill_(self.real_label) # fake labels are real for generator cost 208 | # Since we just updated D, perform another forward pass of all-fake batch through D 209 | output = self.discriminator(fake).view(-1) 210 | # Calculate G's loss based on this output 211 | errG = self.criterion(output, label) 212 | # Calculate gradients for G 213 | errG.backward() 214 | D_G_z2 = output.mean().item() 215 | # Update G 216 | self.generator.optimizer.step() 217 | return D_x, D_G_z1, errD, D_G_z2, errG 218 | -------------------------------------------------------------------------------- /code_soup/ch5/algorithms/one_pixel_attack.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from scipy.optimize import differential_evolution 4 | 5 | """ 6 | The code is a slight modification and refactoring from the following repository 7 | https://github.com/Hyperparticle/one-pixel-attack-keras 8 | 9 | """ 10 | 11 | 12 | class OnePixelAttack: 13 | 14 | """ 15 | Attack using One Pixel 16 | 17 | """ 18 | 19 | def __init__(self, model, device=None): 20 | 21 | """ 22 | 23 | Initialize the Attack obj 24 | Parameters 25 | ---------- 26 | model: torch.nn 27 | model to be attacked 28 | device : string 29 | 'cpu' / 'cuda' 30 | 31 | """ 32 | 33 | self.model = model 34 | self.device = device 35 | if self.device is None: 36 | self.device = "cuda" if torch.cuda.is_available() else "cpu" 37 | 38 | def perturb_image(self, perturbation, orig_img): # pragma: no cover 39 | """ 40 | Parameters 41 | ---------- 42 | orig_image: image to be changed 43 | perturbation: tuple of (x,y,r,g,b) on basis of which the image is changed 44 | [(),(),()]. 
An image can have multiple perturbations. 45 | """ 46 | x_pos, y_pos, *rgb = perturbation 47 | new_img = orig_img 48 | new_img[:, x_pos, y_pos] = rgb 49 | 50 | return new_img 51 | 52 | def perturbation_image(self, perturbation_array, image): # pragma: no cover 53 | """ 54 | Applies multiple perturbations to a single image 55 | Parameters 56 | ---------- 57 | image: the image to be perturbed 58 | perturbation_array: array like [x1, y1, r1, g1, b1, x2, y2, r2, g2, b2, ...] 59 | with one (x, y, r, g, b) group per pixel to be changed 60 | """ 61 | perturbation_array = np.array(perturbation_array) 62 | 63 | perturbation_array = perturbation_array.astype(int) 64 | 65 | perturbation_array = np.split(perturbation_array, len(perturbation_array) // 5) 66 | 67 | new_image = image 68 | 69 | for perturb in perturbation_array: 70 | new_image = self.perturb_image(perturb, new_image) 71 | 72 | return new_image 73 | 74 | def predict_class(self, xs, img, target_class, minimize=True): # pragma: no cover 75 | """ 76 | Parameters 77 | ---------- 78 | xs: 1d array to be evolved 79 | img: image to be perturbed 80 | target_class: class to be targeted or avoided 81 | minimize: this objective is always minimized, so for a targeted attack 82 | the complement (1 - probability) is returned so that minimizing it increases the target-class probability 83 | """ 84 | # Perturb the image with the given pixel(s) x and get the prediction of the model 85 | img_perturbed = self.perturbation_image(xs, img) 86 | prediction = self.model_predict(img_perturbed)[target_class] 87 | 88 | # This function should always be minimized, so return its complement 89 | # when the attack is targeted, i.e. to increase the target-class probability 90 | return prediction if minimize else 1 - prediction 91 | 92 | def model_predict(self, image): # pragma: no cover 93 | """ 94 | Helper function to predict class probabilities from the model for a single image 95 | """ 96 | prediction = None 97 | with torch.no_grad(): 98 | image = torch.FloatTensor(image).reshape(1, *image.shape) 99 | image = image.to(self.device) 100 | prediction = self.model(image)[0].detach().numpy() 101 | return prediction 102 | 103 | def attack_success( 104 | self, x, img, target_class, targeted_attack=False, verbose=False 105 | ): # pragma: no cover 106 | """ 107 | Check if the attack is a success. This is
the callback helper function for differential_evolution 108 | Parameters 109 | ---------- 110 | x: 1d array that is evolved 111 | img: image to be perturbed 112 | target_class: class to be targeted or avoided 113 | Returns: 114 | ------- 115 | true if the evolution needs to be stopped 116 | """ 117 | # Perturb the image with the given pixel(s) and get the prediction of the model 118 | attack_image = self.perturbation_image(x, img) 119 | 120 | confidence = self.model_predict(attack_image) 121 | predicted_class = np.argmax(confidence) 122 | 123 | # If the prediction is what we want (misclassification or 124 | # targeted classification), return True 125 | if verbose: 126 | print("Confidence:", confidence[target_class]) 127 | if (targeted_attack and predicted_class == target_class) or ( 128 | not targeted_attack and predicted_class != target_class 129 | ): 130 | return True 131 | 132 | return False 133 | 134 | def attack( 135 | self, 136 | image, 137 | original_label, 138 | target=None, 139 | pixel_count=1, 140 | maxiter=75, 141 | popsize=400, 142 | verbose=False, 143 | ): # pragma: no cover 144 | 145 | """ 146 | Runs the attack on a single image, searches the image space 147 | Parameters 148 | ---------- 149 | image: Numpy.Array 150 | image of shape(channel, height, width) 151 | original_label: int 152 | class the image belongs too 153 | target: int 154 | class to be targetted 155 | pixels_count: int 156 | Number of Pixels to be perturbed (changed) 157 | maxiter:int, optional 158 | The maximum number of generations over which the entire population is evolved. 159 | The maximum number of function evaluations (with no polishing) is: (maxiter + 1) * popsize * len(x) 160 | popsize:int, optional 161 | A multiplier for setting the total population size. The population has popsize * len(x) individuals. 
162 | verbose: boolean 163 | set to true to print the confidence 164 | Returns 165 | ------- 166 | perturbation_array: 167 | List of all the best perturbations to the images in the batch 168 | 169 | 170 | """ 171 | 172 | # Change the target class based on whether this is a targeted attack or not 173 | targeted_attack = target is not None 174 | target_class = target if targeted_attack else original_label 175 | 176 | # Define bounds for a flat vector of x,y,r,g,b values 177 | # For more pixels, repeat this layout 178 | print("Image shape:", image.shape) 179 | dim_x, dim_y = image.shape[1], image.shape[2] 180 | bounds = [(0, dim_x), (0, dim_y), (0, 256), (0, 256), (0, 256)] * pixel_count 181 | 182 | # Population multiplier, in terms of the size of the perturbation vector x 183 | popmul = max(1, popsize // len(bounds)) 184 | 185 | # Format the predict/callback functions for the differential evolution algorithm 186 | def predict_fn(xs): 187 | print("Predicting!") 188 | 189 | return self.predict_class(xs, image, target_class, target is None) 190 | 191 | def callback_fn(x, convergence): 192 | print("check success!") 193 | return self.attack_success(x, image, target_class, targeted_attack, verbose) 194 | 195 | # Call Scipy's Implementation of Differential Evolution 196 | attack_result = differential_evolution( 197 | predict_fn, 198 | bounds, 199 | maxiter=maxiter, 200 | popsize=popmul, 201 | recombination=1, 202 | atol=-1, 203 | callback=callback_fn, 204 | polish=False, 205 | ) 206 | 207 | # ----------------- Calculate some useful statistics to return from this function 208 | # attack_image = self.perturbation_image(attack_result.x, image) 209 | # prior_probs = self.model_predict(image) 210 | # predicted_probs = self.model_predict(attack_image) 211 | # predicted_class = np.argmax(predicted_probs) 212 | # actual_class = original_label 213 | # success = predicted_class != actual_class 214 | # cdiff = prior_probs[actual_class] - predicted_probs[actual_class] 215 | 216 | # Show the best attempt at a solution (successful or not) 217 | # if plot: 218 | # helper.plot_image(attack_image, actual_class, self.class_names, predicted_class) 219 | 220 | # return [pixel_count, attack_image, actual_class, predicted_class, success, cdiff, prior_probs, 221 | # predicted_probs, attack_result.x] 222 | 223 | # return the best perturbation array 224 | return attack_result.x 225 | 226 | def step( 227 | self, 228 | data, 229 | labels=None, 230 | pixels_perturbed=1, 231 | targeted=False, 232 | maxiter=75, 233 | popsize=400, 234 | verbose=False, 235 | ): 236 | """ 237 | Runs the attack on a single batch of images, searching the pixel space for each image 238 | Parameters 239 | ---------- 240 | data : torch.Tensor 241 | Batch of data 242 | labels: List 243 | list of all the unique classes from the dataset 244 | pixels_perturbed: int 245 | Number of Pixels to be perturbed (changed) 246 | targeted: boolean 247 | Whether this is a targeted attack (in case of a targeted attack, every label is tried as a target) 248 | maxiter:int, optional 249 | The maximum number of generations over which the entire population is evolved. 250 | The maximum number of function evaluations (with no polishing) is: (maxiter + 1) * popsize * len(x) 251 | popsize:int, optional 252 | A multiplier for setting the total population size. The population has popsize * len(x) individuals.
253 | verbose: boolean 254 | set to true to print the confidence 255 | 256 | Returns 257 | ------- 258 | perturbation_array: 259 | List of all the best perturbations to the images in the batch 260 | 261 | """ 262 | 263 | images, image_orig_label = data 264 | batch_size = len(images) 265 | 266 | # store the best perturbation for all the images in the batch 267 | perturbation_array = [] 268 | 269 | for i in range(batch_size): 270 | image = images[i].detach().numpy() 271 | orig_label = image_orig_label[i].detach().numpy().astype(int) 272 | targets = [None] if not targeted else range(len(labels)) 273 | 274 | for target in targets: 275 | if targeted: # pragma: no cover 276 | print("Attacking with target", labels[target]) 277 | if target == orig_label: 278 | continue 279 | best_perturbation = self.attack( 280 | image, 281 | orig_label, 282 | target, 283 | pixels_perturbed, 284 | maxiter=maxiter, 285 | popsize=popsize, 286 | verbose=verbose, 287 | ) 288 | perturbation_array.append(best_perturbation) 289 | 290 | return perturbation_array 291 | -------------------------------------------------------------------------------- /code_soup/ch5/algorithms/zoo_attack.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implements ZOO (Zero-Order Optimization) Attack. 3 | 4 | This code is based on the L2-attack from the original implementation of the attack: 5 | https://github.com/huanzhang12/ZOO-Attack/blob/master/l2_attack_black.py 6 | 7 | Usage: 8 | >>> import json 9 | >>> from code_soup.ch5.models.zoo_attack import ZOOAttack 10 | >>> config = json.load(open('./code-soup/ch5/models/configs/zoo_attack.json')) 11 | >>> attack = ZOOAttack(model, config, input_image_shape=[28, 28, 3], device = 'cpu') 12 | >>> adv_img, const = attack.attack(orig_img, target) 13 | 14 | """ 15 | from typing import Dict, List 16 | 17 | import cv2 18 | import numpy as np 19 | import torch 20 | import torch.nn.functional as F 21 | from PIL import Image 22 | 23 | 24 | class ZooAttack: 25 | """ 26 | Implements the ZooAttack class. 27 | """ 28 | 29 | def __init__( 30 | self, 31 | model: torch.nn.Module, 32 | config: Dict, 33 | input_image_shape: List[int], 34 | device: str, 35 | ): 36 | """Initializes the ZooAttack class. 37 | 38 | Args: 39 | model (torch.nn.Module): A PyTorch model. 40 | config (Dict): A dictionary containing the configuration for the attack. 41 | input_image_shape (List[int]): A tuple of ints containing the shape of the input image. 42 | device (str): The device to perform the attack on. 43 | 44 | Raises: 45 | NotImplementedError: If `use_tanh` is `False` and `use_resize` is `True`. 46 | """ 47 | 48 | assert len(input_image_shape) == 3, "`input_image_shape` must be of length 3" 49 | 50 | self.config = config 51 | 52 | if self.config["use_tanh"] is False and self.config["use_resize"] is True: 53 | # NOTE: self.up and self.down need to be updated dynamically to match the modifier shape. 54 | # Original Implementation is possibly flawed in this aspect. 55 | raise NotImplementedError( 56 | "Current implementation does not support `use_tanh` as `False` and `use_resize` as `True` at the same time." 
57 | ) 58 | 59 | if self.config["early_stop_iters"] == 0: 60 | self.config["early_stop_iters"] = self.config["max_iterations"] // 10 61 | 62 | self.device = device 63 | self.input_image_shape = input_image_shape 64 | 65 | # Put model in eval mode 66 | self.model = model.to(device) 67 | self.model.eval() 68 | 69 | # DUMMIES - Values will be reset during attack 70 | var_size = np.prod(input_image_shape) # width * height * num_channels 71 | self.var_list = np.array(range(0, var_size), dtype=np.int32) 72 | 73 | # Initialize Adam optimizer values 74 | self.mt_arr = np.zeros(var_size, dtype=np.float32) 75 | self.vt_arr = np.zeros(var_size, dtype=np.float32) 76 | self.adam_epochs = np.ones(var_size, dtype=np.int64) 77 | 78 | # Sampling Probabilities 79 | self.sample_prob = np.ones(var_size, dtype=np.float32) / var_size 80 | 81 | def get_perturbed_image(self, orig_img: torch.tensor, modifier: np.ndarray): 82 | """Calculates the perturbed image given `orig_img` and `modifier`. 83 | 84 | Args: 85 | orig_img (torch.tensor): The original image with a batch dimension. Expected batch size is 1. 86 | Using any other batch size may lead to unexpected behavior. 87 | modifier (np.ndarray): A numpy array with modifier(s) for the image. 88 | 89 | Returns: 90 | torch.tensor: The perturbed image from original image and modifier. 91 | """ 92 | 93 | assert orig_img.ndim == 4, "`orig_img` must be a 4D tensor" 94 | assert modifier.ndim == 4, "`modifier` must be a 4D tensor" 95 | 96 | b = modifier.shape[0] 97 | x = orig_img.shape[1] 98 | y = orig_img.shape[2] 99 | z = orig_img.shape[3] 100 | 101 | new_modifier = np.zeros((b, x, y, z), dtype=np.float32) 102 | 103 | if x != modifier.shape[1] or y != modifier.shape[2]: 104 | for k, v in enumerate(modifier): 105 | new_modifier[k, :, :, :] = cv2.resize( 106 | modifier[k, :, :, :], 107 | (x, y), 108 | interpolation=cv2.INTER_LINEAR, 109 | ) 110 | else: 111 | new_modifier = modifier 112 | 113 | if self.config["use_tanh"]: 114 | return torch.tanh(orig_img + new_modifier) / 2 115 | else: 116 | return orig_img + new_modifier 117 | 118 | def l2_distance_loss(self, orig_img: torch.tensor, new_img: torch.tensor): 119 | """Calculates the L2 loss between the image and the new images. 120 | 121 | Args: 122 | orig_img (torch.tensor): The original image tensor. 123 | new_img (torch.tensor): The tensor containing the perturbed images from the original image. 124 | 125 | Returns: 126 | np.ndarray: The numpy array containing the L2 loss between the original image and the perturbed images. 127 | """ 128 | 129 | # assert orig_img.shape == new_img.shape, "Images must be the same shape" 130 | 131 | assert new_img.ndim == 4, "`new_img` must be a 4D tensor" 132 | dim = (1, 2, 3) 133 | 134 | if self.config["use_tanh"]: 135 | return ( 136 | torch.sum(torch.square(new_img - torch.tanh(orig_img) / 2), dim=dim) 137 | .detach() 138 | .cpu() 139 | .numpy() 140 | ) 141 | else: 142 | return ( 143 | torch.sum(torch.square(new_img - orig_img), dim=dim) 144 | .detach() 145 | .cpu() 146 | .numpy() 147 | ) 148 | 149 | def confidence_loss(self, new_img: torch.tensor, target: torch.tensor): 150 | """Calculate the confidence loss between the perturbed images and target. 151 | 152 | Args: 153 | new_img (torch.tensor): A 4D tensor containing the perturbed images. 154 | target (torch.tensor): A 2D tensor containing the target labels. 155 | 156 | Returns: 157 | np.ndarray: A numpy array containing the confidence loss between the perturbed images and target. 
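Note:
    In brief, the margin computed below works as follows: with `real` the score assigned to the
    target class and `other` the highest score among the remaining classes, the targeted loss is
    max(0, other - real + confidence) and the untargeted loss is max(0, real - other + confidence).
    When `use_log` is enabled, both scores are log-probabilities (softmax followed by log).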
158 | """ 159 | assert new_img.ndim == 4, "`new_img` must be of shape (N, H, W, C)" 160 | assert ( 161 | target.ndim == 2 162 | ), "`target` must be of shape (N,L) where L is number of classes" 163 | 164 | new_img = new_img.permute(0, 3, 1, 2) 165 | 166 | model_output = self.model(new_img) 167 | 168 | if self.config["use_log"]: 169 | model_output = F.softmax(model_output, dim=1) 170 | 171 | real = torch.sum(target * model_output, dim=1) 172 | other = torch.max((1 - target) * model_output - (target * 10000), dim=1)[0] 173 | 174 | if self.config["use_log"]: 175 | real = torch.log(real + 1e-30) 176 | other = torch.log(other + 1e-30) 177 | 178 | confidence = torch.tensor(self.config["confidence"], device=self.device).type( 179 | torch.float64 180 | ) 181 | 182 | if self.config["targeted"]: 183 | # If targetted, optimize for making the other class most likely 184 | output = ( 185 | torch.max(torch.zeros_like(real), other - real + confidence) 186 | .detach() 187 | .cpu() 188 | .numpy() 189 | ) 190 | else: 191 | # If untargetted, optimize for making this class least likely. 192 | output = ( 193 | torch.max(torch.zeros_like(real), real - other + confidence) 194 | .detach() 195 | .cpu() 196 | .numpy() 197 | ) 198 | 199 | return output, model_output 200 | 201 | def total_loss( 202 | self, 203 | orig_img: torch.tensor, 204 | new_img: torch.tensor, 205 | target: torch.tensor, 206 | const: int, 207 | ): 208 | """Calculate the total loss for the original image and the perturbed images. 209 | 210 | Args: 211 | orig_img (torch.tensor): A 4D tensor containing the original image. 212 | new_img (torch.tensor): A 4D tensor containing the perturbed images. 213 | target (torch.tensor): A 2D tensor containing the target labels. 214 | const (int): The constant to be used in calculating the loss, with which confidence loss is scaled. 215 | 216 | Returns: 217 | np.ndarray: A numpy array containing the total loss for the original image and the perturbed images. 218 | """ 219 | l2_loss = self.l2_distance_loss(orig_img, new_img) 220 | 221 | confidence_loss, model_output = self.confidence_loss(new_img, target) 222 | 223 | return ( 224 | l2_loss + const * confidence_loss, 225 | l2_loss, 226 | confidence_loss, 227 | model_output, 228 | ) 229 | 230 | # Adapted from original code 231 | def max_pooling(self, modifier: np.ndarray, patch_size: int): 232 | """Max pooling operation on a single-channel modifier with a given patch size. 233 | 234 | The array remains the same size after the operation, only the patches have max value throughout. 235 | 236 | Args: 237 | modifier (np.ndarray): A numpy array containing a channel of the perturbation. 238 | patch_size (int): The size of the patches to be max pooled. 239 | 240 | Returns: 241 | np.ndarray: A 2D modifier array containing the max pooled patches. 242 | """ 243 | 244 | assert modifier.ndim == 2, "`modifier` must be a 2D array" 245 | img_pool = np.copy(modifier) 246 | img_x = modifier.shape[0] 247 | img_y = modifier.shape[1] 248 | for i in range(0, img_x, patch_size): 249 | for j in range(0, img_y, patch_size): 250 | img_pool[i : i + patch_size, j : j + patch_size] = np.max( 251 | modifier[i : i + patch_size, j : j + patch_size] 252 | ) 253 | return img_pool 254 | 255 | def zero_order_gradients(self, losses: np.ndarray): 256 | """Calculate the zero order gradients for the losses. 
257 | 258 | Args: 259 | losses (np.ndarray): A numpy array containing the losses with length - 2 * batch_size + 1 260 | 261 | Returns: 262 | np.ndarray: A numpy array containing the zero order gradients for the losses. 263 | """ 264 | 265 | grad = np.zeros(self.config["batch_size"]) 266 | for i in range(self.config["batch_size"]): 267 | grad[i] = (losses[i * 2 + 1] - losses[i * 2 + 2]) / 0.0002 268 | return grad 269 | 270 | def coordinate_adam( 271 | self, indices: np.ndarray, grad: np.ndarray, modifier: np.ndarray, proj: bool 272 | ): 273 | """Perform inplace coordinate-wise Adam update on modifier. 274 | 275 | Args: 276 | indices (np.ndarray): A numpy array containing the indices of the coordinates to be updated. 277 | grad (np.ndarray): A numpy array containing the gradients. 278 | modifier (np.ndarray): A numpy array containing the current modifier/perturbation. 279 | proj (bool): Whether to limit the new values of the modifier between up and down limits. 280 | """ 281 | # First moment 282 | mt = self.mt_arr[indices] 283 | mt = self.config["adam_beta1"] * mt + (1 - self.config["adam_beta1"]) * grad 284 | 285 | self.mt_arr[indices] = mt 286 | 287 | # Second moment 288 | vt = self.vt_arr[indices] 289 | vt = self.config["adam_beta2"] * vt + (1 - self.config["adam_beta2"]) * ( 290 | grad * grad 291 | ) 292 | 293 | self.vt_arr[indices] = vt 294 | 295 | epochs = self.adam_epochs[indices] 296 | 297 | # Bias Correction 298 | mt_hat = mt / (1 - np.power(self.config["adam_beta1"], epochs)) 299 | vt_hat = vt / (1 - np.power(self.config["adam_beta2"], epochs)) 300 | 301 | m = modifier.reshape(-1) 302 | old_val = m[indices] 303 | old_val -= ( 304 | self.config["learning_rate"] 305 | * mt_hat 306 | / (np.sqrt(vt_hat) + self.config["adam_eps"]) 307 | ) 308 | if proj: 309 | old_val = np.maximum( 310 | np.minimum(old_val, self.up[indices]), self.down[indices] 311 | ) 312 | m[indices] = old_val 313 | self.adam_epochs[indices] = epochs + 1 314 | 315 | # return m.reshape(modifier.shape) 316 | 317 | # Adapted from original code 318 | def get_new_prob( 319 | self, modifier: np.ndarray, max_pooling_ratio: int = 8, gen_double: bool = False 320 | ): 321 | """ 322 | Calculate the new probabilities by performing max pooling on the modifier. 323 | 324 | Args: 325 | modifier (np.ndarray): A numpy array containing the perturbation. 326 | max_pooling_ratio (int): The ratio of the size of the patches to be max pooled. 327 | gen_double (bool): Whether to double the size of the perturbation after max pooling. 328 | 329 | Returns: 330 | np.ndarray: A numpy array containing the new probabilities. 
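Note:
    The returned probabilities are proportional to the max-pooled absolute values of the
    modifier, normalised to sum to one, so coordinate sampling in `single_step` is biased
    towards regions that already carry large perturbations.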
331 | 332 | """ 333 | modifier = np.squeeze(modifier) 334 | old_shape = modifier.shape 335 | if gen_double: 336 | new_shape = (old_shape[0] * 2, old_shape[1] * 2, old_shape[2]) 337 | else: 338 | new_shape = old_shape 339 | prob = np.empty(shape=new_shape, dtype=np.float32) 340 | for i in range(modifier.shape[2]): 341 | image = np.abs(modifier[:, :, i]) 342 | image_pool = self.max_pooling(image, old_shape[0] // max_pooling_ratio) 343 | if gen_double: 344 | prob[:, :, i] = np.array( 345 | Image.fromarray(image_pool).resize( 346 | (new_shape[0], new_shape[1]), Image.NEAREST 347 | ) 348 | ) 349 | else: 350 | prob[:, :, i] = image_pool 351 | 352 | # NOTE: This is here to handle all zeros input 353 | if np.sum(prob) != 0: 354 | prob /= np.sum(prob) 355 | else: # pragma: no cover 356 | prob = np.ones(shape=new_shape, dtype=np.float32) 357 | prob /= np.sum(prob) 358 | 359 | return prob 360 | 361 | # Adapted from original code 362 | def resize_img( 363 | self, 364 | small_x: int, 365 | small_y: int, 366 | num_channels: int, 367 | modifier: np.ndarray, 368 | max_pooling_ratio: int = 8, 369 | reset_only: bool = False, 370 | ): 371 | """ 372 | Resize the image to the specified size. 373 | 374 | Args: 375 | small_x (int): The new x size of the image. 376 | small_y (int): The new y size of the image. 377 | num_channels (int): The number of channels in the image. 378 | modifier (np.ndarray): A numpy array containing the perturbation. 379 | max_pooling_ratio (int): The ratio of the size of the patches to be max pooled. 380 | reset_only (bool): Whether to only reset the image, or to resize and crop as well. 381 | """ 382 | 383 | small_single_shape = (small_x, small_y, num_channels) 384 | 385 | new_modifier = np.zeros((1,) + small_single_shape, dtype=np.float32) 386 | if not reset_only: 387 | # run the resize_op once to get the scaled image 388 | assert modifier.ndim == 4, "Expected 4D array as modifier" 389 | prev_modifier = np.copy(modifier) 390 | for k, v in enumerate(modifier): 391 | new_modifier[k, :, :, :] = cv2.resize( 392 | modifier[k, :, :, :], 393 | (small_x, small_y), 394 | interpolation=cv2.INTER_LINEAR, 395 | ) 396 | 397 | # prepare the list of all valid variables 398 | var_size = np.prod(small_single_shape) 399 | self.var_list = np.array(range(0, var_size), dtype=np.int32) 400 | # ADAM status 401 | self.mt_arr = np.zeros(var_size, dtype=np.float32) 402 | self.vt_arr = np.zeros(var_size, dtype=np.float32) 403 | self.adam_epochs = np.ones(var_size, dtype=np.int32) 404 | # update sample probability 405 | if reset_only: 406 | self.sample_prob = np.ones(var_size, dtype=np.float32) / var_size 407 | else: 408 | self.sample_prob = self.get_new_prob(prev_modifier, max_pooling_ratio, True) 409 | self.sample_prob = self.sample_prob.reshape(var_size) 410 | 411 | return new_modifier 412 | 413 | def single_step( 414 | self, 415 | modifier: np.ndarray, 416 | orig_img: torch.tensor, 417 | target: torch.tensor, 418 | const: int, 419 | max_pooling_ratio: int = 8, 420 | var_indice: list = None, 421 | ): 422 | """ 423 | Perform a single step of optimization. 424 | 425 | Args: 426 | modifier (np.ndarray): A numpy array containing the perturbation. 427 | orig_img (torch.tensor): The original image. 428 | target (torch.tensor): The target image. 429 | const (int): The constant to be used in the loss function. 430 | max_pooling_ratio (int): The ratio of the size of the patches to be max pooled. 431 | var_indice (list): The indices of the coordinates to be optimized. 
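Note:
    A single call replicates the modifier into 2 * batch_size + 1 copies, nudges one sampled
    coordinate per +/- pair by 0.0001, evaluates the total loss on every copy, converts the
    paired losses into finite-difference gradients, and applies the coordinate-wise Adam update
    to `modifier` in place.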
432 | 433 | Returns: 434 | (float, float, float, np.ndarray, torch.tensor): 435 | The total loss, the L2 loss, the confidence loss, 436 | model output on perturbed image, the perturbed image. 437 | 438 | """ 439 | 440 | assert modifier.ndim == 4, "Expected 4D array as modifier" 441 | assert modifier.shape[0] == 1, "Expected 1 batch for modifier" 442 | assert target.ndim == 2, "Expected 2D tensor as target" 443 | 444 | var = np.repeat(modifier, self.config["batch_size"] * 2 + 1, axis=0) 445 | var_size = modifier.size 446 | 447 | # Select indices for current iteration 448 | 449 | if var_indice is None: 450 | if self.config["use_importance"]: 451 | var_indice = np.random.choice( 452 | self.var_list.size, 453 | self.config["batch_size"], 454 | replace=False, 455 | p=self.sample_prob, 456 | ) 457 | else: 458 | var_indice = np.random.choice( 459 | self.var_list.size, self.config["batch_size"], replace=False 460 | ) 461 | indices = self.var_list[var_indice] 462 | 463 | for i in range(self.config["batch_size"]): 464 | var[i * 2 + 1].reshape(-1)[indices[i]] += 0.0001 465 | var[i * 2 + 2].reshape(-1)[indices[i]] -= 0.0001 466 | 467 | new_img = self.get_perturbed_image(orig_img, var) 468 | losses, l2_losses, confidence_losses, model_output = self.total_loss( 469 | orig_img, new_img, target, const 470 | ) 471 | 472 | if modifier.shape[1] > self.config["init_size"]: 473 | self.sample_prob = self.get_new_prob( 474 | modifier, max_pooling_ratio=max_pooling_ratio 475 | ) 476 | self.sample_prob = self.sample_prob.reshape(var_size) 477 | 478 | grad = self.zero_order_gradients(losses) 479 | 480 | # Modifier is updated here, so is adam epochs, mt_arr, and vt_arr 481 | self.coordinate_adam(indices, grad, modifier, not self.config["use_tanh"]) 482 | 483 | return ( 484 | losses[0], 485 | l2_losses[0], 486 | confidence_losses[0], 487 | model_output[0].detach().numpy(), 488 | new_img[0], 489 | ) 490 | 491 | def attack( 492 | self, 493 | orig_img: np.ndarray, 494 | target: np.ndarray, 495 | modifier_init: np.ndarray = None, 496 | max_pooling_ratio: int = 8, 497 | ): 498 | """ 499 | Perform the attack on coordinate-batches. 500 | 501 | Args: 502 | orig_img (np.ndarray): The original image. 503 | target (np.ndarray): The target image. 504 | modifier_init (np.ndarray): The initial modifier. Default is `None`. 505 | max_pooling_ratio (int): The ratio of the size of the patches to be max pooled. 506 | 507 | Returns: 508 | (np.ndarray, np.ndarray): The best perturbed image and best constant for scaling confidence loss. 
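Note:
    The scaling constant is tuned by an outer binary search over `binary_search_steps` rounds:
    `mid` starts at config["initial_const"] with low = 0 and high = 1e10; after each round it is
    moved to the low/high midpoint when an adversarial example was found, and otherwise
    multiplied by 10 (or moved to the midpoint once an upper bound below 1e9 has been
    established).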
509 | """ 510 | 511 | def compare(x, y): 512 | if not isinstance(x, (float, int, np.int64)): 513 | x = np.copy(x) 514 | if self.config["targeted"]: 515 | x[y] -= self.config["confidence"] 516 | else: 517 | x[y] += self.config["confidence"] 518 | x = np.argmax(x) 519 | if self.config["targeted"]: 520 | return x == y 521 | else: 522 | return x != y 523 | 524 | assert orig_img.ndim == 3, "Expected 3D array as image" 525 | assert target.ndim == 1, "Expected 1D array as target" 526 | 527 | if modifier_init is not None: 528 | assert modifier_init.ndim == 3, "Expected 3D array as modifier" 529 | modifier = modifier_init.copy() 530 | else: 531 | if self.config["use_resize"]: 532 | modifier = self.resize_img( 533 | self.config["init_size"], 534 | self.config["init_size"], 535 | 3, 536 | modifier_init, 537 | max_pooling_ratio, 538 | reset_only=True, 539 | ) 540 | else: 541 | modifier = np.zeros(orig_img.shape, dtype=np.float32) 542 | 543 | if self.config["use_tanh"]: 544 | orig_img = np.arctanh(orig_img * 1.999999) 545 | 546 | var_size = np.prod(orig_img.shape) # width * height * num_channels 547 | self.var_list = np.array(range(0, var_size), dtype=np.int32) 548 | 549 | # Initialize Adam optimizer values 550 | self.mt_arr = np.zeros(var_size, dtype=np.float32) 551 | self.vt_arr = np.zeros(var_size, dtype=np.float32) 552 | self.adam_epochs = np.ones(var_size, dtype=np.int64) 553 | self.up = np.zeros(var_size, dtype=np.float32) 554 | self.down = np.zeros(var_size, dtype=np.float32) 555 | 556 | # Sampling Probabilities 557 | self.sample_prob = np.ones(var_size, dtype=np.float32) / var_size 558 | 559 | low = 0.0 560 | mid = self.config["initial_const"] 561 | high = 1e10 562 | 563 | if not self.config["use_tanh"]: 564 | self.up = 0.5 - orig_img.reshape(-1) 565 | self.down = -0.5 - orig_img.reshape(-1) 566 | 567 | outer_best_const = mid 568 | outer_best_l2 = 1e10 569 | outer_best_score = -1 570 | outer_best_adv = orig_img 571 | 572 | # Make Everything 4D and Tensorize 573 | orig_img = torch.from_numpy(orig_img).unsqueeze(0).to(self.device) 574 | target = torch.from_numpy(target).unsqueeze(0).to(self.device) 575 | modifier = modifier.reshape((-1,) + modifier.shape) 576 | 577 | for outer_step in range(self.config["binary_search_steps"]): 578 | 579 | best_l2 = 1e10 580 | best_score = -1 581 | 582 | # NOTE: In the original implemenation there is a step to move mid to high 583 | # at last step with some condition 584 | 585 | prev = 1e6 586 | last_confidence_loss = 1.0 587 | 588 | if modifier_init is not None: 589 | assert modifier_init.ndim == 3, "Expected 3D array as modifier" 590 | modifier = modifier_init.copy() 591 | modifier = modifier.reshape((-1,) + modifier.shape) 592 | else: 593 | if self.config["use_resize"]: 594 | modifier = self.resize_img( 595 | self.config["init_size"], 596 | self.config["init_size"], 597 | 3, 598 | modifier_init, 599 | max_pooling_ratio, 600 | reset_only=True, 601 | ) 602 | 603 | else: 604 | modifier = np.zeros(orig_img.shape, dtype=np.float32) 605 | 606 | self.mt_arr.fill(0.0) 607 | self.vt_arr.fill(0.0) 608 | self.adam_epochs.fill(1) 609 | stage = 0 610 | eval_costs = 0 611 | 612 | # NOTE: Original code allows for a custom start point in iterations 613 | for iter in range(0, self.config["max_iterations"]): 614 | if self.config["use_resize"]: 615 | if iter == self.config["resize_iter_1"]: 616 | modifier = self.resize_img( 617 | self.config["init_size"] * 2, 618 | self.config["init_size"] * 2, 619 | 3, 620 | modifier, 621 | max_pooling_ratio, 622 | ) 623 | if iter == 
self.config["resize_iter_2"]: 624 | modifier = self.resize_img( 625 | self.config["init_size"] * 4, 626 | self.config["init_size"] * 4, 627 | 3, 628 | modifier, 629 | max_pooling_ratio, 630 | ) 631 | if iter % (self.config["max_iterations"] // 10) == 0: 632 | new_img = self.get_perturbed_image(orig_img, modifier) 633 | ( 634 | total_losses, 635 | l2_losses, 636 | confidence_losses, 637 | model_output, 638 | ) = self.total_loss(orig_img, new_img, target, mid) 639 | print( 640 | f"iter = {iter}, cost = {eval_costs}, size = {modifier.shape}, " 641 | f"total_loss = {total_losses[0]:.5g}, l2_loss = {l2_losses[0]:.5g}, " 642 | f"confidence_loss = {confidence_losses[0]:.5g}" 643 | ) 644 | 645 | ( 646 | total_loss, 647 | l2_loss, 648 | confidence_loss, 649 | model_output, 650 | adv_img, 651 | ) = self.single_step( 652 | modifier, orig_img, target, mid, max_pooling_ratio=max_pooling_ratio 653 | ) 654 | 655 | eval_costs += self.config["batch_size"] 656 | 657 | if ( 658 | confidence_loss == 0.0 659 | and last_confidence_loss != 0.0 660 | and stage == 0 661 | ): 662 | 663 | if self.config["reset_adam_after_found"]: 664 | print("Resetting Adam") 665 | self.mt_arr.fill(0.0) 666 | self.vt_arr.fill(0.0) 667 | self.adam_epochs.fill(1) 668 | print("Setting Stage to 1") 669 | stage = 1 670 | 671 | last_confidence_loss = confidence_loss 672 | 673 | if ( 674 | self.config["abort_early"] 675 | and iter % self.config["early_stop_iters"] == 0 676 | ): 677 | if total_loss > prev * 0.9999: 678 | print("Early stopping because there is no improvement") 679 | break 680 | prev = total_loss 681 | 682 | if l2_loss < best_l2 and compare(model_output, np.argmax(target[0])): 683 | best_l2 = l2_loss 684 | best_score = np.argmax(model_output) 685 | 686 | if l2_loss < outer_best_l2 and compare( 687 | model_output, np.argmax(target[0]) 688 | ): 689 | outer_best_l2 = l2_loss 690 | outer_best_score = np.argmax(model_output) 691 | outer_best_adv = adv_img 692 | outer_best_const = mid 693 | 694 | if compare(best_score, np.argmax(target[0])) and best_score != -1: 695 | 696 | print("Old Constant: ", mid) 697 | high = min(high, mid) 698 | if high < 1e9: 699 | mid = (low + high) / 2 700 | print("New Constant: ", mid) 701 | else: 702 | print("Old Constant: ", mid) 703 | low = max(low, mid) 704 | if high < 1e9: # pragma: no cover 705 | mid = (low + high) / 2 706 | else: # pragma: no cover 707 | mid *= 10 708 | print("new constant: ", mid) 709 | 710 | return outer_best_adv, outer_best_const 711 | -------------------------------------------------------------------------------- /code_soup/ch8/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/code_soup/ch8/__init__.py -------------------------------------------------------------------------------- /code_soup/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/code_soup/common/__init__.py -------------------------------------------------------------------------------- /code_soup/common/text/__init__.py: -------------------------------------------------------------------------------- 1 | # utils import 2 | from code_soup.common.text.utils import perturbations 3 | 4 | # datasets 5 | 6 | # models 7 | -------------------------------------------------------------------------------- 
/code_soup/common/text/utils/json/homoglyph.json: -------------------------------------------------------------------------------- 1 | { 2 | "a": "@a\u0251\u03b1\u0430\u237a\uff41\ud835\udc1a\ud835\udc4e\ud835\udc82\ud835\udcb6\ud835\udcea\ud835\udd1e\ud835\udd52\ud835\udd86\ud835\uddba\ud835\uddee\ud835\ude22\ud835\ude56\ud835\ude8a\ud835\udec2\ud835\udefc\ud835\udf36\ud835\udf70\ud835\udfaa", 3 | "b": "b\u0184\u042c\u13cf\u1472\u15af\uff42\ud835\udc1b\ud835\udc4f\ud835\udc83\ud835\udcb7\ud835\udceb\ud835\udd1f\ud835\udd53\ud835\udd87\ud835\uddbb\ud835\uddef\ud835\ude23\ud835\ude57\ud835\ude8b", 4 | "c": "c\u03f2\u0441\u1d04\u217d\u2ca5\uabaf\uff43\ud801\udc3d\ud835\udc1c\ud835\udc50\ud835\udc84\ud835\udcb8\ud835\udcec\ud835\udd20\ud835\udd54\ud835\udd88\ud835\uddbc\ud835\uddf0\ud835\ude24\ud835\ude58\ud835\ude8c", 5 | "d": "d\u0501\u13e7\u146f\u2146\u217e\ua4d2\uff44\ud835\udc1d\ud835\udc51\ud835\udc85\ud835\udcb9\ud835\udced\ud835\udd21\ud835\udd55\ud835\udd89\ud835\uddbd\ud835\uddf1\ud835\ude25\ud835\ude59\ud835\ude8d", 6 | "e": "e\u0435\u04bd\u212e\u212f\u2147\uab32\uff45\ud835\udc1e\ud835\udc52\ud835\udc86\ud835\udcee\ud835\udd22\ud835\udd56\ud835\udd8a\ud835\uddbe\ud835\uddf2\ud835\ude26\ud835\ude5a\ud835\ude8e", 7 | "f": "f\u017f\u03dd\u0584\u1e9d\ua799\uab35\uff46\ud835\udc1f\ud835\udc53\ud835\udc87\ud835\udcbb\ud835\udcef\ud835\udd23\ud835\udd57\ud835\udd8b\ud835\uddbf\ud835\uddf3\ud835\ude27\ud835\ude5b\ud835\ude8f\ud835\udfcb", 8 | "g": "g\u018d\u0261\u0581\u1d83\u210a\uff47\ud835\udc20\ud835\udc54\ud835\udc88\ud835\udcf0\ud835\udd24\ud835\udd58\ud835\udd8c\ud835\uddc0\ud835\uddf4\ud835\ude28\ud835\ude5c\ud835\ude90", 9 | "h": "h\u04bb\u0570\u13c2\u210e\uff48\ud835\udc21\ud835\udc89\ud835\udcbd\ud835\udcf1\ud835\udd25\ud835\udd59\ud835\udd8d\ud835\uddc1\ud835\uddf5\ud835\ude29\ud835\ude5d\ud835\ude91", 10 | "i": "!i\u0131\u0269\u026a\u02db\u037a\u03b9\u0456\u04cf\u13a5\u1fbe\u2139\u2148\u2170\u2373\ua647\uab75\uff49\ud806\udcc3\ud835\udc22\ud835\udc56\ud835\udc8a\ud835\udcbe\ud835\udcf2\ud835\udd26\ud835\udd5a\ud835\udd8e\ud835\uddc2\ud835\uddf6\ud835\ude2a\ud835\ude5e\ud835\ude92\ud835\udea4\ud835\udeca\ud835\udf04\ud835\udf3e\ud835\udf78\ud835\udfb2", 11 | "j": "j\u03f3\u0458\u2149\uff4a\ud835\udc23\ud835\udc57\ud835\udc8b\ud835\udcbf\ud835\udcf3\ud835\udd27\ud835\udd5b\ud835\udd8f\ud835\uddc3\ud835\uddf7\ud835\ude2b\ud835\ude5f\ud835\ude93", 12 | "k": "k\uff4b\ud835\udc24\ud835\udc58\ud835\udc8c\ud835\udcc0\ud835\udcf4\ud835\udd28\ud835\udd5c\ud835\udd90\ud835\uddc4\ud835\uddf8\ud835\ude2c\ud835\ude60\ud835\ude94", 13 | "l": "1", 14 | "m": "m\uff4d", 15 | "n": "n\u0578\u057c\uff4e\ud835\udc27\ud835\udc5b\ud835\udc8f\ud835\udcc3\ud835\udcf7\ud835\udd2b\ud835\udd5f\ud835\udd93\ud835\uddc7\ud835\uddfb\ud835\ude2f\ud835\ude63\ud835\ude97", 16 | "o": "0", 17 | "p": "p\u03c1\u03f1\u0440\u2374\u2ca3\uff50\ud835\udc29\ud835\udc5d\ud835\udc91\ud835\udcc5\ud835\udcf9\ud835\udd2d\ud835\udd61\ud835\udd95\ud835\uddc9\ud835\uddfd\ud835\ude31\ud835\ude65\ud835\ude99\ud835\uded2\ud835\udee0\ud835\udf0c\ud835\udf1a\ud835\udf46\ud835\udf54\ud835\udf80\ud835\udf8e\ud835\udfba\ud835\udfc8", 18 | "q": "q\u051b\u0563\u0566\uff51\ud835\udc2a\ud835\udc5e\ud835\udc92\ud835\udcc6\ud835\udcfa\ud835\udd2e\ud835\udd62\ud835\udd96\ud835\uddca\ud835\uddfe\ud835\ude32\ud835\ude66\ud835\ude9a", 19 | "r": "r\u0433\u1d26\u2c85\uab47\uab48\uab81\uff52\ud835\udc2b\ud835\udc5f\ud835\udc93\ud835\udcc7\ud835\udcfb\ud835\udd2f\ud835\udd63\ud835\udd97\ud835\uddcb\ud835\uddff\ud835\ude33\ud835\ude67\ud835\ude9b", 20 
| "s": "s\u01bd\u0455\ua731\uabaa\uff53\ud801\udc48\ud806\udcc1\ud835\udc2c\ud835\udc60\ud835\udc94\ud835\udcc8\ud835\udcfc\ud835\udd30\ud835\udd64\ud835\udd98\ud835\uddcc\ud835\ude00\ud835\ude34\ud835\ude68\ud835\ude9c", 21 | "t": "t\uff54\ud835\udc2d\ud835\udc61\ud835\udc95\ud835\udcc9\ud835\udcfd\ud835\udd31\ud835\udd65\ud835\udd99\ud835\uddcd\ud835\ude01\ud835\ude35\ud835\ude69\ud835\ude9d", 22 | "u": "u\u028b\u03c5\u057d\u1d1c\ua79f\uab4e\uab52\uff55\ud801\udcf6\ud806\udcd8\ud835\udc2e\ud835\udc62\ud835\udc96\ud835\udcca\ud835\udcfe\ud835\udd32\ud835\udd66\ud835\udd9a\ud835\uddce\ud835\ude02\ud835\ude36\ud835\ude6a\ud835\ude9e\ud835\uded6\ud835\udf10\ud835\udf4a\ud835\udf84\ud835\udfbe", 23 | "v": "v\u03bd\u0475\u05d8\u1d20\u2174\u2228\u22c1\uaba9\uff56\ud805\udf06\ud806\udcc0\ud835\udc2f\ud835\udc63\ud835\udc97\ud835\udccb\ud835\udcff\ud835\udd33\ud835\udd67\ud835\udd9b\ud835\uddcf\ud835\ude03\ud835\ude37\ud835\ude6b\ud835\ude9f\ud835\udece\ud835\udf08\ud835\udf42\ud835\udf7c\ud835\udfb6", 24 | "w": "w\u026f\u0461\u051d\u0561\u1d21\uab83\uff57\ud805\udf0a\ud805\udf0e\ud805\udf0f\ud835\udc30\ud835\udc64\ud835\udc98\ud835\udccc\ud835\udd00\ud835\udd34\ud835\udd68\ud835\udd9c\ud835\uddd0\ud835\ude04\ud835\ude38\ud835\ude6c\ud835\udea0", 25 | "x": "x\u00d7\u0445\u1541\u157d\u166e\u2179\u292b\u292c\u2a2f\uff58\ud835\udc31\ud835\udc65\ud835\udc99\ud835\udccd\ud835\udd01\ud835\udd35\ud835\udd69\ud835\udd9d\ud835\uddd1\ud835\ude05\ud835\ude39\ud835\ude6d\ud835\udea1", 26 | "y": "y\u0263\u028f\u03b3\u0443\u04af\u10e7\u1d8c\u1eff\u213d\uab5a\uff59\ud806\udcdc\ud835\udc32\ud835\udc66\ud835\udc9a\ud835\udcce\ud835\udd02\ud835\udd36\ud835\udd6a\ud835\udd9e\ud835\uddd2\ud835\ude06\ud835\ude3a\ud835\ude6e\ud835\udea2\ud835\udec4\ud835\udefe\ud835\udf38\ud835\udf72\ud835\udfac", 27 | "z": "z\u1d22\uab93\uff5a\ud806\udcc4\ud835\udc33\ud835\udc67\ud835\udc9b\ud835\udccf\ud835\udd03\ud835\udd37\ud835\udd6b\ud835\udd9f\ud835\uddd3\ud835\ude07\ud835\ude3b\ud835\ude6f\ud835\udea3", 28 | "A": "A\u0391\u0410\u13aa\u15c5\u1d00\ua4ee\uab7a\uff21\ud800\udea0\ud81b\udf40\ud835\udc00\ud835\udc34\ud835\udc68\ud835\udc9c\ud835\udcd0\ud835\udd04\ud835\udd38\ud835\udd6c\ud835\udda0\ud835\uddd4\ud835\ude08\ud835\ude3c\ud835\ude70\ud835\udea8\ud835\udee2\ud835\udf1c\ud835\udf56\ud835\udf90", 29 | "B": "B\u0299\u0392\u0412\u0432\u13f4\u13fc\u15f7\u16d2\u212c\ua4d0\ua7b4\uff22\ud800\ude82\ud800\udea1\ud800\udf01\ud835\udc01\ud835\udc35\ud835\udc69\ud835\udcd1\ud835\udd05\ud835\udd39\ud835\udd6d\ud835\udda1\ud835\uddd5\ud835\ude09\ud835\ude3d\ud835\ude71\ud835\udea9\ud835\udee3\ud835\udf1d\ud835\udf57\ud835\udf91", 30 | "C": "C\u03f9\u0421\u13df\u2102\u212d\u216d\u2ca4\ua4da\uff23\ud800\udea2\ud800\udf02\ud801\udc15\ud801\udd1c\ud806\udce9\ud806\udcf2\ud835\udc02\ud835\udc36\ud835\udc6a\ud835\udc9e\ud835\udcd2\ud835\udd6e\ud835\udda2\ud835\uddd6\ud835\ude0a\ud835\ude3e\ud835\ude72\ud83d\udf4c", 31 | "D": "D\u13a0\u15de\u15ea\u1d05\u2145\u216e\ua4d3\uab70\uff24\ud835\udc03\ud835\udc37\ud835\udc6b\ud835\udc9f\ud835\udcd3\ud835\udd07\ud835\udd3b\ud835\udd6f\ud835\udda3\ud835\uddd7\ud835\ude0b\ud835\ude3f\ud835\ude73", 32 | "E": "E\u0395\u0415\u13ac\u1d07\u2130\u22ff\u2d39\ua4f0\uab7c\uff25\ud800\ude86\ud806\udca6\ud806\udcae\ud835\udc04\ud835\udc38\ud835\udc6c\ud835\udcd4\ud835\udd08\ud835\udd3c\ud835\udd70\ud835\udda4\ud835\uddd8\ud835\ude0c\ud835\ude40\ud835\ude74\ud835\udeac\ud835\udee6\ud835\udf20\ud835\udf5a\ud835\udf94", 33 | "F": 
"F\u03dc\u15b4\u2131\ua4dd\ua798\uff26\ud800\ude87\ud800\udea5\ud801\udd25\ud806\udca2\ud806\udcc2\ud834\ude13\ud835\udc05\ud835\udc39\ud835\udc6d\ud835\udcd5\ud835\udd09\ud835\udd3d\ud835\udd71\ud835\udda5\ud835\uddd9\ud835\ude0d\ud835\ude41\ud835\ude75\ud835\udfca", 34 | "G": "G\u0262\u050c\u050d\u13c0\u13f3\u13fb\ua4d6\uab90\uff27\ud835\udc06\ud835\udc3a\ud835\udc6e\ud835\udca2\ud835\udcd6\ud835\udd0a\ud835\udd3e\ud835\udd72\ud835\udda6\ud835\uddda\ud835\ude0e\ud835\ude42\ud835\ude76", 35 | "H": "H\u029c\u0397\u041d\u043d\u13bb\u157c\u210b\u210c\u210d\u2c8e\ua4e7\uab8b\uff28\ud800\udecf\ud835\udc07\ud835\udc3b\ud835\udc6f\ud835\udcd7\ud835\udd73\ud835\udda7\ud835\udddb\ud835\ude0f\ud835\ude43\ud835\ude77\ud835\udeae\ud835\udee8\ud835\udf22\ud835\udf5c\ud835\udf96", 36 | "J": "J\u037f\u0408\u13ab\u148d\u1d0a\ua4d9\ua7b2\uab7b\uff2a\ud835\udc09\ud835\udc3d\ud835\udc71\ud835\udca5\ud835\udcd9\ud835\udd0d\ud835\udd41\ud835\udd75\ud835\udda9\ud835\udddd\ud835\ude11\ud835\ude45\ud835\ude79", 37 | "K": "K\u039a\u041a\u13e6\u16d5\u212a\u2c94\ua4d7\uff2b\ud801\udd18\ud835\udc0a\ud835\udc3e\ud835\udc72\ud835\udca6\ud835\udcda\ud835\udd0e\ud835\udd42\ud835\udd76\ud835\uddaa\ud835\uddde\ud835\ude12\ud835\ude46\ud835\ude7a\ud835\udeb1\ud835\udeeb\ud835\udf25\ud835\udf5f\ud835\udf99", 38 | "L": "L\u029f\u13de\u14aa\u2112\u216c\u2cd0\u2cd1\ua4e1\uabae\uff2c\ud801\udc1b\ud801\udc43\ud801\udd26\ud806\udca3\ud806\udcb2\ud81b\udf16\ud834\ude2a\ud835\udc0b\ud835\udc3f\ud835\udc73\ud835\udcdb\ud835\udd0f\ud835\udd43\ud835\udd77\ud835\uddab\ud835\udddf\ud835\ude13\ud835\ude47\ud835\ude7b", 39 | "M": "M\u039c\u03fa\u041c\u13b7\u15f0\u16d6\u2133\u216f\u2c98\ua4df\uff2d\ud800\udeb0\ud800\udf11\ud835\udc0c\ud835\udc40\ud835\udc74\ud835\udcdc\ud835\udd10\ud835\udd44\ud835\udd78\ud835\uddac\ud835\udde0\ud835\ude14\ud835\ude48\ud835\ude7c\ud835\udeb3\ud835\udeed\ud835\udf27\ud835\udf61\ud835\udf9b", 40 | "N": "N\u0274\u039d\u2115\u2c9a\ua4e0\uff2e\ud801\udd13\ud835\udc0d\ud835\udc41\ud835\udc75\ud835\udca9\ud835\udcdd\ud835\udd11\ud835\udd79\ud835\uddad\ud835\udde1\ud835\ude15\ud835\ude49\ud835\ude7d\ud835\udeb4\ud835\udeee\ud835\udf28\ud835\udf62\ud835\udf9c", 41 | "P": "P\u03a1\u0420\u13e2\u146d\u1d18\u1d29\u2119\u2ca2\ua4d1\uabb2\uff30\ud800\ude95\ud835\udc0f\ud835\udc43\ud835\udc77\ud835\udcab\ud835\udcdf\ud835\udd13\ud835\udd7b\ud835\uddaf\ud835\udde3\ud835\ude17\ud835\ude4b\ud835\ude7f\ud835\udeb8\ud835\udef2\ud835\udf2c\ud835\udf66\ud835\udfa0", 42 | "Q": "Q\u211a\u2d55\uff31\ud835\udc10\ud835\udc44\ud835\udc78\ud835\udcac\ud835\udce0\ud835\udd14\ud835\udd7c\ud835\uddb0\ud835\udde4\ud835\ude18\ud835\ude4c\ud835\ude80", 43 | "R": "R\u01a6\u0280\u13a1\u13d2\u1587\u16b1\u211b\u211c\u211d\ua4e3\uab71\uaba2\uff32\ud801\udcb4\ud81b\udf35\ud834\ude16\ud835\udc11\ud835\udc45\ud835\udc79\ud835\udce1\ud835\udd7d\ud835\uddb1\ud835\udde5\ud835\ude19\ud835\ude4d\ud835\ude81", 44 | "S": "S\u0405\u054f\u13d5\u13da\ua4e2\uff33\ud800\ude96\ud801\udc20\ud81b\udf3a\ud835\udc12\ud835\udc46\ud835\udc7a\ud835\udcae\ud835\udce2\ud835\udd16\ud835\udd4a\ud835\udd7e\ud835\uddb2\ud835\udde6\ud835\ude1a\ud835\ude4e\ud835\ude82", 45 | "T": 
"T\u03a4\u03c4\u0422\u0442\u13a2\u1d1b\u22a4\u27d9\u2ca6\ua4d4\uab72\uff34\ud800\ude97\ud800\udeb1\ud800\udf15\ud806\udcbc\ud81b\udf0a\ud835\udc13\ud835\udc47\ud835\udc7b\ud835\udcaf\ud835\udce3\ud835\udd17\ud835\udd4b\ud835\udd7f\ud835\uddb3\ud835\udde7\ud835\ude1b\ud835\ude4f\ud835\ude83\ud835\udebb\ud835\uded5\ud835\udef5\ud835\udf0f\ud835\udf2f\ud835\udf49\ud835\udf69\ud835\udf83\ud835\udfa3\ud835\udfbd\ud83d\udf68", 46 | "U": "U\u054d\u1200\u144c\u222a\u22c3\ua4f4\uff35\ud801\udcce\ud806\udcb8\ud81b\udf42\ud835\udc14\ud835\udc48\ud835\udc7c\ud835\udcb0\ud835\udce4\ud835\udd18\ud835\udd4c\ud835\udd80\ud835\uddb4\ud835\udde8\ud835\ude1c\ud835\ude50\ud835\ude84", 47 | "V": "V\u0474\u0667\u06f7\u13d9\u142f\u2164\u2d38\ua4e6\ua6df\uff36\ud801\udd1d\ud806\udca0\ud81b\udf08\ud834\ude0d\ud835\udc15\ud835\udc49\ud835\udc7d\ud835\udcb1\ud835\udce5\ud835\udd19\ud835\udd4d\ud835\udd81\ud835\uddb5\ud835\udde9\ud835\ude1d\ud835\ude51\ud835\ude85", 48 | "W": "W\u051c\u13b3\u13d4\ua4ea\uff37\ud806\udce6\ud806\udcef\ud835\udc16\ud835\udc4a\ud835\udc7e\ud835\udcb2\ud835\udce6\ud835\udd1a\ud835\udd4e\ud835\udd82\ud835\uddb6\ud835\uddea\ud835\ude1e\ud835\ude52\ud835\ude86", 49 | "X": "X\u03a7\u0425\u166d\u16b7\u2169\u2573\u2cac\u2d5d\ua4eb\ua7b3\uff38\ud800\ude90\ud800\udeb4\ud800\udf17\ud800\udf22\ud801\udd27\ud806\udcec\ud835\udc17\ud835\udc4b\ud835\udc7f\ud835\udcb3\ud835\udce7\ud835\udd1b\ud835\udd4f\ud835\udd83\ud835\uddb7\ud835\uddeb\ud835\ude1f\ud835\ude53\ud835\ude87\ud835\udebe\ud835\udef8\ud835\udf32\ud835\udf6c\ud835\udfa6", 50 | "Y": "Y\u03a5\u03d2\u0423\u04ae\u13a9\u13bd\u2ca8\ua4ec\uff39\ud800\udeb2\ud806\udca4\ud81b\udf43\ud835\udc18\ud835\udc4c\ud835\udc80\ud835\udcb4\ud835\udce8\ud835\udd1c\ud835\udd50\ud835\udd84\ud835\uddb8\ud835\uddec\ud835\ude20\ud835\ude54\ud835\ude88\ud835\udebc\ud835\udef6\ud835\udf30\ud835\udf6a\ud835\udfa4", 51 | "Z": "Z\u0396\u13c3\u2124\u2128\ua4dc\uff3a\ud800\udef5\ud806\udca9\ud806\udce5\ud835\udc19\ud835\udc4d\ud835\udc81\ud835\udcb5\ud835\udce9\ud835\udd85\ud835\uddb9\ud835\udded\ud835\ude21\ud835\ude55\ud835\ude89\ud835\udead\ud835\udee7\ud835\udf21\ud835\udf5b\ud835\udf95" 52 | } 53 | -------------------------------------------------------------------------------- /code_soup/common/text/utils/json/keys_in_proximity.json: -------------------------------------------------------------------------------- 1 | { 2 | "a": [ 3 | "q", 4 | "w", 5 | "s", 6 | "x", 7 | "z" 8 | ], 9 | "b": [ 10 | "v", 11 | "g", 12 | "h", 13 | "n" 14 | ], 15 | "c": [ 16 | "x", 17 | "d", 18 | "f", 19 | "v" 20 | ], 21 | "d": [ 22 | "s", 23 | "e", 24 | "r", 25 | "f", 26 | "c", 27 | "x" 28 | ], 29 | "e": [ 30 | "w", 31 | "s", 32 | "d", 33 | "r" 34 | ], 35 | "f": [ 36 | "d", 37 | "r", 38 | "t", 39 | "g", 40 | "v", 41 | "c" 42 | ], 43 | "g": [ 44 | "f", 45 | "t", 46 | "y", 47 | "h", 48 | "b", 49 | "v" 50 | ], 51 | "h": [ 52 | "g", 53 | "y", 54 | "u", 55 | "j", 56 | "n", 57 | "b" 58 | ], 59 | "i": [ 60 | "u", 61 | "j", 62 | "k", 63 | "o" 64 | ], 65 | "j": [ 66 | "h", 67 | "u", 68 | "i", 69 | "k", 70 | "n", 71 | "m" 72 | ], 73 | "k": [ 74 | "j", 75 | "i", 76 | "o", 77 | "l", 78 | "m" 79 | ], 80 | "l": [ 81 | "k", 82 | "o", 83 | "p" 84 | ], 85 | "m": [ 86 | "n", 87 | "j", 88 | "k", 89 | "l" 90 | ], 91 | "n": [ 92 | "b", 93 | "h", 94 | "j", 95 | "m" 96 | ], 97 | "o": [ 98 | "i", 99 | "k", 100 | "l", 101 | "p" 102 | ], 103 | "p": [ 104 | "o", 105 | "l" 106 | ], 107 | "q": [ 108 | "w", 109 | "a", 110 | "s" 111 | ], 112 | "r": [ 113 | "e", 114 | "d", 115 | "f", 116 | "t" 117 | ], 118 | "s": [ 119 | 
"w", 120 | "e", 121 | "d", 122 | "x", 123 | "z", 124 | "a" 125 | ], 126 | "t": [ 127 | "r", 128 | "f", 129 | "g", 130 | "y" 131 | ], 132 | "u": [ 133 | "y", 134 | "h", 135 | "j", 136 | "i" 137 | ], 138 | "v": [ 139 | "c", 140 | "f", 141 | "g", 142 | "v", 143 | "b" 144 | ], 145 | "w": [ 146 | "q", 147 | "a", 148 | "s", 149 | "e" 150 | ], 151 | "x": [ 152 | "z", 153 | "s", 154 | "d", 155 | "c" 156 | ], 157 | "y": [ 158 | "t", 159 | "g", 160 | "h", 161 | "u" 162 | ], 163 | "z": [ 164 | "a", 165 | "s", 166 | "x" 167 | ], 168 | "A": [ 169 | "Q", 170 | "W", 171 | "S", 172 | "X", 173 | "Z" 174 | ], 175 | "B": [ 176 | "V", 177 | "G", 178 | "H", 179 | "N" 180 | ], 181 | "C": [ 182 | "X", 183 | "D", 184 | "F", 185 | "V" 186 | ], 187 | "D": [ 188 | "S", 189 | "E", 190 | "R", 191 | "F", 192 | "C", 193 | "X" 194 | ], 195 | "E": [ 196 | "W", 197 | "S", 198 | "D", 199 | "R" 200 | ], 201 | "F": [ 202 | "D", 203 | "R", 204 | "T", 205 | "G", 206 | "V", 207 | "C" 208 | ], 209 | "G": [ 210 | "F", 211 | "T", 212 | "Y", 213 | "H", 214 | "B", 215 | "V" 216 | ], 217 | "H": [ 218 | "G", 219 | "Y", 220 | "U", 221 | "J", 222 | "N", 223 | "B" 224 | ], 225 | "I": [ 226 | "U", 227 | "J", 228 | "K", 229 | "O" 230 | ], 231 | "J": [ 232 | "H", 233 | "U", 234 | "I", 235 | "K", 236 | "N", 237 | "M" 238 | ], 239 | "K": [ 240 | "J", 241 | "I", 242 | "O", 243 | "L", 244 | "M" 245 | ], 246 | "L": [ 247 | "K", 248 | "O", 249 | "P" 250 | ], 251 | "M": [ 252 | "N", 253 | "J", 254 | "K", 255 | "L" 256 | ], 257 | "N": [ 258 | "B", 259 | "H", 260 | "J", 261 | "M" 262 | ], 263 | "O": [ 264 | "I", 265 | "K", 266 | "L", 267 | "P" 268 | ], 269 | "P": [ 270 | "O", 271 | "L" 272 | ], 273 | "Q": [ 274 | "W", 275 | "A", 276 | "S" 277 | ], 278 | "R": [ 279 | "E", 280 | "D", 281 | "F", 282 | "T" 283 | ], 284 | "S": [ 285 | "W", 286 | "E", 287 | "D", 288 | "X", 289 | "Z", 290 | "A" 291 | ], 292 | "T": [ 293 | "R", 294 | "F", 295 | "G", 296 | "Y" 297 | ], 298 | "U": [ 299 | "Y", 300 | "H", 301 | "J", 302 | "I" 303 | ], 304 | "V": [ 305 | "C", 306 | "F", 307 | "G", 308 | "V", 309 | "B" 310 | ], 311 | "W": [ 312 | "Q", 313 | "A", 314 | "S", 315 | "E" 316 | ], 317 | "X": [ 318 | "Z", 319 | "S", 320 | "D", 321 | "C" 322 | ], 323 | "Y": [ 324 | "T", 325 | "G", 326 | "H", 327 | "U" 328 | ], 329 | "Z": [ 330 | "A", 331 | "S", 332 | "X" 333 | ] 334 | } 335 | -------------------------------------------------------------------------------- /code_soup/common/text/utils/perturbations.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import json 3 | import math 4 | import random 5 | import string 6 | from pathlib import Path 7 | 8 | import numpy as np 9 | 10 | 11 | class CharacterPerturbations(metaclass=abc.ABCMeta): 12 | """ 13 | An abstract class used to represent the character perturbations.SubClass should implement the apply method. 14 | Methods 15 | ------- 16 | apply(self, word: str, **kwargs) 17 | - applies the perturbation on the word and returns it. 18 | """ 19 | 20 | @abc.abstractmethod 21 | def apply(self, word: str, **kwargs): # pragma: no cover 22 | """Applies perturbation and returns the word.""" 23 | raise NotImplementedError 24 | 25 | def get_ignore_default_value(self): 26 | return True 27 | 28 | def get_string_not_a_word_error_msg(self): 29 | return "given string is not a word" 30 | 31 | 32 | class InsertSpaceCharacterPerturbations(CharacterPerturbations): 33 | """ 34 | A class used to apply space character perturbations. 
35 | Methods 36 | ------- 37 | apply(self, word: str, **kwargs) 38 | - applies the space perturbation on the word and returns it. 39 | """ 40 | 41 | def apply(self, word: str, char_perturb=False, **kwargs): 42 | """ 43 | Insert space or character at a random position in the word 44 | 45 | word="Somesh" 46 | edited_word=insert_space(word) 47 | print(edited_word) 48 | S omesh 49 | 50 | word="Hello" 51 | edited_word=insert_space(word,char_perturb=True) 52 | print(edited_word) 53 | Henllo 54 | 55 | :param 56 | :word: word to be edited 57 | :char_perturb: default(False), boolean, adds a character instead of spaces 58 | :ignore: default (True), boolean if assertions should be ignored 59 | -returns edited word a random space in between 60 | """ 61 | 62 | if kwargs.get("ignore", self.get_ignore_default_value()) and ( 63 | " " in word or len(word) < 2 64 | ): 65 | return word 66 | 67 | assert " " not in word, self.get_string_not_a_word_error_msg() 68 | 69 | assert ( 70 | len(word) >= 2 71 | ), "Word needs to have a minimum length of 2 for an insert operation" 72 | 73 | if char_perturb: 74 | index = random.randint(0, len(word)) # select random index 75 | return ( 76 | word[:index] + random.choice(string.ascii_letters[:26]) + word[index:] 77 | ) # insert character 78 | else: 79 | index = random.randint(1, len(word) - 1) # select random index 80 | return word[:index] + " " + word[index:] # insert space 81 | 82 | 83 | class ShuffleCharacterPerturbations(CharacterPerturbations): 84 | """ 85 | A class used to apply shuffle character perturbations. 86 | Methods 87 | ------- 88 | apply(self, word: str, **kwargs) 89 | - applies the shuffle perturbation on the word and returns it. 90 | """ 91 | 92 | def apply(self, word: str, **kwargs): 93 | """ 94 | if mid=True: 95 | shuffles the characters of a word at random, barring the initial and last character 96 | else: 97 | swaps any two characters of a word at random, barring the initial and last character 98 | 99 | 100 | word = "Adversarial" 101 | print(shuffle('Adversarial',mid=True)) 102 | Aaidsvrreal 103 | 104 | word = "WHAT" 105 | print(shuffle('WHAT',mid=False)) 106 | WAHT 107 | 108 | :param word : word to be shuffled 109 | :param mid : 110 | if set, it shuffle all the characters barring the initial and last 111 | if not set, it swap any two characters barring the initial and last 112 | 113 | 114 | returns shuffled word with first and last character intact 115 | 116 | """ 117 | if kwargs.get("ignore", self.get_ignore_default_value()) and ( 118 | " " in word or len(word) < 4 119 | ): 120 | return word 121 | 122 | assert " " not in word, self.get_string_not_a_word_error_msg() 123 | 124 | assert ( 125 | len(word) >= 4 126 | ), "Word needs to have a minimum length of 4 for a shuffle operation" 127 | 128 | if kwargs.get("mid", True): 129 | # Split word into first & last letter, and middle letters 130 | first, mid, last = word[0], word[1:-1], word[-1] 131 | 132 | mid = list(mid) 133 | random.shuffle(mid) 134 | 135 | return first + "".join(mid) + last 136 | else: 137 | char_list = list(word) 138 | index = random.randint(1, len(word) - 3) # select random offset for tuple 139 | char_list[index], char_list[index + 1] = ( 140 | char_list[index + 1], 141 | char_list[index], 142 | ) # swap tuple 143 | return "".join(char_list) 144 | 145 | 146 | class DeleteCharacterPerturbations(CharacterPerturbations): 147 | """ 148 | A class used to apply delete character perturbations. 
149 | Methods 150 | ------- 151 | apply(self, word: str, **kwargs) 152 | - applies the delete perturbation on the word and returns it. 153 | """ 154 | 155 | def apply(self, word: str, **kwargs): 156 | """ 157 | Deletes a random character which is not at the either end 158 | Implies that the word is at least three characters long 159 | 160 | word=input() 161 | 162 | #If input's length is less than 3 163 | delete(word) #Input He 164 | Assertion Error 165 | 166 | #If input's lenght is greater than or equal to 3 167 | delete(word) #Input Hey 168 | Hy 169 | 170 | :word: word to be edited 171 | :ignore: default (True), boolean if assertions should be ignored 172 | 173 | -returns word with random character deletion 174 | """ 175 | if kwargs.get("ignore", self.get_ignore_default_value()) and ( 176 | " " in word or len(word) < 3 177 | ): 178 | return word 179 | 180 | assert " " not in word, self.get_string_not_a_word_error_msg() 181 | 182 | assert ( 183 | len(word) >= 3 184 | ), "Word needs to have a minimum length of 3 characters for a delete operation" 185 | index = random.randint(1, len(word) - 2) # select random index 186 | return word[:index] + word[index + 1 :] # delete index 187 | 188 | 189 | class TypoCharacterPerturbations(CharacterPerturbations): 190 | """ 191 | A class used to apply typo character perturbations. 192 | Methods 193 | ------- 194 | apply(self, word: str, **kwargs) 195 | - applies the typo perturbation on the word and returns it. 196 | """ 197 | 198 | def apply(self, word: str, **kwargs): 199 | """ 200 | shifts a character by one keyboard space: 201 | one space up, down, left or right 202 | each word is typofied with some probability 'p': 203 | 1. (p*100) percent of character will become typos 204 | keyboard is defined as: 205 | qwertyuiop 206 | asdfghjkl 207 | zxcvbnm 208 | word = "Noise" 209 | print(typo('Noise',0.1)) 210 | Noide 211 | :param word : word to be shuffled 212 | :param probability: probability of a typo 213 | returns typofied word 214 | """ 215 | 216 | if kwargs.get("ignore", self.get_ignore_default_value()) and (" " in word): 217 | return word 218 | 219 | assert " " not in word, self.get_string_not_a_word_error_msg() 220 | 221 | word = list(word) 222 | chars = len(word) 223 | num_chars_to_shift = math.ceil(chars * kwargs.get("probability", 0.1)) 224 | 225 | # list of characters to be switched 226 | positions_to_shift = random.sample(range(chars), num_chars_to_shift) 227 | 228 | # defining a dictionary of keys located close to each character 229 | json_path = Path("code_soup/common/text/utils/json/keys_in_proximity.json") 230 | keys_in_proximity = json.load(open(json_path, "r")) 231 | 232 | for i, c in enumerate(word): 233 | # Check Upper 234 | 235 | # Check if in position and given keys 236 | if i in positions_to_shift and c in keys_in_proximity: 237 | word[i] = random.choice(keys_in_proximity[c]) 238 | 239 | # recombine 240 | word = "".join(word) 241 | return word 242 | 243 | 244 | class VisuallySimilarCharacterPerturbations(CharacterPerturbations): 245 | """ 246 | A class used to apply visually similar character perturbations. 247 | Methods 248 | ------- 249 | apply(self, word: str, **kwargs) 250 | - applies the visually similar perturbation on the word and returns it. 251 | """ 252 | 253 | def __init__(self, *args): 254 | """ 255 | args are the methods in which 256 | you want to perturb the word. 257 | Pass "unicode" and "homoglyph" as 258 | the args. 
259 | """ 260 | json_path = Path("code_soup/common/text/utils/json/homoglyph.json") 261 | 262 | self.homoglyph_dic = json.load(open(json_path, "r")) 263 | self.arg = args 264 | 265 | def apply(self, word: str, seed=None, **kwargs): 266 | """ 267 | unicode_array is a list of different unicodes. 268 | each char of the word is perturbed by a unicode chosen at random 269 | from the unicode_array. 270 | 271 | :word: word to be edited 272 | :ignore: default (True), boolean if assertions should be ignored 273 | 274 | eg: 275 | input : adversarial 276 | output : a̐d̅v̕e̒ŕŝa̅r̕îál̂ 277 | 278 | visual_similar_chars("Hey Stop") 279 | Hey Stop 280 | 281 | visual_similar_chars("Hey Stop", ignore=False) 282 | assertion error 283 | """ 284 | if seed is not None: 285 | np.random.seed(seed) 286 | 287 | if kwargs.get("ignore", self.get_ignore_default_value()) and " " in word: 288 | return word 289 | assert " " not in word, self.get_string_not_a_word_error_msg() 290 | 291 | unicode_array = np.array( 292 | [u"\u0301", u"\u0310", u"\u0305", u"\u0315", u"\u0312", u"\u0302"] 293 | ) 294 | method_pick = np.random.choice(len(self.arg), 1)[0] 295 | 296 | if self.arg[method_pick] == "unicode": 297 | char_array = np.array(list(word)) 298 | 299 | picked_unicode = np.random.choice(unicode_array, size=len(word)) 300 | 301 | perturbed_array = np.char.add(char_array, picked_unicode) 302 | return "".join(perturbed_array) 303 | 304 | if self.arg[method_pick] == "homoglyph": 305 | char_list = list(word) 306 | 307 | char_list_glyph = [] 308 | for char in char_list: 309 | if char in self.homoglyph_dic: 310 | glyph_string = self.homoglyph_dic[char] 311 | glyph_pick = np.random.choice(len(glyph_string), 1)[0] 312 | char_list_glyph.append(glyph_string[glyph_pick]) 313 | else: 314 | char_list_glyph.append(char) # pragma: no cover 315 | return "".join(char_list_glyph) 316 | -------------------------------------------------------------------------------- /code_soup/common/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from code_soup.common.utils.checkpoints import Checkpoints 2 | from code_soup.common.utils.seeding import Seeding 3 | -------------------------------------------------------------------------------- /code_soup/common/utils/checkpoints.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class Checkpoints: 5 | """ 6 | A class to save and load checkpoints 7 | """ 8 | 9 | @classmethod 10 | def save(self, PATH, model, optimizer, EPOCH=None, LOSS=None): 11 | """ 12 | Parameters 13 | ---------- 14 | PATH : str 15 | - The path where the model is saved 16 | model: 17 | - The model which is saved 18 | optimizer : torch.optim 19 | - Default: None , optimizer saved at the checkpoint 20 | EPOCH (optional): int 21 | - Default:None , epoch 22 | LOSS (optional): 23 | - Default: None , loss saved at the checkpoint 24 | 25 | Saves the model state and checkpoint state with optimizer and loss if specified 26 | """ 27 | checkpoint = { 28 | "model": model, 29 | "state_dict": model.state_dict(), 30 | "optimizer": optimizer.state_dict(), 31 | "epoch": EPOCH, 32 | "loss": LOSS, 33 | } 34 | torch.save(checkpoint, PATH) 35 | 36 | @classmethod 37 | def load(self, PATH): 38 | """ 39 | Paramters 40 | --------- 41 | PATH: str 42 | - The path from where the model is loaded 43 | 44 | Returns the loaded model 45 | """ 46 | checkpoint = torch.load(PATH) 47 | model = checkpoint["model"] 48 | 
model.load_state_dict(checkpoint["state_dict"]) 49 | model.eval() 50 | return model 51 | -------------------------------------------------------------------------------- /code_soup/common/utils/seeding.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | class Seeding: 8 | """ 9 | A class used for seeding 10 | 11 | Class Variables 12 | --------------- 13 | value 14 | - to store value of seed 15 | 16 | Class Methods 17 | ------------- 18 | seed(self, value) 19 | -Set random seed for everything 20 | """ 21 | 22 | value = 42 23 | 24 | @classmethod 25 | def seed(self, value): 26 | self.value = value 27 | np.random.seed(self.value) 28 | torch.manual_seed(self.value) 29 | random.seed(self.value) 30 | torch.backends.cudnn.deterministic = True 31 | torch.backends.cudnn.benchmark = False 32 | -------------------------------------------------------------------------------- /code_soup/common/vision/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/code_soup/common/vision/__init__.py -------------------------------------------------------------------------------- /code_soup/common/vision/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from code_soup.common.vision.datasets.image_classification import ( 2 | ImageClassificationDataset, 3 | ) 4 | from code_soup.common.vision.datasets.vision_dataset import ( # THE ABSTRACT DATASET CLASS 5 | VisionDataset, 6 | ) 7 | -------------------------------------------------------------------------------- /code_soup/common/vision/datasets/image_classification.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Tuple 2 | 3 | import torch 4 | import torchvision 5 | 6 | from code_soup.common.vision.datasets.vision_dataset import VisionDataset 7 | 8 | 9 | class ImageClassificationDataset(torch.utils.data.Dataset, VisionDataset): 10 | """ 11 | Image Classification Dataset Class, Inherits from VisionDataset Abstract class and Torch Dataset 12 | Parameters 13 | ---------- 14 | dataset : torchvision.datasets 15 | - A dataset from torchvision.datasets 16 | transform : torchvision.transforms 17 | - A transform to be applied on the dataset 18 | root : str 19 | - The path where downloads are stored 20 | train: bool 21 | - If the split is training or testing 22 | """ 23 | 24 | def __init__(self, dataset, transform, root="./input/data", train=True): 25 | self.data = dataset(root=root, train=train, download=True, transform=transform) 26 | 27 | def __len__(self) -> int: 28 | """ 29 | Returns 30 | ------- 31 | length : int 32 | - Length of the dataset 33 | """ 34 | return len(self.data) 35 | 36 | def __getitem__(self, idx: int) -> Tuple[Any, Any]: 37 | """ 38 | Returns 39 | ------- 40 | element : torch.Tensor 41 | - A element from the dataset 42 | """ 43 | return self.data.__getitem__(idx) 44 | -------------------------------------------------------------------------------- /code_soup/common/vision/datasets/readme.md: -------------------------------------------------------------------------------- 1 | # Vision Datasets 2 | 3 | List of implemented datasets 4 | --- 5 | 6 | 1. [MNIST] 7 | 2. 
[CIFAR] 8 | -------------------------------------------------------------------------------- /code_soup/common/vision/datasets/vision_dataset.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Tuple 3 | 4 | 5 | class VisionDataset(ABC): 6 | @classmethod 7 | @abstractmethod 8 | def __init__(self, dataset: callable, transform: callable, root: str, train: bool): 9 | """ 10 | Parameters 11 | ---------- 12 | dataset : torchvision.datasets 13 | - A dataset from torchvision.datasets 14 | transform : torchvision.transforms 15 | - A transform to be applied on the dataset 16 | root : str 17 | - The path where downloads are stored 18 | train: bool 19 | - If the split is training or testing 20 | """ 21 | pass 22 | 23 | @abstractmethod 24 | def __len__(self) -> int: 25 | """ 26 | Returns 27 | ------- 28 | length : int 29 | - Length of the dataset 30 | """ 31 | pass 32 | 33 | @abstractmethod 34 | def __getitem__(self, idx: int) -> Tuple[Any, Any]: 35 | """ 36 | Returns 37 | ------- 38 | element : torch.Tensor 39 | - A element from the dataset 40 | """ 41 | pass 42 | -------------------------------------------------------------------------------- /code_soup/common/vision/models/__init__.py: -------------------------------------------------------------------------------- 1 | from torchvision.models import ( 2 | alexnet, 3 | densenet121, 4 | densenet161, 5 | densenet169, 6 | densenet201, 7 | googlenet, 8 | inception_v3, 9 | mnasnet0_5, 10 | mnasnet0_75, 11 | mnasnet1_0, 12 | mnasnet1_3, 13 | mobilenet_v2, 14 | mobilenet_v3_large, 15 | mobilenet_v3_small, 16 | resnet18, 17 | resnet34, 18 | resnet50, 19 | resnet101, 20 | resnet152, 21 | resnext50_32x4d, 22 | resnext101_32x8d, 23 | shufflenet_v2_x0_5, 24 | shufflenet_v2_x1_0, 25 | shufflenet_v2_x1_5, 26 | shufflenet_v2_x2_0, 27 | squeezenet1_0, 28 | squeezenet1_1, 29 | vgg11, 30 | vgg13, 31 | vgg16, 32 | vgg19, 33 | wide_resnet50_2, 34 | wide_resnet101_2, 35 | ) 36 | 37 | from code_soup.common.vision.models.allconvnet import AllConvNet 38 | from code_soup.common.vision.models.nin import NIN 39 | from code_soup.common.vision.models.simple_cnn_classifier import SimpleCnnClassifier 40 | -------------------------------------------------------------------------------- /code_soup/common/vision/models/allconvnet.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | import torch.optim as optim 7 | 8 | 9 | class AllConvNet(nn.Module): 10 | """ 11 | Following the architecture given in the paper: `All Convolutional Network `_ 12 | Methods 13 | -------- 14 | forward(x) 15 | - return prediction tensor 16 | 17 | step(self, data) 18 | - Iterates the model for a single batch of data 19 | """ 20 | 21 | def __init__( 22 | self, image_size: int, n_classes: int, device: torch.device, lr: float 23 | ): 24 | """ 25 | Parameters 26 | ---------- 27 | image_size : int 28 | Number of input dimensions aka side length of image 29 | n_classes: int 30 | Number of classes 31 | device : torch.device 32 | Device to run the model on 33 | lr : float 34 | Learning rate 35 | """ 36 | super(AllConvNet, self).__init__() 37 | self.image_size = image_size 38 | self.n_classes = n_classes 39 | self.criterion = torch.nn.BCELoss() 40 | self.label = 1.0 41 | self.device = device 42 | self.lr = lr 43 | # Constructing the model as per the paper 44 | self.conv1 = 
nn.Conv2d(image_size, 96, 3, padding=2) 45 | self.conv2 = nn.Conv2d(96, 96, 3, padding=2) 46 | self.conv3 = nn.MaxPool2d(3, stride=2) 47 | self.conv4 = nn.Conv2d(96, 192, 3, padding=2) 48 | self.conv5 = nn.Conv2d(192, 192, 3, padding=2) 49 | self.conv6 = nn.MaxPool2d(3, stride=2) 50 | self.conv7 = nn.Conv2d(192, 192, 3, padding=2) 51 | self.conv8 = nn.Conv2d(192, 192, 1) 52 | self.conv9 = nn.Conv2d(10, 10, 1) 53 | self.class_conv = nn.Conv2d(192, n_classes, 1) 54 | self.optimizer = optim.Adam(self.parameters(), lr=lr) 55 | 56 | # Forward pass of the model 57 | def forward(self, x): 58 | """ 59 | Parameters 60 | ---------- 61 | x : torch.Tensor 62 | Input tensor 63 | Returns 64 | ------- 65 | output : torch.Tensor 66 | Generated sample 67 | """ 68 | conv1_out = F.relu(self.conv1(x)) 69 | conv2_out = F.relu(self.conv2(conv1_out)) 70 | conv3_out = F.relu(self.conv3(conv2_out)) 71 | conv4_out = F.relu(self.conv4(conv3_out)) 72 | conv5_out = F.relu(self.conv5(conv4_out)) 73 | conv6_out = F.relu(self.conv6(conv5_out)) 74 | conv7_out = F.relu(self.conv7(conv6_out)) 75 | conv8_out = F.relu(self.conv8(conv7_out)) 76 | class_out = F.relu(self.class_conv(conv8_out)) 77 | pool_out = F.adaptive_avg_pool2d(class_out, 1) 78 | pool_out.squeeze_(-1) 79 | pool_out = F.softmax(pool_out.squeeze_(-1)) 80 | return pool_out 81 | 82 | def step(self, data: torch.Tensor) -> Tuple: 83 | """ 84 | Iterates the model for a single batch of data, calculates the loss and updates the model parameters. 85 | Parameters 86 | ---------- 87 | data : torch.Tensor 88 | Batch of data 89 | Returns 90 | ------- 91 | avg_out: 92 | The average output (across the batch) of the model 93 | """ 94 | image, _ = data 95 | image = image.to(self.device) 96 | batch_size = image.shape[0] 97 | label = torch.full( 98 | (batch_size,), self.label, dtype=torch.float, device=self.device 99 | ) 100 | self.zero_grad() 101 | # Forward pass 102 | output = self(image).view(-1) 103 | # Calculate loss on a batch 104 | err = self.criterion(output, label) 105 | err.backward() 106 | avg_out = output.mean().item() 107 | self.optimizer.step() 108 | return avg_out 109 | -------------------------------------------------------------------------------- /code_soup/common/vision/models/nin.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | import torch.optim as optim 7 | 8 | 9 | class NIN(nn.Module): 10 | """ 11 | Following the architecture given in the paper: `Network in Network `_ 12 | Methods 13 | -------- 14 | forward(x) 15 | - return prediction tensor 16 | 17 | _initialize_weights(self) 18 | - Initializes the model with weights ans bias 19 | 20 | step(self, data) 21 | - Iterates the model for a single batch of data 22 | """ 23 | 24 | def __init__( 25 | self, input_size: int, n_classes: int, device: torch.device, lr: float 26 | ): 27 | """ 28 | Parameters 29 | ---------- 30 | input_size : int 31 | Number of input dimensions aka side length of image 32 | n_classes: int 33 | Number of classes 34 | device : torch.device 35 | Device to run the model on 36 | lr : float 37 | Learning rate 38 | """ 39 | super(NIN, self).__init__() 40 | self.input_size = input_size 41 | self.n_classes = n_classes 42 | self.device = device 43 | self.label = 1.0 44 | self.lr = lr 45 | self.criterion = torch.nn.BCELoss() 46 | self.conv1 = nn.Conv2d(input_size, 192, 5, padding=2) 47 | self.conv2 = nn.Conv2d(192, 160, 1) 48 | self.conv3 = 
nn.Conv2d(160, 96, 1) 49 | self.conv4 = nn.Conv2d(96, 192, 5, padding=6) 50 | self.conv5 = nn.Conv2d(192, 192, 1) 51 | self.conv6 = nn.Conv2d(192, 192, 1) 52 | self.conv7 = nn.Conv2d(192, 192, 3, padding=6) 53 | self.conv8 = nn.Conv2d(192, 192, 1) 54 | self.conv9 = nn.Conv2d(192, n_classes, 1) 55 | self.optimizer = optim.Adam(self.parameters(), lr=lr) 56 | self._initialize_weights() 57 | 58 | # forward pass of the model 59 | def forward(self, x): 60 | """ 61 | Parameters 62 | ---------- 63 | x : torch.Tensor 64 | Input tensor 65 | Returns 66 | ------- 67 | output : torch.Tensor 68 | Generated sample 69 | """ 70 | conv1_out = F.relu(self.conv1(x)) 71 | conv2_out = F.relu(self.conv2(conv1_out)) 72 | conv3_out = F.relu(self.conv3(conv2_out)) 73 | conv3_pool_out = F.max_pool2d( 74 | conv3_out, kernel_size=3, stride=2, ceil_mode=True 75 | ) 76 | conv3_drop_out = F.dropout(conv3_pool_out, 0.5) 77 | 78 | conv4_out = F.relu(self.conv4(conv3_drop_out)) 79 | conv5_out = F.relu(self.conv5(conv4_out)) 80 | conv6_out = F.relu(self.conv6(conv5_out)) 81 | conv6_pool_out = F.avg_pool2d(conv6_out, 1) 82 | conv6_drop_out = F.dropout(conv6_pool_out, 0.5) 83 | 84 | conv7_out = F.relu(self.conv7(conv6_drop_out)) 85 | conv8_out = F.relu(self.conv8(conv7_out)) 86 | conv9_out = F.relu(self.conv9(conv8_out)) 87 | pool_out = F.adaptive_avg_pool2d(conv9_out, 1) 88 | pool_out.squeeze_(-1) 89 | pool_out.squeeze_(-1) 90 | return pool_out 91 | 92 | def _initialize_weights(self): 93 | """ 94 | Initializes the model with weights and bias 95 | Conv layers get random weights from a normal distribution and bias is set to 0 96 | """ 97 | for m in self.modules(): 98 | if isinstance(m, nn.Conv2d): 99 | m.weight.data.normal_(0, 0.05) 100 | if m.bias is not None: 101 | m.bias.data.zero_() 102 | 103 | def step(self, data: torch.Tensor) -> Tuple: 104 | """ 105 | Iterates the model for a single batch of data, calculates the loss and updates the model parameters. 106 | Parameters 107 | ---------- 108 | data : torch.Tensor 109 | Batch of data 110 | Returns 111 | ------- 112 | avg_out: 113 | The average output (across the batch) of the model 114 | """ 115 | image, _ = data 116 | image = image.to(self.device) 117 | batch_size = image.shape[0] 118 | label = torch.full( 119 | (batch_size,), self.label, dtype=torch.float, device=self.device 120 | ) 121 | self.zero_grad() 122 | # Forward pass 123 | output = self(image).view(-1) 124 | # Calculate loss on a batch 125 | err = self.criterion(output, label) 126 | err.backward() 127 | avg_out = output.mean().item() 128 | self.optimizer.step() 129 | return avg_out 130 | -------------------------------------------------------------------------------- /code_soup/common/vision/models/readme.md: -------------------------------------------------------------------------------- 1 | # Common Models for Vision 2 | 3 | List of implemented models 4 | --- 5 | 1. [AllConvNet](allconvnet.py), [Striving for Simplicity: The All Convolutional Net](https://arxiv.org/abs/1412.6806) 6 | 2. [Network in Network](nin.py), [Network In Network](https://arxiv.org/abs/1312.4400) 7 | 8 | # Notes on existing Torchvision models 9 | 10 | | Model | Input sizes | 11 | | ------------- | ------------- | 12 | | AlexNet | Pre-trained to work on RGB images of sizes 256 x 256. Input to the first layer is a random crop of size 227 x 227 (not 224 x 224 as mentioned in the paper). The required minimum input size of the model is 227x227.| 13 | | VGG-net | Pre-trained to work on RGB images of sizes 256 x 256, cropped to 224 x 224. 
The required minimum input size of the model is 224x224. | 14 | | ResNet | Pre-trained to work on RGB images of sizes 256 x 256, cropped to 224 x 224. The required minimum input size of the model is 224x224. | 15 | | Inception-v3 | Pre-trained to work on RGB images of sizes 299 x 299. The pre-trained model, with default aux_logits=True, would work for images of size>=299x299 (example: ImageNet), but not for images of size<299x299 (example: CIFAR-10 and MNIST).| 16 | 17 | All other pre-trained models require a minimum input size of 224 x 224. 18 | -------------------------------------------------------------------------------- /code_soup/common/vision/models/simple_cnn_classifier.py: -------------------------------------------------------------------------------- 1 | """Implements a simple CNN Classifier model""" 2 | 3 | import torch.nn as nn 4 | 5 | 6 | class SimpleCnnClassifier(nn.Module): 7 | def __init__(self, input_shape=(1, 28, 28), num_labels=10): 8 | super().__init__() 9 | self.num_channels = input_shape[0] 10 | self.image_size = input_shape[1:] 11 | self.num_labels = num_labels 12 | self.model = nn.Sequential( 13 | nn.Conv2d( 14 | in_channels=self.num_channels, out_channels=32, kernel_size=(3, 3) 15 | ), 16 | nn.ReLU(), 17 | nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3)), 18 | nn.ReLU(), 19 | nn.MaxPool2d(kernel_size=(2, 2)), 20 | nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)), 21 | nn.ReLU(), 22 | nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3)), 23 | nn.ReLU(), 24 | nn.MaxPool2d(kernel_size=(2, 2)), 25 | nn.Flatten(), 26 | nn.Linear( 27 | in_features=(((self.image_size[0] - 4) // 2 - 4) // 2) 28 | * (((self.image_size[1] - 4) // 2 - 4) // 2) 29 | * 64, 30 | out_features=200, 31 | ), 32 | nn.ReLU(), 33 | nn.Linear(in_features=200, out_features=200), 34 | nn.ReLU(), 35 | nn.Linear(in_features=200, out_features=num_labels), 36 | ) 37 | 38 | def forward(self, x): 39 | return self.model(x) 40 | -------------------------------------------------------------------------------- /code_soup/common/vision/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/code_soup/common/vision/utils/__init__.py -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | files = code_soup 3 | pretty = True 4 | show_error_codes = True 5 | 6 | check_untyped_defs = True 7 | disallow_incomplete_defs = True 8 | disallow_subclassing_any = True 9 | disallow_untyped_decorators = True 10 | disallow_untyped_defs = True 11 | no_implicit_optional = True 12 | strict_equality = True 13 | warn_redundant_casts = True 14 | warn_unused_configs = True 15 | warn_unused_ignores = True 16 | 17 | [mypy-numpy.*] 18 | ignore_missing_imports = True 19 | 20 | [mypy-torchvision.*] 21 | ignore_missing_imports = True 22 | 23 | [mypy-scipy.*] 24 | ignore_missing_imports = True 25 | 26 | [mypy-cv2.*] 27 | ignore_missing_imports = True 28 | 29 | [mypy-PIL.*] 30 | ignore_missing_imports = True 31 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.21.1 2 | Pillow==8.3.1 3 | torch==1.9.0 4 | torchvision==0.10.0 5 | parameterized==0.8.1 6 | scipy==1.6.2 7 | 
opencv-python==4.5.3.56 8 | mypy==0.910 9 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 128 3 | ignore = E203,E231,E305,E402,E721,E722,E741,F401,F403,F405,F821,F841,F999,W503 4 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_ch5/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/tests/test_ch5/__init__.py -------------------------------------------------------------------------------- /tests/test_ch5/test_algorithms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/tests/test_ch5/test_algorithms/__init__.py -------------------------------------------------------------------------------- /tests/test_ch5/test_algorithms/test_atn.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from code_soup.ch5.algorithms.atn import ATNBase, SimpleAAE, SimplePATN 7 | 8 | 9 | class TestATNBase(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls) -> None: 12 | cls.classifier_model = nn.Sequential( 13 | nn.Flatten(), nn.Linear(784, 10), nn.Softmax(dim=1) 14 | ) 15 | cls.model = ATNBase( 16 | classifier_model=cls.classifier_model, 17 | target_idx=2, 18 | device=torch.device("cpu"), 19 | ) 20 | 21 | def test_init_with_alpha_less_than_one(self): 22 | with self.assertRaises(ValueError): 23 | model = ATNBase(classifier_model=nn.Linear(1, 1), target_idx=0, alpha=0.5) 24 | 25 | def test_rerank(self): 26 | softmax_logits = torch.tensor( 27 | [ 28 | [ 29 | 0.11319724, 30 | 0.02375807, 31 | 0.17357929, 32 | 0.31361626, 33 | 0.01670836, 34 | 0.04249263, 35 | 0.05608005, 36 | 0.11300851, 37 | 0.09807534, 38 | 0.04948426, 39 | ], 40 | [ 41 | 0.01500716, 42 | 0.01716916, 43 | 0.04945158, 44 | 0.07586802, 45 | 0.0117808, 46 | 0.09239875, 47 | 0.13248007, 48 | 0.15326169, 49 | 0.17005756, 50 | 0.2825252, 51 | ], 52 | ] 53 | ) 54 | 55 | rerank_logits = self.model.rerank(softmax_logits) 56 | print(rerank_logits) 57 | self.assertEqual(rerank_logits.shape, (2, 10)) 58 | 59 | def test_forward(self): 60 | 61 | with self.assertRaises(NotImplementedError): 62 | out = self.model.forward(torch.tensor([])) 63 | 64 | def test_compute_loss(self): 65 | x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) 66 | x_hat = torch.tensor([[1.1, 2.1, 3.1], [4.1, 5.1, 6.1]]) 67 | 68 | y = torch.tensor([[2.0, 3.0, 4.0], [5.0, 6.0, 7.0]]) 69 | y_hat = torch.tensor([[2.1, 3.1, 4.1], [5.1, 6.1, 7.1]]) 70 | 71 | loss = self.model.compute_loss(x, x_hat, y, y_hat) 72 | 73 | self.assertTrue(isinstance(loss, torch.Tensor)) 74 | self.assertEqual(tuple(loss.shape), ()) 75 | 76 | def test_step(self): 77 | with self.assertRaises(NotImplementedError): 78 | adv_out, adv_logits = self.model.step((torch.tensor([]), 
torch.tensor([]))) 79 | 80 | 81 | class TestSimpleAAE(unittest.TestCase): 82 | @classmethod 83 | def setUpClass(cls) -> None: 84 | classifier_model = nn.Sequential( 85 | nn.Flatten(), nn.Linear(9, 10), nn.Softmax(dim=1) 86 | ) 87 | cls.model_a = SimpleAAE( 88 | classifier_model=classifier_model, 89 | target_idx=2, 90 | device=torch.device("cpu"), 91 | input_shape=(1, 3, 3), 92 | ) 93 | 94 | cls.model_b = SimpleAAE( 95 | classifier_model=classifier_model, 96 | target_idx=2, 97 | device=torch.device("cpu"), 98 | input_shape=(1, 3, 3), 99 | typ="b", 100 | ) 101 | 102 | cls.model_c = SimpleAAE( 103 | classifier_model=classifier_model, 104 | target_idx=2, 105 | device=torch.device("cpu"), 106 | input_shape=(1, 3, 3), 107 | typ="c", 108 | ) 109 | 110 | def test_forward(self): 111 | x = torch.tensor( 112 | [ 113 | [ 114 | [ 115 | [0.1677, 1.3509, -0.8152], 116 | [-0.6369, -1.2858, 0.4709], 117 | [0.8874, 0.0070, 0.1990], 118 | ] 119 | ], 120 | [ 121 | [ 122 | [-1.4228, 0.6089, -0.2605], 123 | [-0.3259, -0.1384, -0.8231], 124 | [-2.6140, 0.3131, -0.4660], 125 | ] 126 | ], 127 | ] 128 | ) 129 | 130 | adv_out, adv_logits = self.model_a(x) 131 | 132 | self.assertTrue(isinstance(adv_out, torch.Tensor)) 133 | self.assertEqual(tuple(adv_out.shape), (2, 1, 3, 3)) 134 | 135 | self.assertTrue(isinstance(adv_logits, torch.Tensor)) 136 | self.assertEqual(tuple(adv_logits.shape), (2, 10)) 137 | 138 | adv_out, adv_logits = self.model_b(x) 139 | 140 | self.assertTrue(isinstance(adv_out, torch.Tensor)) 141 | self.assertEqual(tuple(adv_out.shape), (2, 1, 3, 3)) 142 | 143 | self.assertTrue(isinstance(adv_logits, torch.Tensor)) 144 | self.assertEqual(tuple(adv_logits.shape), (2, 10)) 145 | 146 | adv_out, adv_logits = self.model_c(x) 147 | 148 | self.assertTrue(isinstance(adv_out, torch.Tensor)) 149 | self.assertEqual(tuple(adv_out.shape), (2, 1, 3, 3)) 150 | 151 | self.assertTrue(isinstance(adv_logits, torch.Tensor)) 152 | self.assertEqual(tuple(adv_logits.shape), (2, 10)) 153 | 154 | def test_step(self): 155 | x = torch.tensor( 156 | [ 157 | [ 158 | [ 159 | [0.1677, 1.3509, -0.8152], 160 | [-0.6369, -1.2858, 0.4709], 161 | [0.8874, 0.0070, 0.1990], 162 | ] 163 | ], 164 | [ 165 | [ 166 | [-1.4228, 0.6089, -0.2605], 167 | [-0.3259, -0.1384, -0.8231], 168 | [-2.6140, 0.3131, -0.4660], 169 | ] 170 | ], 171 | ] 172 | ) 173 | 174 | y = torch.tensor([]) 175 | 176 | adv_out, adv_logits, loss_item = self.model_a.step((x, y)) 177 | 178 | self.assertTrue(isinstance(adv_out, torch.Tensor)) 179 | self.assertEqual(tuple(adv_out.shape), (2, 1, 3, 3)) 180 | 181 | self.assertTrue(isinstance(adv_logits, torch.Tensor)) 182 | self.assertEqual(tuple(adv_logits.shape), (2, 10)) 183 | 184 | self.assertTrue(isinstance(loss_item, float)) 185 | 186 | 187 | class TestSimplePATN(unittest.TestCase): 188 | @classmethod 189 | def setUpClass(cls) -> None: 190 | classifier_model = nn.Sequential( 191 | nn.Flatten(), nn.Linear(9, 10), nn.Softmax(dim=1) 192 | ) 193 | cls.model = SimplePATN( 194 | classifier_model=classifier_model, 195 | target_idx=2, 196 | device=torch.device("cpu"), 197 | input_shape=(1, 3, 3), 198 | ) 199 | 200 | def test_forward(self): 201 | x = torch.tensor( 202 | [ 203 | [ 204 | [ 205 | [0.1677, 1.3509, -0.8152], 206 | [-0.6369, -1.2858, 0.4709], 207 | [0.8874, 0.0070, 0.1990], 208 | ] 209 | ], 210 | [ 211 | [ 212 | [-1.4228, 0.6089, -0.2605], 213 | [-0.3259, -0.1384, -0.8231], 214 | [-2.6140, 0.3131, -0.4660], 215 | ] 216 | ], 217 | ] 218 | ) 219 | 220 | adv_out, adv_logits = self.model(x) 221 | 222 | 
self.assertTrue(isinstance(adv_out, torch.Tensor)) 223 | self.assertEqual(tuple(adv_out.shape), (2, 1, 3, 3)) 224 | 225 | self.assertTrue(isinstance(adv_logits, torch.Tensor)) 226 | self.assertEqual(tuple(adv_logits.shape), (2, 10)) 227 | 228 | def test_step(self): 229 | x = torch.tensor( 230 | [ 231 | [ 232 | [ 233 | [0.1677, 1.3509, -0.8152], 234 | [-0.6369, -1.2858, 0.4709], 235 | [0.8874, 0.0070, 0.1990], 236 | ] 237 | ], 238 | [ 239 | [ 240 | [-1.4228, 0.6089, -0.2605], 241 | [-0.3259, -0.1384, -0.8231], 242 | [-2.6140, 0.3131, -0.4660], 243 | ] 244 | ], 245 | ] 246 | ) 247 | 248 | y = torch.tensor([]) 249 | 250 | adv_out, adv_logits, loss_item = self.model.step((x, y)) 251 | 252 | self.assertTrue(isinstance(adv_out, torch.Tensor)) 253 | self.assertEqual(tuple(adv_out.shape), (2, 1, 3, 3)) 254 | 255 | self.assertTrue(isinstance(adv_logits, torch.Tensor)) 256 | self.assertEqual(tuple(adv_logits.shape), (2, 10)) 257 | 258 | self.assertTrue(isinstance(loss_item, float)) 259 | -------------------------------------------------------------------------------- /tests/test_ch5/test_algorithms/test_gan.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from code_soup.ch5 import GAN, Discriminator, Generator 7 | 8 | 9 | class TestDiscriminator(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls) -> None: 12 | cls.model = Discriminator(image_size=28, channels=1, lr=0.002) 13 | 14 | def test_discriminator_output_shape(self): 15 | input_data = torch.randn(64, 1, 28, 28) 16 | output = self.model(input_data) 17 | self.assertEqual(output.shape, torch.Size([64, 1])) 18 | 19 | def test_discriminator_variable_layer_weights(self): 20 | self.assertEqual( 21 | self.model.main[0].weight.data.shape, torch.Size([1024, 28 * 28]) 22 | ) 23 | 24 | 25 | class TestGenerator(unittest.TestCase): 26 | @classmethod 27 | def setUpClass(cls) -> None: 28 | cls.model = Generator(image_size=28, channels=1, latent_dims=128, lr=0.02) 29 | 30 | def test_generator_output_shape(self): 31 | input_data = torch.randn(64, 128) 32 | output = self.model(input_data) 33 | self.assertEqual(output.shape, torch.Size([64, 1, 28, 28])) 34 | 35 | def test_generator_variable_layer_weights(self): 36 | self.assertEqual(self.model.main[0].weight.data.shape, torch.Size([256, 128])) 37 | self.assertEqual(self.model.main[-2].weight.data.shape, torch.Size([784, 1024])) 38 | 39 | 40 | class TestGAN(unittest.TestCase): 41 | @classmethod 42 | def setUpClass(cls) -> None: 43 | cls.model = GAN( 44 | image_size=28, 45 | channels=1, 46 | latent_dims=128, 47 | device=torch.device("cpu"), 48 | lr=0.02, 49 | ) 50 | 51 | def test_step(self): 52 | self.model.step([torch.randn(4, 28, 28), torch.ones(4)]) 53 | -------------------------------------------------------------------------------- /tests/test_ch5/test_algorithms/test_one_pixel_attack.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from code_soup.ch5 import OnePixelAttack 7 | 8 | 9 | class TestOnePixelAttack(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls) -> None: 12 | model_to_attack = nn.Sequential(nn.Flatten(), nn.Linear(2 * 2 * 3, 10)).cpu() 13 | cls.model = OnePixelAttack(model=model_to_attack) 14 | 15 | def test_step(self): 16 | self.model.step( 17 | [torch.randn(4, 3, 2, 2).cpu(), torch.zeros(4).cpu()], 18 | ["car"], 19 | pixels_perturbed=1, 20 | 
targeted=False, 21 | maxiter=1, 22 | popsize=1, 23 | verbose=False, 24 | ) 25 | -------------------------------------------------------------------------------- /tests/test_ch5/test_algorithms/test_zoo_attack.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from copy import deepcopy 3 | 4 | import numpy as np 5 | import torch 6 | import torch.nn as nn 7 | import torchvision 8 | 9 | from code_soup.ch5 import ZooAttack 10 | 11 | 12 | class TestZooAttack(unittest.TestCase): 13 | @classmethod 14 | def setUpClass(cls) -> None: 15 | 16 | cls.orig_img = torch.tensor( 17 | [ 18 | [ 19 | [ 20 | [-1.6505998, -1.0305759, 1.0229983], 21 | [-0.49261865, 1.0394262, -2.0290275], 22 | [0.21951008, -2.1673787, -0.38990623], 23 | [-0.2866124, 1.0799991, -0.11442444], 24 | ], 25 | [ 26 | [-0.7052935, -0.5529446, 0.26524046], 27 | [-1.0540642, 0.6887131, 1.6723113], 28 | [1.1097006, 2.1335971, 0.9231482], 29 | [0.37910375, -0.12366215, -0.25093704], 30 | ], 31 | [ 32 | [-1.9404864, -1.3078933, 0.88476175], 33 | [0.35099706, -1.254437, 0.05408821], 34 | [0.7342985, -0.43663985, 0.11520719], 35 | [-0.07479854, -2.5859993, 1.4102333], 36 | ], 37 | [ 38 | [0.21304935, -0.3496548, -0.19856042], 39 | [-0.434919, -0.27774376, 1.1471609], 40 | [1.4504786, 0.67261624, -0.23560882], 41 | [1.0592173, 0.6655428, 1.1890292], 42 | ], 43 | ] 44 | ], 45 | dtype=torch.float32, 46 | ) 47 | 48 | cls.modifier = np.array( 49 | [ 50 | [ 51 | [ 52 | [-0.21563086, 0.54629284, 1.0879989], 53 | [-0.17234534, 0.37302095, 1.5072422], 54 | [-0.14709516, -0.08446954, -1.0199878], 55 | [-0.46581882, 0.41346493, -1.6357177], 56 | ], 57 | [ 58 | [0.97039294, -0.46038368, -0.5377948], 59 | [-0.08285582, -1.4017423, -0.6447743], 60 | [-0.6031785, -2.003339, -0.01103557], 61 | [0.41714168, -1.94303, 0.6685426], 62 | ], 63 | [ 64 | [-0.83851266, 0.79823476, 0.2532903], 65 | [-0.76351106, 0.90984505, 1.331635], 66 | [-1.1300149, -0.8444777, -2.2185612], 67 | [1.0166003, 0.9233805, 0.98315567], 68 | ], 69 | [ 70 | [-0.88205546, -0.3438152, -0.36559045], 71 | [0.56274384, 1.5836877, -1.2370849], 72 | [1.4234338, -0.5929535, -1.3011148], 73 | [0.84160084, 0.90161383, 0.80880517], 74 | ], 75 | ] 76 | ], 77 | dtype=np.float32, 78 | ) 79 | 80 | cls.labels = torch.tensor([[0, 1]]) 81 | 82 | cls.config = { 83 | "binary_search_steps": 1, 84 | "max_iterations": 100, 85 | "learning_rate": 2e-3, 86 | "abort_early": True, 87 | "targeted": True, 88 | "confidence": 0, 89 | "initial_const": 0.5, 90 | "use_log": False, 91 | "use_tanh": True, 92 | "reset_adam_after_found": True, 93 | "batch_size": 4, 94 | "const": 0.5, 95 | "early_stop_iters": 0, 96 | "adam_beta1": 0.9, 97 | "adam_beta2": 0.999, 98 | "use_importance": True, 99 | "use_resize": False, 100 | "init_size": 4, 101 | "adam_eps": 1e-8, 102 | "resize_iter_1": 2000, 103 | "resize_iter_2": 10000, 104 | } 105 | cls.model = nn.Sequential( 106 | nn.Conv2d( 107 | in_channels=3, out_channels=1, kernel_size=2, padding=0, bias=False 108 | ), 109 | nn.Flatten(), 110 | nn.Linear(4 * 4 * 3, 2, bias=False), 111 | ) 112 | 113 | with torch.no_grad(): 114 | cls.model[0].weight = nn.Parameter( 115 | torch.tensor( 116 | [ 117 | [ 118 | [[0.18992287], [-0.6111586], [-0.41560256]], 119 | [[0.19819254], [0.06157357], [-0.29873127]], 120 | ], 121 | [ 122 | [[0.08528781], [-0.4988662], [0.51414317]], 123 | [[0.5520558], [0.35638297], [0.29052997]], 124 | ], 125 | ] 126 | ).permute(3, 2, 0, 1) 127 | ) 128 | cls.model[2].weight = nn.Parameter( 129 | torch.tensor( 130 | [ 
131 | [0.26311237, 0.7238547], 132 | [-0.2869757, -0.6140047], 133 | [-0.11846703, -0.57517225], 134 | [-0.72543985, 0.6393444], 135 | [0.45188862, 0.35718697], 136 | [-0.7197881, 0.17988789], 137 | [0.18161213, 0.32464463], 138 | [0.37511164, 0.07291293], 139 | [-0.27989575, -0.37013885], 140 | ] 141 | ).T 142 | ) 143 | cls.attack = ZooAttack( 144 | model=cls.model, 145 | config=cls.config, 146 | input_image_shape=cls.orig_img.shape[1:], 147 | device="cpu:0", 148 | ) 149 | 150 | def test_get_perturbed_image(self): 151 | perturbed_image = self.attack.get_perturbed_image(self.orig_img, self.modifier) 152 | self.assertEqual(perturbed_image.shape, self.orig_img.shape) 153 | 154 | output = torch.tanh(self.orig_img + self.modifier) / 2 155 | self.assertTrue(torch.allclose(perturbed_image, output, atol=1e-5)) 156 | 157 | # Without Tanh 158 | attack = deepcopy(self.attack) 159 | attack.config["use_tanh"] = False 160 | 161 | perturbed_image_2 = attack.get_perturbed_image(self.orig_img, self.modifier) 162 | self.assertEqual(perturbed_image_2.shape, self.orig_img.shape) 163 | 164 | output_2 = self.orig_img + torch.from_numpy(self.modifier) 165 | self.assertTrue(torch.allclose(perturbed_image_2, output_2, atol=1e-5)) 166 | 167 | # Integration Test 168 | self.assertTrue( 169 | torch.allclose( 170 | perturbed_image, 171 | torch.tensor( 172 | [ 173 | [ 174 | [-0.47662562, -0.22483358, 0.4855427], 175 | [-0.2908287, 0.44400635, -0.23953833], 176 | [0.0361443, -0.4890532, -0.44373578], 177 | [-0.3182986, 0.45198008, -0.47069582], 178 | ], 179 | [ 180 | [0.12952949, -0.38356757, -0.13300003], 181 | [-0.4066872, -0.30628642, 0.38645932], 182 | [0.23361546, 0.06476317, 0.36107236], 183 | [0.33096626, -0.48422426, 0.19745564], 184 | ], 185 | [ 186 | [-0.49615836, -0.23483951, 0.40687856], 187 | [-0.19530259, -0.16578534, 0.44111317], 188 | [-0.18813889, -0.42839125, -0.4853233], 189 | [0.3680245, -0.46528453, 0.49172965], 190 | ], 191 | [ 192 | [-0.2921629, -0.3001033, -0.25552535], 193 | [0.06356658, 0.43162277, -0.04484118], 194 | [0.49682045, 0.03974732, -0.4557841], 195 | [0.4781537, 0.4582861, 0.4819371], 196 | ], 197 | ], 198 | ), 199 | ) 200 | ) 201 | 202 | def test_l2_distance_loss(self): 203 | new_img = self.attack.get_perturbed_image(self.orig_img, self.modifier) 204 | loss = self.attack.l2_distance_loss(self.orig_img, new_img) 205 | self.assertEqual(loss.shape[0], self.orig_img.shape[0]) 206 | 207 | # Without Tanh 208 | attack = deepcopy(self.attack) 209 | attack.config["use_tanh"] = False 210 | 211 | new_img_2 = attack.get_perturbed_image(self.orig_img, self.modifier) 212 | loss_2 = attack.l2_distance_loss(self.orig_img, new_img_2) 213 | self.assertEqual(loss_2.shape[0], self.orig_img.shape[0]) 214 | 215 | # Integration Test 216 | self.assertTrue(np.allclose(np.array([3.7336116]), loss, atol=1e-5)) 217 | 218 | def test_confidence_loss(self): 219 | new_img = self.attack.get_perturbed_image(self.orig_img, self.modifier) 220 | loss, model_output = self.attack.confidence_loss(new_img, self.labels) 221 | 222 | self.assertEqual(loss.shape[0], new_img.shape[0]) 223 | 224 | # With Log and Untargeted 225 | attack = deepcopy(self.attack) 226 | attack.config["use_log"] = True 227 | attack.config["targeted"] = False 228 | 229 | new_img_2 = attack.get_perturbed_image(self.orig_img, self.modifier) 230 | loss_2, model_output = attack.confidence_loss(new_img_2, self.labels) 231 | 232 | self.assertEqual(loss_2.shape[0], new_img_2.shape[0]) 233 | 234 | # Integration Test 235 | 
self.assertTrue(np.allclose(np.array([0.2148518]), loss, atol=1e-5)) 236 | 237 | def test_zero_order_gradients(self): 238 | losses = np.random.randn(2 * self.config["batch_size"] + 1) 239 | grads = self.attack.zero_order_gradients(losses) 240 | self.assertEqual(grads.shape, (self.config["batch_size"],)) 241 | 242 | def test_total_loss(self): 243 | new_img = self.attack.get_perturbed_image(self.orig_img, self.modifier) 244 | 245 | loss, l2_loss, confidence_loss, model_output = self.attack.total_loss( 246 | self.orig_img, new_img, self.labels, self.config["initial_const"] 247 | ) 248 | self.assertEqual(loss.shape[0], self.orig_img.shape[0]) 249 | 250 | self.assertEqual(confidence_loss.shape[0], self.orig_img.shape[0]) 251 | 252 | self.assertEqual(l2_loss.shape[0], self.orig_img.shape[0]) 253 | 254 | self.assertEqual(model_output.shape, self.labels.shape) 255 | 256 | def test_max_pooling(self): 257 | modifier = self.modifier[0][:, :, 0] 258 | pooled_output = self.attack.max_pooling(modifier, 2) 259 | self.assertEqual(pooled_output.shape, modifier.shape) 260 | 261 | # Integration Test 262 | self.assertTrue( 263 | np.allclose( 264 | pooled_output, 265 | np.array( 266 | [ 267 | [ 268 | 0.97039294, 269 | 0.97039294, 270 | 0.41714168, 271 | 0.41714168, 272 | ], 273 | [ 274 | 0.97039294, 275 | 0.97039294, 276 | 0.41714168, 277 | 0.41714168, 278 | ], 279 | [ 280 | 0.56274384, 281 | 0.56274384, 282 | 1.4234338, 283 | 1.4234338, 284 | ], 285 | [ 286 | 0.56274384, 287 | 0.56274384, 288 | 1.4234338, 289 | 1.4234338, 290 | ], 291 | ] 292 | ), 293 | atol=1e-5, 294 | ) 295 | ) 296 | 297 | def test_coordinate_adam(self): 298 | 299 | # With Proj True 300 | attack = deepcopy(self.attack) 301 | attack.config["use_tanh"] = False 302 | attack.up = 0.5 - self.orig_img.numpy().reshape(-1) 303 | attack.down = -0.5 - self.orig_img.numpy().reshape(-1) 304 | indices = np.array([15, 24, 32, 45]) 305 | 306 | grad = np.array([2000.0, 3500.0, -1000.0, -1500.0]) 307 | 308 | proj = not attack.config["use_tanh"] 309 | 310 | modifier = deepcopy(self.modifier) 311 | 312 | attack.coordinate_adam(indices, grad, modifier, proj) 313 | 314 | self.assertTrue( 315 | np.allclose( 316 | attack.mt_arr, 317 | np.array( 318 | [ 319 | 0.0, 320 | 0.0, 321 | 0.0, 322 | 0.0, 323 | 0.0, 324 | 0.0, 325 | 0.0, 326 | 0.0, 327 | 0.0, 328 | 0.0, 329 | 0.0, 330 | 0.0, 331 | 0.0, 332 | 0.0, 333 | 0.0, 334 | 200.0, 335 | 0.0, 336 | 0.0, 337 | 0.0, 338 | 0.0, 339 | 0.0, 340 | 0.0, 341 | 0.0, 342 | 0.0, 343 | 350.0, 344 | 0.0, 345 | 0.0, 346 | 0.0, 347 | 0.0, 348 | 0.0, 349 | 0.0, 350 | 0.0, 351 | -100.0, 352 | 0.0, 353 | 0.0, 354 | 0.0, 355 | 0.0, 356 | 0.0, 357 | 0.0, 358 | 0.0, 359 | 0.0, 360 | 0.0, 361 | 0.0, 362 | 0.0, 363 | 0.0, 364 | -150.0, 365 | 0.0, 366 | 0.0, 367 | ], 368 | ), 369 | atol=1e-5, 370 | ) 371 | ) 372 | 373 | self.assertTrue( 374 | np.allclose( 375 | attack.vt_arr, 376 | np.array( 377 | [ 378 | 0.0, 379 | 0.0, 380 | 0.0, 381 | 0.0, 382 | 0.0, 383 | 0.0, 384 | 0.0, 385 | 0.0, 386 | 0.0, 387 | 0.0, 388 | 0.0, 389 | 0.0, 390 | 0.0, 391 | 0.0, 392 | 0.0, 393 | 4000.0, 394 | 0.0, 395 | 0.0, 396 | 0.0, 397 | 0.0, 398 | 0.0, 399 | 0.0, 400 | 0.0, 401 | 0.0, 402 | 12250.0, 403 | 0.0, 404 | 0.0, 405 | 0.0, 406 | 0.0, 407 | 0.0, 408 | 0.0, 409 | 0.0, 410 | 1000.0, 411 | 0.0, 412 | 0.0, 413 | 0.0, 414 | 0.0, 415 | 0.0, 416 | 0.0, 417 | 0.0, 418 | 0.0, 419 | 0.0, 420 | 0.0, 421 | 0.0, 422 | 0.0, 423 | 2250.0, 424 | 0.0, 425 | 0.0, 426 | ], 427 | ), 428 | atol=1e-5, 429 | ) 430 | ) 431 | 432 | self.assertTrue( 433 | np.allclose( 434 | 
modifier, 435 | np.array( 436 | [ 437 | [ 438 | [-0.21563086, 0.54629284, 1.0879989], 439 | [-0.17234534, 0.37302095, 1.5072422], 440 | [-0.14709516, -0.08446954, -1.0199878], 441 | [-0.46581882, 0.41346493, -1.6357177], 442 | ], 443 | [ 444 | [0.97039294, -0.46038368, -0.5377948], 445 | [0.55406415, -1.4017423, -0.6447743], 446 | [-0.6031785, -2.003339, -0.01103557], 447 | [0.41714168, -1.94303, 0.6685426], 448 | ], 449 | [ 450 | [1.4404864, 0.79823476, 0.2532903], 451 | [-0.76351106, 0.90984505, 1.331635], 452 | [-1.1300149, -0.8444777, -0.6152072], 453 | [1.0166003, 0.9233805, 0.98315567], 454 | ], 455 | [ 456 | [-0.88205546, -0.3438152, -0.36559045], 457 | [0.56274384, 1.5836877, -1.2370849], 458 | [1.4234338, -0.5929535, -1.3011148], 459 | [-0.55921733, 0.90161383, 0.80880517], 460 | ], 461 | ] 462 | ), 463 | atol=1e-5, 464 | ) 465 | ) 466 | 467 | self.assertTrue( 468 | ( 469 | attack.adam_epochs 470 | == np.array( 471 | [ 472 | 1, 473 | 1, 474 | 1, 475 | 1, 476 | 1, 477 | 1, 478 | 1, 479 | 1, 480 | 1, 481 | 1, 482 | 1, 483 | 1, 484 | 1, 485 | 1, 486 | 1, 487 | 2, 488 | 1, 489 | 1, 490 | 1, 491 | 1, 492 | 1, 493 | 1, 494 | 1, 495 | 1, 496 | 2, 497 | 1, 498 | 1, 499 | 1, 500 | 1, 501 | 1, 502 | 1, 503 | 1, 504 | 2, 505 | 1, 506 | 1, 507 | 1, 508 | 1, 509 | 1, 510 | 1, 511 | 1, 512 | 1, 513 | 1, 514 | 1, 515 | 1, 516 | 1, 517 | 2, 518 | 1, 519 | 1, 520 | ], 521 | ) 522 | ).all(), 523 | ) 524 | 525 | # Integration Test 526 | # Without Proj True 527 | attack = deepcopy(self.attack) 528 | indices = np.array([15, 24, 32, 45]) 529 | 530 | grad = np.array([2000.0, 3500.0, -1000.0, -1500.0]) 531 | 532 | proj = not attack.config["use_tanh"] 533 | 534 | modifier = deepcopy(self.modifier) 535 | 536 | attack.coordinate_adam(indices, grad, modifier, proj) 537 | 538 | self.assertTrue( 539 | np.allclose( 540 | attack.mt_arr, 541 | np.array( 542 | [ 543 | 0.0, 544 | 0.0, 545 | 0.0, 546 | 0.0, 547 | 0.0, 548 | 0.0, 549 | 0.0, 550 | 0.0, 551 | 0.0, 552 | 0.0, 553 | 0.0, 554 | 0.0, 555 | 0.0, 556 | 0.0, 557 | 0.0, 558 | 200.0, 559 | 0.0, 560 | 0.0, 561 | 0.0, 562 | 0.0, 563 | 0.0, 564 | 0.0, 565 | 0.0, 566 | 0.0, 567 | 350.0, 568 | 0.0, 569 | 0.0, 570 | 0.0, 571 | 0.0, 572 | 0.0, 573 | 0.0, 574 | 0.0, 575 | -100.0, 576 | 0.0, 577 | 0.0, 578 | 0.0, 579 | 0.0, 580 | 0.0, 581 | 0.0, 582 | 0.0, 583 | 0.0, 584 | 0.0, 585 | 0.0, 586 | 0.0, 587 | 0.0, 588 | -150.0, 589 | 0.0, 590 | 0.0, 591 | ], 592 | ), 593 | atol=1e-5, 594 | ) 595 | ) 596 | 597 | self.assertTrue( 598 | np.allclose( 599 | attack.vt_arr, 600 | np.array( 601 | [ 602 | 0.0, 603 | 0.0, 604 | 0.0, 605 | 0.0, 606 | 0.0, 607 | 0.0, 608 | 0.0, 609 | 0.0, 610 | 0.0, 611 | 0.0, 612 | 0.0, 613 | 0.0, 614 | 0.0, 615 | 0.0, 616 | 0.0, 617 | 4000.0, 618 | 0.0, 619 | 0.0, 620 | 0.0, 621 | 0.0, 622 | 0.0, 623 | 0.0, 624 | 0.0, 625 | 0.0, 626 | 12250.0, 627 | 0.0, 628 | 0.0, 629 | 0.0, 630 | 0.0, 631 | 0.0, 632 | 0.0, 633 | 0.0, 634 | 1000.0, 635 | 0.0, 636 | 0.0, 637 | 0.0, 638 | 0.0, 639 | 0.0, 640 | 0.0, 641 | 0.0, 642 | 0.0, 643 | 0.0, 644 | 0.0, 645 | 0.0, 646 | 0.0, 647 | 2250.0, 648 | 0.0, 649 | 0.0, 650 | ], 651 | ), 652 | atol=1e-5, 653 | ) 654 | ) 655 | 656 | self.assertTrue( 657 | np.allclose( 658 | modifier, 659 | np.array( 660 | [ 661 | [ 662 | [-0.21563086, 0.54629284, 1.0879989], 663 | [-0.17234534, 0.37302095, 1.5072422], 664 | [-0.14709516, -0.08446954, -1.0199878], 665 | [-0.46581882, 0.41346493, -1.6357177], 666 | ], 667 | [ 668 | [0.97039294, -0.46038368, -0.5377948], 669 | [-0.08485582, -1.4017423, -0.6447743], 670 | [-0.6031785, 
-2.003339, -0.01103557], 671 | [0.41714168, -1.94303, 0.6685426], 672 | ], 673 | [ 674 | [-0.84051266, 0.79823476, 0.2532903], 675 | [-0.76351106, 0.90984505, 1.331635], 676 | [-1.1300149, -0.8444777, -2.2165612], 677 | [1.0166003, 0.9233805, 0.98315567], 678 | ], 679 | [ 680 | [-0.88205546, -0.3438152, -0.36559045], 681 | [0.56274384, 1.5836877, -1.2370849], 682 | [1.4234338, -0.5929535, -1.3011148], 683 | [0.84360084, 0.90161383, 0.80880517], 684 | ], 685 | ], 686 | ), 687 | atol=1e-5, 688 | ) 689 | ) 690 | 691 | self.assertTrue( 692 | ( 693 | attack.adam_epochs 694 | == np.array( 695 | [ 696 | 1, 697 | 1, 698 | 1, 699 | 1, 700 | 1, 701 | 1, 702 | 1, 703 | 1, 704 | 1, 705 | 1, 706 | 1, 707 | 1, 708 | 1, 709 | 1, 710 | 1, 711 | 2, 712 | 1, 713 | 1, 714 | 1, 715 | 1, 716 | 1, 717 | 1, 718 | 1, 719 | 1, 720 | 2, 721 | 1, 722 | 1, 723 | 1, 724 | 1, 725 | 1, 726 | 1, 727 | 1, 728 | 2, 729 | 1, 730 | 1, 731 | 1, 732 | 1, 733 | 1, 734 | 1, 735 | 1, 736 | 1, 737 | 1, 738 | 1, 739 | 1, 740 | 1, 741 | 2, 742 | 1, 743 | 1, 744 | ], 745 | ) 746 | ).all(), 747 | ) 748 | 749 | def test_get_new_prob(self): 750 | probs = self.attack.get_new_prob(self.modifier, 2) 751 | self.assertEqual(probs.shape, self.modifier.shape[1:]) 752 | 753 | # Integration Test 754 | self.assertTrue( 755 | np.allclose( 756 | probs, 757 | np.array( 758 | [ 759 | [ 760 | [0.01471687, 0.02125866, 0.02285866], 761 | [0.01471687, 0.02125866, 0.02285866], 762 | [0.00914774, 0.03038241, 0.0248071], 763 | [0.00914774, 0.03038241, 0.0248071], 764 | ], 765 | [ 766 | [0.01471687, 0.02125866, 0.02285866], 767 | [0.01471687, 0.02125866, 0.02285866], 768 | [0.00914774, 0.03038241, 0.0248071], 769 | [0.00914774, 0.03038241, 0.0248071], 770 | ], 771 | [ 772 | [0.01337715, 0.02401802, 0.02019542], 773 | [0.01337715, 0.02401802, 0.02019542], 774 | [0.02158763, 0.01400388, 0.03364644], 775 | [0.02158763, 0.01400388, 0.03364644], 776 | ], 777 | [ 778 | [0.01337715, 0.02401802, 0.02019542], 779 | [0.01337715, 0.02401802, 0.02019542], 780 | [0.02158763, 0.01400388, 0.03364644], 781 | [0.02158763, 0.01400388, 0.03364644], 782 | ], 783 | ] 784 | ), 785 | atol=1e-5, 786 | ) 787 | ) 788 | 789 | def test_resize_img(self): 790 | # Reset Only True 791 | 792 | attack = deepcopy(self.attack) 793 | new_modifier = attack.resize_img(8, 8, 3, self.modifier, 2, reset_only=True) 794 | 795 | self.assertEqual(new_modifier.shape, (1, 8, 8, 3)) 796 | 797 | self.assertEqual(attack.sample_prob.shape, np.prod(8 * 8 * 3)) 798 | 799 | # Reset Only False 800 | attack = deepcopy(self.attack) 801 | new_modifier = attack.resize_img(8, 8, 3, self.modifier, 2) 802 | 803 | self.assertEqual(new_modifier.shape, (1, 8, 8, 3)) 804 | 805 | self.assertEqual(attack.sample_prob.shape, np.prod(8 * 8 * 3)) 806 | 807 | # Integration Test 808 | self.assertTrue( 809 | np.allclose( 810 | new_modifier, 811 | np.array( 812 | [ 813 | [ 814 | [ 815 | [-0.21563086, 0.54629284, 1.0879989], 816 | [-0.20480949, 0.50297487, 1.1928097], 817 | [-0.18316671, 0.41633892, 1.4024314], 818 | [-0.16603279, 0.25864834, 0.8754347], 819 | [-0.15340771, 0.02990307, -0.38818032], 820 | [-0.22677608, 0.04001407, -1.1739203], 821 | [-0.3861379, 0.28898132, -1.4817853], 822 | [-0.46581882, 0.41346493, -1.6357177], 823 | ], 824 | [ 825 | [0.0808751, 0.2946237, 0.68155044], 826 | [0.02316307, 0.2033003, 0.7534723], 827 | [-0.09226094, 0.02065352, 0.89731616], 828 | [-0.17775872, -0.19404912, 0.53499115], 829 | [-0.23333023, -0.44080764, -0.3335028], 830 | [-0.25710666, -0.4670549, -0.8407254], 831 | [-0.24908802, 
-0.2727908, -0.98667693], 832 | [-0.2450787, -0.17565879, -1.0596527], 833 | ], 834 | [ 835 | [0.673887, -0.20871457, -0.1313464], 836 | [0.47910815, -0.39604884, -0.12520233], 837 | [0.0895506, -0.77071726, -0.11291426], 838 | [-0.20121056, -1.099444, -0.14589605], 839 | [-0.3931753, -1.3822291, -0.22414777], 840 | [-0.31776786, -1.481193, -0.17433581], 841 | [0.02501175, -1.396335, 0.00353971], 842 | [0.19640155, -1.3539063, 0.09247744], 843 | ], 844 | [ 845 | [0.51816654, -0.14572906, -0.34002355], 846 | [0.32536998, -0.3152582, -0.29268563], 847 | [-0.0602231, -0.6543164, -0.19800991], 848 | [-0.3734866, -1.04629, -0.25373322], 849 | [-0.61442065, -1.4911791, -0.45985574], 850 | [-0.4094141, -1.5918247, -0.23538877], 851 | [0.24153282, -1.3482264, 0.4196677], 852 | [0.56700635, -1.2264273, 0.7471959], 853 | ], 854 | [ 855 | [-0.38628626, 0.48358017, 0.05551901], 856 | [-0.4380515, 0.4456721, 0.25102246], 857 | [-0.54158205, 0.36985612, 0.6420292], 858 | [-0.6945869, -0.03458703, 0.21147956], 859 | [-0.89706624, -0.76765776, -1.0406268], 860 | [-0.5320455, -0.7989503, -1.0238843], 861 | [0.4004752, -0.12846482, 0.2617069], 862 | [0.8667356, 0.20677787, 0.9045024], 863 | ], 864 | [ 865 | [-0.8493984, 0.51272225, 0.09857011], 866 | [-0.7450356, 0.6541181, 0.24629137], 867 | [-0.5363101, 0.93690985, 0.54173374], 868 | [-0.44687366, 0.6133301, 0.01979139], 869 | [-0.4767264, -0.31662107, -1.319536], 870 | [-0.12552696, -0.35671276, -1.2570076], 871 | [0.60672456, 0.493055, 0.20737618], 872 | [0.9728504, 0.9179388, 0.93956804], 873 | ], 874 | [ 875 | [-0.87116975, -0.05830276, -0.21087027], 876 | [-0.5955823, 0.3100797, -0.3068789], 877 | [-0.04440734, 1.0468445, -0.49889636], 878 | [0.369653, 0.89746165, -0.8287977], 879 | [0.6465987, -0.1380692, -1.2965835], 880 | [0.8101414, -0.26511204, -0.9347591], 881 | [0.8602809, 0.51633304, 0.25667554], 882 | [0.8853507, 0.9070555, 0.8523928], 883 | ], 884 | [ 885 | [-0.88205546, -0.3438152, -0.36559045], 886 | [-0.52085567, 0.13806051, -0.583464], 887 | [0.20154402, 1.1018119, -1.0192113], 888 | [0.7779163, 1.0395274, -1.2530923], 889 | [1.2082613, -0.04879326, -1.2851074], 890 | [1.2779756, -0.21931165, -0.7736348], 891 | [0.98705906, 0.52797204, 0.28132522], 892 | [0.84160084, 0.90161383, 0.80880517], 893 | ], 894 | ] 895 | ] 896 | ), 897 | atol=1e-5, 898 | ) 899 | ) 900 | 901 | self.assertTrue( 902 | np.allclose( 903 | attack.sample_prob, 904 | np.array( 905 | [ 906 | 0.00367922, 907 | 0.00531467, 908 | 0.00571467, 909 | 0.00367922, 910 | 0.00531467, 911 | 0.00571467, 912 | 0.00367922, 913 | 0.00531467, 914 | 0.00571467, 915 | 0.00367922, 916 | 0.00531467, 917 | 0.00571467, 918 | 0.00228693, 919 | 0.0075956, 920 | 0.00620178, 921 | 0.00228693, 922 | 0.0075956, 923 | 0.00620178, 924 | 0.00228693, 925 | 0.0075956, 926 | 0.00620178, 927 | 0.00228693, 928 | 0.0075956, 929 | 0.00620178, 930 | 0.00367922, 931 | 0.00531467, 932 | 0.00571467, 933 | 0.00367922, 934 | 0.00531467, 935 | 0.00571467, 936 | 0.00367922, 937 | 0.00531467, 938 | 0.00571467, 939 | 0.00367922, 940 | 0.00531467, 941 | 0.00571467, 942 | 0.00228693, 943 | 0.0075956, 944 | 0.00620178, 945 | 0.00228693, 946 | 0.0075956, 947 | 0.00620178, 948 | 0.00228693, 949 | 0.0075956, 950 | 0.00620178, 951 | 0.00228693, 952 | 0.0075956, 953 | 0.00620178, 954 | 0.00367922, 955 | 0.00531467, 956 | 0.00571467, 957 | 0.00367922, 958 | 0.00531467, 959 | 0.00571467, 960 | 0.00367922, 961 | 0.00531467, 962 | 0.00571467, 963 | 0.00367922, 964 | 0.00531467, 965 | 0.00571467, 966 | 0.00228693, 967 | 
0.0075956, 968 | 0.00620178, 969 | 0.00228693, 970 | 0.0075956, 971 | 0.00620178, 972 | 0.00228693, 973 | 0.0075956, 974 | 0.00620178, 975 | 0.00228693, 976 | 0.0075956, 977 | 0.00620178, 978 | 0.00367922, 979 | 0.00531467, 980 | 0.00571467, 981 | 0.00367922, 982 | 0.00531467, 983 | 0.00571467, 984 | 0.00367922, 985 | 0.00531467, 986 | 0.00571467, 987 | 0.00367922, 988 | 0.00531467, 989 | 0.00571467, 990 | 0.00228693, 991 | 0.0075956, 992 | 0.00620178, 993 | 0.00228693, 994 | 0.0075956, 995 | 0.00620178, 996 | 0.00228693, 997 | 0.0075956, 998 | 0.00620178, 999 | 0.00228693, 1000 | 0.0075956, 1001 | 0.00620178, 1002 | 0.00334429, 1003 | 0.00600451, 1004 | 0.00504886, 1005 | 0.00334429, 1006 | 0.00600451, 1007 | 0.00504886, 1008 | 0.00334429, 1009 | 0.00600451, 1010 | 0.00504886, 1011 | 0.00334429, 1012 | 0.00600451, 1013 | 0.00504886, 1014 | 0.00539691, 1015 | 0.00350097, 1016 | 0.00841161, 1017 | 0.00539691, 1018 | 0.00350097, 1019 | 0.00841161, 1020 | 0.00539691, 1021 | 0.00350097, 1022 | 0.00841161, 1023 | 0.00539691, 1024 | 0.00350097, 1025 | 0.00841161, 1026 | 0.00334429, 1027 | 0.00600451, 1028 | 0.00504886, 1029 | 0.00334429, 1030 | 0.00600451, 1031 | 0.00504886, 1032 | 0.00334429, 1033 | 0.00600451, 1034 | 0.00504886, 1035 | 0.00334429, 1036 | 0.00600451, 1037 | 0.00504886, 1038 | 0.00539691, 1039 | 0.00350097, 1040 | 0.00841161, 1041 | 0.00539691, 1042 | 0.00350097, 1043 | 0.00841161, 1044 | 0.00539691, 1045 | 0.00350097, 1046 | 0.00841161, 1047 | 0.00539691, 1048 | 0.00350097, 1049 | 0.00841161, 1050 | 0.00334429, 1051 | 0.00600451, 1052 | 0.00504886, 1053 | 0.00334429, 1054 | 0.00600451, 1055 | 0.00504886, 1056 | 0.00334429, 1057 | 0.00600451, 1058 | 0.00504886, 1059 | 0.00334429, 1060 | 0.00600451, 1061 | 0.00504886, 1062 | 0.00539691, 1063 | 0.00350097, 1064 | 0.00841161, 1065 | 0.00539691, 1066 | 0.00350097, 1067 | 0.00841161, 1068 | 0.00539691, 1069 | 0.00350097, 1070 | 0.00841161, 1071 | 0.00539691, 1072 | 0.00350097, 1073 | 0.00841161, 1074 | 0.00334429, 1075 | 0.00600451, 1076 | 0.00504886, 1077 | 0.00334429, 1078 | 0.00600451, 1079 | 0.00504886, 1080 | 0.00334429, 1081 | 0.00600451, 1082 | 0.00504886, 1083 | 0.00334429, 1084 | 0.00600451, 1085 | 0.00504886, 1086 | 0.00539691, 1087 | 0.00350097, 1088 | 0.00841161, 1089 | 0.00539691, 1090 | 0.00350097, 1091 | 0.00841161, 1092 | 0.00539691, 1093 | 0.00350097, 1094 | 0.00841161, 1095 | 0.00539691, 1096 | 0.00350097, 1097 | 0.00841161, 1098 | ] 1099 | ), 1100 | atol=1e-5, 1101 | ) 1102 | ) 1103 | 1104 | def test_single_step(self): 1105 | 1106 | # Random Without Importance and init size reduce 1107 | attack = ZooAttack( 1108 | model=self.model, 1109 | config=self.config, 1110 | input_image_shape=self.orig_img.shape[1:], 1111 | device="cpu:0", 1112 | ) 1113 | attack.config["use_importance"] = False 1114 | attack.config["init_size"] = 2 1115 | modifier = deepcopy(self.modifier) 1116 | 1117 | ( 1118 | total_loss, 1119 | l2_loss, 1120 | confidence_loss, 1121 | model_output, 1122 | new_img, 1123 | ) = attack.single_step( 1124 | modifier, 1125 | self.orig_img, 1126 | self.labels, 1127 | self.config["initial_const"], 1128 | max_pooling_ratio=2, 1129 | ) 1130 | 1131 | self.assertFalse(np.allclose(modifier, self.modifier, atol=1e-5)) 1132 | 1133 | self.assertEqual(new_img.shape, self.modifier.shape[1:]) 1134 | 1135 | # With Custom Indices 1136 | attack = deepcopy(self.attack) 1137 | modifier = deepcopy(self.modifier) 1138 | indices = [15, 24, 32, 45] 1139 | ( 1140 | total_loss, 1141 | l2_loss, 1142 | confidence_loss, 1143 | 
model_output, 1144 | new_img, 1145 | ) = attack.single_step( 1146 | modifier, 1147 | self.orig_img, 1148 | self.labels, 1149 | self.config["initial_const"], 1150 | var_indice=indices, 1151 | max_pooling_ratio=2, 1152 | ) 1153 | 1154 | self.assertFalse(np.allclose(modifier, self.modifier, atol=1e-5)) 1155 | 1156 | self.assertEqual(new_img.shape, self.modifier.shape[1:]) 1157 | 1158 | def test_attack(self): 1159 | attack = deepcopy(self.attack) 1160 | orig_img = deepcopy(self.orig_img[0].numpy()) 1161 | orig_img /= 10 * np.max(orig_img) 1162 | labels = self.labels[0].numpy() 1163 | outer_best_adv, outer_best_const = attack.attack( 1164 | orig_img, labels, max_pooling_ratio=2 1165 | ) 1166 | 1167 | self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:]) 1168 | 1169 | # Without x10 1170 | attack = deepcopy(self.attack) 1171 | orig_img = deepcopy(self.orig_img[0].numpy()) 1172 | orig_img /= 100 * np.max(orig_img) 1173 | outer_best_adv, outer_best_const = attack.attack( 1174 | orig_img, labels, max_pooling_ratio=2 1175 | ) 1176 | 1177 | self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:]) 1178 | 1179 | # With modifier init 1180 | attack = deepcopy(self.attack) 1181 | outer_best_adv, outer_best_const = attack.attack( 1182 | orig_img, 1183 | labels, 1184 | modifier_init=self.modifier[0], 1185 | max_pooling_ratio=2, 1186 | ) 1187 | 1188 | self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:]) 1189 | 1190 | # With use resize and untargeted and max iterations 10k 1191 | attack = deepcopy(self.attack) 1192 | attack.config["use_resize"] = True 1193 | attack.config["resize_iter_1"] = 20 1194 | attack.config["resize_iter_2"] = 80 1195 | attack.config["abort_early"] = False 1196 | attack.config["targeted"] = False 1197 | 1198 | orig_img = deepcopy(self.orig_img[0].numpy()) 1199 | orig_img /= 10 * np.max(orig_img) 1200 | outer_best_adv, outer_best_const = attack.attack( 1201 | orig_img, labels, max_pooling_ratio=2 1202 | ) 1203 | self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:]) 1204 | 1205 | # Without tanh 1206 | attack = deepcopy(self.attack) 1207 | attack.config["use_tanh"] = False 1208 | outer_best_adv, outer_best_const = attack.attack( 1209 | orig_img, labels, max_pooling_ratio=2 1210 | ) 1211 | self.assertEqual(outer_best_adv.shape, self.modifier.shape[1:]) 1212 | -------------------------------------------------------------------------------- /tests/test_common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/tests/test_common/__init__.py -------------------------------------------------------------------------------- /tests/test_common/test_text/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/tests/test_common/test_text/__init__.py -------------------------------------------------------------------------------- /tests/test_common/test_text/test_utils/test_perturbations.py: -------------------------------------------------------------------------------- 1 | import random 2 | import unittest 3 | 4 | from parameterized import parameterized_class 5 | 6 | from code_soup.common.text import perturbations 7 | 8 | WHITE_SPACE_EXAMPLE = "is wrong" 9 | 10 | 11 | @parameterized_class( 12 | ("word", "expected_result"), [("Bob", "Bb"), ("Hey there", "Hey there"), 
("H", "H")] 13 | ) 14 | class TestPerturbDeleteParameterized(unittest.TestCase): 15 | """Perturb Delete Parameterized TestCase 16 | Args: ("word", "expected_result") 17 | """ 18 | 19 | def setUp(self): 20 | random.seed(42) 21 | self.delete_perturbations = perturbations.DeleteCharacterPerturbations() 22 | 23 | def test_output(self): 24 | self.assertEqual( 25 | self.delete_perturbations.apply(self.word), self.expected_result 26 | ) 27 | 28 | 29 | class TestPerturbDeleteUnparameterized(unittest.TestCase): 30 | def setUp(self): 31 | random.seed(42) 32 | self.delete_perturbations = perturbations.DeleteCharacterPerturbations() 33 | 34 | def test_perturb_delete_with_character_size_less_than_three(self): 35 | with self.assertRaises(AssertionError): 36 | self.delete_perturbations.apply("To", ignore=False) 37 | 38 | def test_perturb_delete_with_whitespace(self): 39 | with self.assertRaises(AssertionError): 40 | self.delete_perturbations.apply(WHITE_SPACE_EXAMPLE, ignore=False) 41 | 42 | 43 | @parameterized_class( 44 | ("word", "expected_result"), 45 | [("Bob", "B ob"), ("Hey there", "Hey there"), ("H", "H")], 46 | ) 47 | class TestPerturbInsertSpaceParameterized(unittest.TestCase): 48 | def setUp(self): 49 | random.seed(42) 50 | self.space_perturb = perturbations.InsertSpaceCharacterPerturbations() 51 | 52 | def test_output(self): 53 | self.assertEqual(self.space_perturb.apply(self.word), self.expected_result) 54 | 55 | 56 | class TestPerturbInsertSpaceUnparameterized(unittest.TestCase): 57 | def setUp(self): 58 | random.seed(42) 59 | self.space_perturb = perturbations.InsertSpaceCharacterPerturbations() 60 | 61 | def test_perturb_insert_space_with_whitespace(self): 62 | with self.assertRaises(AssertionError): 63 | self.space_perturb.apply(WHITE_SPACE_EXAMPLE, ignore=False) 64 | 65 | def test_perturb_insert_space_with_character_size_less_than_two(self): 66 | with self.assertRaises(AssertionError): 67 | self.space_perturb.apply("H", ignore=False) 68 | 69 | 70 | @parameterized_class( 71 | ("word", "expected_result"), 72 | [("hello", "hellzo"), ("Hey there", "Hey there"), ("H", "H")], 73 | ) 74 | class TestPerturbInsertCharacter(unittest.TestCase): 75 | def setUp(self): 76 | random.seed(30) 77 | self.char_perturb = perturbations.InsertSpaceCharacterPerturbations() 78 | 79 | def test_output(self): 80 | self.assertEqual( 81 | self.char_perturb.apply(self.word, char_perturb=True), self.expected_result 82 | ) 83 | 84 | 85 | @parameterized_class(("word", "expected_result"), [("THAT", "TAHT")]) 86 | class TestPerturbShuffleSwapTwo(unittest.TestCase): 87 | def setUp(self): 88 | random.seed(0) 89 | self.shuffle_perturbations = perturbations.ShuffleCharacterPerturbations() 90 | 91 | def test_output(self): 92 | self.assertEqual( 93 | self.shuffle_perturbations.apply(self.word, mid=False), self.expected_result 94 | ) 95 | 96 | 97 | @parameterized_class( 98 | ("word", "expected_result"), [("Adversarial", "Aiavrsedarl"), ("dog", "dog")] 99 | ) 100 | class TestPerturbShuffleMiddle(unittest.TestCase): 101 | def setUp(self): 102 | random.seed(0) 103 | self.shuffle_perturbations = perturbations.ShuffleCharacterPerturbations() 104 | 105 | def test_output(self): 106 | self.assertEqual( 107 | self.shuffle_perturbations.apply(self.word), self.expected_result 108 | ) 109 | 110 | 111 | class TestPerturbShuffleUnparameterized(unittest.TestCase): 112 | def setUp(self): 113 | self.shuffle_perturbations = perturbations.ShuffleCharacterPerturbations() 114 | 115 | def test_perturb_shuffle_with_character_size_less_than_four(self): 
116 | with self.assertRaises(AssertionError): 117 | self.shuffle_perturbations.apply("Ton", ignore=False) 118 | 119 | def test_perturb_shuffle_with_whitespace(self): 120 | with self.assertRaises(AssertionError): 121 | self.shuffle_perturbations.apply(WHITE_SPACE_EXAMPLE, ignore=False) 122 | 123 | 124 | @parameterized_class( 125 | ("word", "expected_result"), [("Noise", "Noixe"), ("Hi there", "Hi there")] 126 | ) 127 | class TestPerturbTypo(unittest.TestCase): 128 | def setUp(self): 129 | random.seed(0) 130 | self.typo_perturbations = perturbations.TypoCharacterPerturbations() 131 | 132 | def test_output(self): 133 | self.assertEqual(self.typo_perturbations.apply(self.word), self.expected_result) 134 | 135 | 136 | class TestPerturbTypoWithWhitespace(unittest.TestCase): 137 | def setUp(self): 138 | self.typo_perturbations = perturbations.TypoCharacterPerturbations() 139 | 140 | def test_perturb_shuffle_with_whitespace(self): 141 | with self.assertRaises(AssertionError): 142 | self.typo_perturbations.apply( 143 | WHITE_SPACE_EXAMPLE, probability=0.1, ignore=False 144 | ) 145 | 146 | 147 | @parameterized_class( 148 | ("word", "expected_result"), 149 | [("adversarial", "âd́v̕e̕r̕s̐a̕r̂i̅a̒ĺ"), ("Hi there", "Hi there")], 150 | ) 151 | class TestPerturbUnicode(unittest.TestCase): 152 | def setUp(self): 153 | self.viz = perturbations.VisuallySimilarCharacterPerturbations( 154 | "unicode", "homoglyph" 155 | ) 156 | 157 | def test_output(self): 158 | self.assertEqual(self.viz.apply(self.word, 0), self.expected_result) 159 | 160 | 161 | @parameterized_class( 162 | ("word", "expected_result"), 163 | [("adversarial", "𝓪𝓭ꮩ𝑒𝓇s𝖺rꙇa1"), ("Hi there", "Hi there")], 164 | ) 165 | class TestPerturbHomoglyph(unittest.TestCase): 166 | def setUp(self): 167 | self.viz = perturbations.VisuallySimilarCharacterPerturbations( 168 | "unicode", "homoglyph" 169 | ) 170 | 171 | def test_output(self): 172 | self.assertEqual(self.viz.apply(self.word, 1), self.expected_result) 173 | -------------------------------------------------------------------------------- /tests/test_common/test_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from tests.test_common.test_utils.test_checkpoints import TestCheckpoints 2 | from tests.test_common.test_utils.test_seeding import TestSeeding 3 | -------------------------------------------------------------------------------- /tests/test_common/test_utils/test_checkpoints.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | import numpy as np 5 | import torch 6 | import torch.nn as nn 7 | import torch.optim as optim 8 | import torchvision.models as models 9 | 10 | from code_soup.common.utils import Checkpoints 11 | 12 | 13 | class TheModelClass(nn.Module): 14 | """ 15 | Model class for tests 16 | """ 17 | 18 | def __init__(self): 19 | super(TheModelClass, self).__init__() 20 | self.dense = nn.Linear(2, 1) 21 | self.activation = nn.Sigmoid() 22 | 23 | def forward(self, x): 24 | return self.activation(self.dense(x)) 25 | 26 | 27 | class TestCheckpoints(unittest.TestCase): 28 | def test_save(self): 29 | """ 30 | Test that the model is saved 31 | """ 32 | model_save = TheModelClass() 33 | optimizer = optim.SGD(model_save.parameters(), lr=0.01, momentum=0.9) 34 | loss = 0.5 35 | epoch = 10 36 | Checkpoints.save( 37 | "tests/test_common/test_utils/test_model.pth", 38 | model_save, 39 | optimizer, 40 | epoch, 41 | loss, 42 | ) 43 | 
self.assertTrue(os.path.isfile("tests/test_common/test_utils/test_model.pth")) 44 | os.remove("tests/test_common/test_utils/test_model.pth") 45 | 46 | def test_load(self): 47 | """ 48 | Test that the model is loaded 49 | """ 50 | model = TheModelClass() 51 | optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) 52 | loss = 0.5 53 | epoch = 10 54 | Checkpoints.save( 55 | "tests/test_common/test_utils/test_model.pth", 56 | model, 57 | optimizer, 58 | epoch, 59 | loss, 60 | ) 61 | model_load = Checkpoints.load("tests/test_common/test_utils/test_model.pth") 62 | self.assertEqual(list(model.state_dict()), list(model_load.state_dict())) 63 | os.remove("tests/test_common/test_utils/test_model.pth") 64 | -------------------------------------------------------------------------------- /tests/test_common/test_utils/test_seeding.py: -------------------------------------------------------------------------------- 1 | import random 2 | import unittest 3 | 4 | import numpy as np 5 | import torch 6 | 7 | from code_soup.common.utils import Seeding 8 | 9 | 10 | class TestSeeding(unittest.TestCase): 11 | """Test the seed function.""" 12 | 13 | def test_seed(self): 14 | """Test that the seed is set.""" 15 | random.seed(42) 16 | initial_state = random.getstate() 17 | Seeding.seed(42) 18 | final_state = random.getstate() 19 | self.assertEqual(initial_state, final_state) 20 | self.assertEqual(np.random.get_state()[1][0], 42) 21 | self.assertEqual(torch.get_rng_state().tolist()[0], 42) 22 | -------------------------------------------------------------------------------- /tests/test_common/test_vision/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adversarial-Deep-Learning/code-soup/191a0a9e2237202472e8ba2bd69db04a6ba7b1ff/tests/test_common/test_vision/__init__.py -------------------------------------------------------------------------------- /tests/test_common/test_vision/test_datasets/test_image_classification.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torchvision 4 | from parameterized import parameterized_class 5 | from torch.utils.data import DataLoader 6 | from torchvision import transforms 7 | 8 | from code_soup.common.vision.datasets import ImageClassificationDataset 9 | 10 | 11 | @parameterized_class( 12 | ("dataset_class", "expected_size", "expected_label"), 13 | [ 14 | (torchvision.datasets.MNIST, (16, 1, 28, 28), 5), 15 | (torchvision.datasets.CIFAR10, (12, 3, 32, 32), 6), 16 | ], 17 | ) 18 | class TestVisionDataset(unittest.TestCase): 19 | """Vision Dataset Parameterized TestCase 20 | 21 | Args: ("dataset_class", "expected_size", "expected_label") 22 | """ 23 | 24 | def setUp(self): 25 | self.TestDataset = ImageClassificationDataset( 26 | self.dataset_class, transform=transforms.Compose([transforms.ToTensor()]) 27 | ) 28 | self.TestDatasetLoader = DataLoader( 29 | self.TestDataset, batch_size=self.expected_size[0], shuffle=False 30 | ) 31 | self.samples = next(iter(self.TestDatasetLoader)) 32 | 33 | def test_image_tensor_dimensions(self): 34 | self.assertTupleEqual(self.samples[0].size(), self.expected_size) 35 | 36 | def test_image_label_correctness(self): 37 | self.assertEqual(self.samples[1][0], self.expected_label) 38 | -------------------------------------------------------------------------------- /tests/test_common/test_vision/test_models/__init__.py: 
-------------------------------------------------------------------------------- 1 | from tests.test_common.test_vision.test_models.test_allconv import TestAllConvNet 2 | from tests.test_common.test_vision.test_models.test_nin import TestNIN 3 | -------------------------------------------------------------------------------- /tests/test_common/test_vision/test_models/test_allconv.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from code_soup.common.vision.models import AllConvNet 7 | 8 | 9 | class TestAllConvNet(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls) -> None: 12 | cls.model = AllConvNet( 13 | image_size=96, n_classes=1, device=torch.device("cpu"), lr=0.01 14 | ) 15 | 16 | def test_step(self): 17 | self.model.step([torch.randn(96, 96, 3, 3), torch.ones(4)]) 18 | -------------------------------------------------------------------------------- /tests/test_common/test_vision/test_models/test_nin.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from code_soup.common.vision.models import NIN 7 | 8 | 9 | class TestNIN(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls) -> None: 12 | cls.model = NIN(input_size=3, n_classes=1, device=torch.device("cpu"), lr=0.01) 13 | 14 | def test_step(self): 15 | self.model.step([torch.randn(3, 3, 3, 5), torch.ones(4)]) 16 | -------------------------------------------------------------------------------- /tests/test_common/test_vision/test_models/test_simple_cnn_classifier.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from code_soup.common.vision.models import SimpleCnnClassifier 7 | 8 | 9 | class TestSimpleCnnClassifier(unittest.TestCase): 10 | @classmethod 11 | def setUpClass(cls) -> None: 12 | cls.model = SimpleCnnClassifier(input_shape=(3, 32, 32), num_labels=10) 13 | 14 | def test_step(self): 15 | model_input = torch.randn(5, 3, 32, 32) 16 | model_output = self.model(model_input) 17 | self.assertEqual(model_output.shape, torch.Size([5, 10])) 18 | --------------------------------------------------------------------------------
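
The tests above double as usage documentation for the library's public pieces. As a closing illustration, here is a minimal end-to-end sketch that wires together the dataset wrapper, the simple CNN classifier, and the seeding/checkpoint utilities exactly as the tests construct them; the training loop, loss choice, and checkpoint path are assumed for illustration and are not code from the repository.

```python
# Illustrative sketch only -- the training loop, CrossEntropyLoss, and the
# checkpoint path "simple_cnn.pth" are assumptions; class names and signatures
# (ImageClassificationDataset, SimpleCnnClassifier, Seeding, Checkpoints) are
# taken from the modules and tests shown above.
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms

from code_soup.common.utils import Checkpoints, Seeding
from code_soup.common.vision.datasets import ImageClassificationDataset
from code_soup.common.vision.models import SimpleCnnClassifier

Seeding.seed(42)  # reproducible runs, as exercised in test_seeding.py

# Wrap a torchvision dataset the same way test_image_classification.py does
dataset = ImageClassificationDataset(
    torchvision.datasets.MNIST,
    transform=transforms.Compose([transforms.ToTensor()]),
)
loader = DataLoader(dataset, batch_size=16, shuffle=True)

# MNIST images are 1 x 28 x 28 with 10 classes
model = SimpleCnnClassifier(input_shape=(1, 28, 28), num_labels=10)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
criterion = nn.CrossEntropyLoss()  # assumed loss; the classifier returns raw logits

for epoch in range(1):
    for images, labels in loader:
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
    # Persist a checkpoint with the helper exercised in test_checkpoints.py
    Checkpoints.save("simple_cnn.pth", model, optimizer, epoch, loss.item())
```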