├── .github └── workflows │ ├── docs.yml │ ├── lint.yml │ ├── readthedocs-pr.yaml │ ├── release.yml │ └── tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── _static └── assets │ └── logos │ ├── FundedbytheEU.png │ ├── elsa.jpg │ └── sec4AI4sec.png ├── assets └── logos │ ├── FundedbytheEU.png │ ├── elsa.jpg │ └── sec4AI4sec.png ├── conftest.py ├── docs ├── Makefile ├── make.bat ├── requirements.txt └── source │ ├── _static │ └── assets │ │ └── logos │ │ ├── FundedbytheEU.png │ │ ├── elsa.jpg │ │ └── sec4AI4sec.png │ ├── conf.py │ ├── contributing_link.md │ ├── index.rst │ ├── modules.rst │ ├── readme_link.md │ ├── secmlt.adv.evasion.advlib_attacks.rst │ ├── secmlt.adv.evasion.aggregators.rst │ ├── secmlt.adv.evasion.foolbox_attacks.rst │ ├── secmlt.adv.evasion.rst │ ├── secmlt.adv.poisoning.rst │ ├── secmlt.adv.rst │ ├── secmlt.data.rst │ ├── secmlt.manipulations.rst │ ├── secmlt.metrics.rst │ ├── secmlt.models.data_processing.rst │ ├── secmlt.models.pytorch.rst │ ├── secmlt.models.rst │ ├── secmlt.optimization.rst │ ├── secmlt.rst │ ├── secmlt.tests.rst │ ├── secmlt.trackers.rst │ └── secmlt.utils.rst ├── examples ├── backdoor_example.py ├── label_flipping_example.py ├── loaders │ └── get_loaders.py ├── mnist_example.py ├── mnist_example_random_inits.py ├── mnist_example_sequential.py ├── mnist_example_tensorboard.py ├── models │ └── mnist_net.py ├── run_evasion_attack.py └── train_model.py ├── pyproject.toml ├── requirements-dev.txt ├── requirements-test.txt ├── requirements.txt ├── ruff.toml ├── setup.py └── src ├── __init__.py └── secmlt ├── VERSION ├── __init__.py ├── adv ├── __init__.py ├── backends.py ├── evasion │ ├── __init__.py │ ├── advlib_attacks │ │ ├── __init__.py │ │ ├── advlib_base.py │ │ └── advlib_pgd.py │ ├── aggregators │ │ ├── __init__.py │ │ └── ensemble.py │ ├── base_evasion_attack.py │ ├── foolbox_attacks │ │ ├── __init__.py │ │ ├── foolbox_base.py │ │ └── 
foolbox_pgd.py │ ├── modular_attack.py │ ├── perturbation_models.py │ └── pgd.py └── poisoning │ ├── __init__.py │ ├── backdoor.py │ └── base_data_poisoning.py ├── data ├── __init__.py ├── distributions.py └── lp_uniform_sampling.py ├── manipulations ├── __init__.py └── manipulation.py ├── metrics ├── __init__.py └── classification.py ├── models ├── __init__.py ├── base_model.py ├── base_trainer.py ├── data_processing │ ├── __init__.py │ ├── data_processing.py │ └── identity_data_processing.py └── pytorch │ ├── __init__.py │ ├── base_pytorch_nn.py │ └── base_pytorch_trainer.py ├── optimization ├── __init__.py ├── constraints.py ├── gradient_processing.py ├── initializer.py ├── optimizer_factory.py └── random_perturb.py ├── tests ├── __init__.py ├── fixtures.py ├── mocks.py ├── test_aggregators.py ├── test_attacks.py ├── test_backdoors.py ├── test_constants.py ├── test_constraints.py ├── test_data.py ├── test_manipulations.py ├── test_metrics.py ├── test_trackers.py ├── test_trainer.py └── test_utils.py ├── trackers ├── __init__.py ├── image_trackers.py ├── tensorboard_tracker.py └── trackers.py └── utils ├── __init__.py └── tensor_utils.py /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Render documentation 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: read 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | FORCE_COLOR: "1" 17 | 18 | jobs: 19 | build: 20 | runs-on: ubuntu-latest 21 | 22 | steps: 23 | - uses: actions/checkout@v4 24 | - name: Set up Python 3.10 25 | uses: actions/setup-python@v4 26 | with: 27 | python-version: '3.10' 28 | - name: Install dependencies 29 | run: | 30 | python -m pip install --upgrade pip 31 | python -m pip install -r docs/requirements.txt 32 | - name: Render the documentation 33 | run: > 34 | sphinx-build 35 | -M html 
./docs/source ./docs/build 36 | -vv 37 | --jobs=auto 38 | --keep-going 39 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Ruff 2 | on: [push, pull_request] 3 | jobs: 4 | ruff: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - uses: chartboost/ruff-action@v1 9 | -------------------------------------------------------------------------------- /.github/workflows/readthedocs-pr.yaml: -------------------------------------------------------------------------------- 1 | name: readthedocs/actions 2 | on: 3 | pull_request_target: 4 | types: 5 | - opened 6 | 7 | permissions: 8 | pull-requests: write 9 | 10 | jobs: 11 | documentation-links: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | - uses: readthedocs/actions/preview@v1 17 | with: 18 | project-slug: "secml-torch" 19 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - 'src/secmlt/VERSION' 9 | release: 10 | types: 11 | - created 12 | 13 | jobs: 14 | build: 15 | name: Build distribution 📦 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Set up Python 21 | uses: actions/setup-python@v4 22 | with: 23 | python-version: "3.x" 24 | - name: Install pypa/build 25 | run: >- 26 | python3 -m 27 | pip install 28 | build 29 | --user 30 | - name: Build a binary wheel and a source tarball 31 | run: python3 -m build 32 | - name: Store the distribution packages 33 | uses: actions/upload-artifact@v4 34 | with: 35 | name: python-package-distributions 36 | path: dist/ 37 | 38 | publish-to-pypi: 39 | name: >- 40 | Publish Python 🐍 distribution 📦 to PyPI 41 | 
if: github.event_name == 'release' 42 | needs: 43 | - build 44 | runs-on: ubuntu-latest 45 | environment: 46 | name: release 47 | url: https://pypi.org/p/secml-torch 48 | permissions: 49 | id-token: write # IMPORTANT: mandatory for trusted publishing 50 | 51 | steps: 52 | - name: Download all the dists 53 | uses: actions/download-artifact@v4 54 | with: 55 | name: python-package-distributions 56 | path: dist/ 57 | - name: Publish distribution 📦 to PyPI 58 | uses: pypa/gh-action-pypi-publish@release/v1 59 | 60 | github-release: 61 | if: github.event_name == 'release' 62 | name: >- 63 | Sign the Python 🐍 distribution 📦 with Sigstore 64 | and upload them to GitHub Release 65 | needs: 66 | - publish-to-pypi 67 | runs-on: ubuntu-latest 68 | 69 | permissions: 70 | contents: write # IMPORTANT: mandatory for making GitHub Releases 71 | id-token: write # IMPORTANT: mandatory for sigstore 72 | 73 | steps: 74 | - name: Download all the dists 75 | uses: actions/download-artifact@v4 76 | with: 77 | name: python-package-distributions 78 | path: dist/ 79 | - name: Sign the dists with Sigstore 80 | uses: sigstore/gh-action-sigstore-python@v3.0.0 81 | with: 82 | inputs: >- 83 | ./dist/*.tar.gz 84 | ./dist/*.whl 85 | - name: Upload artifact signatures to GitHub Release 86 | env: 87 | GITHUB_TOKEN: ${{ github.token }} 88 | # Upload to GitHub Release using the `gh` CLI. 89 | # `dist/` contains the built packages, and the 90 | # sigstore-produced signatures and certificates. 
91 | run: >- 92 | gh release upload 93 | '${{ github.ref_name }}' dist/** 94 | --repo '${{ github.repository }}' 95 | 96 | publish-to-testpypi: 97 | name: Publish Python 🐍 distribution 📦 to TestPyPI 98 | if: github.event_name != 'release' 99 | needs: 100 | - build 101 | runs-on: ubuntu-latest 102 | 103 | environment: 104 | name: release-test 105 | url: https://test.pypi.org/p/secml-torch 106 | 107 | permissions: 108 | id-token: write # IMPORTANT: mandatory for trusted publishing 109 | 110 | steps: 111 | - name: Download all the dists 112 | uses: actions/download-artifact@v4 113 | with: 114 | name: python-package-distributions 115 | path: dist/ 116 | - name: Publish distribution 📦 to TestPyPI 117 | uses: pypa/gh-action-pypi-publish@release/v1 118 | with: 119 | repository-url: https://test.pypi.org/legacy/ 120 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Test and codecov report 2 | on: [push, pull_request] 3 | jobs: 4 | run: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - name: Checkout 8 | uses: actions/checkout@v4 9 | with: 10 | fetch-depth: 0 11 | - name: Set up Python 3.10 12 | uses: actions/setup-python@v4 13 | with: 14 | python-version: '3.10' 15 | - name: Install dependencies 16 | run: pip install -r requirements.txt 17 | - name: Install test requirements 18 | run: pip install -r requirements-test.txt 19 | - name: Run tests and collect coverage 20 | run: pytest --cov src/ 21 | - name: Upload coverage reports to Codecov 22 | uses: codecov/codecov-action@v4.0.1 23 | with: 24 | token: ${{ secrets.CODECOV_TOKEN }} 25 | slug: pralab/secml-torch 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | 3 | # User-specific stuff 4 | .idea/**/workspace.xml 5 | .idea/**/tasks.xml 6 | 
.idea/**/usage.statistics.xml 7 | .idea/**/dictionaries 8 | .idea/**/shelf 9 | 10 | # Sensitive or high-churn files 11 | .idea/**/dataSources/ 12 | .idea/**/dataSources.ids 13 | .idea/**/dataSources.local.xml 14 | .idea/**/sqlDataSources.xml 15 | .idea/**/dynamic.xml 16 | .idea/**/uiDesigner.xml 17 | .idea/**/dbnavigator.xml 18 | 19 | # Gradle 20 | .idea/**/gradle.xml 21 | .idea/**/libraries 22 | 23 | # Gradle and Maven with auto-import 24 | # When using Gradle or Maven with auto-import, you should exclude module files, 25 | # since they will be recreated, and may cause churn. Uncomment if using 26 | # auto-import. 27 | # .idea/modules.xml 28 | # .idea/*.iml 29 | # .idea/modules 30 | 31 | # CMake 32 | cmake-build-*/ 33 | 34 | # Mongo Explorer plugin 35 | .idea/**/mongoSettings.xml 36 | 37 | # File-based project format 38 | *.iws 39 | 40 | # IntelliJ 41 | out/ 42 | 43 | # mpeltonen/sbt-idea plugin 44 | .idea_modules/ 45 | 46 | # JIRA plugin 47 | atlassian-ide-plugin.xml 48 | 49 | # Cursive Clojure plugin 50 | .idea/replstate.xml 51 | 52 | # Crashlytics plugin (for Android Studio and IntelliJ) 53 | com_crashlytics_export_strings.xml 54 | crashlytics.properties 55 | crashlytics-build.properties 56 | fabric.properties 57 | 58 | # Editor-based Rest Client 59 | .idea/httpRequests 60 | ### Python template 61 | # Byte-compiled / optimized / DLL files 62 | __pycache__/ 63 | *.py[cod] 64 | *$py.class 65 | 66 | # C extensions 67 | *.so 68 | 69 | # Distribution / packaging 70 | .Python 71 | build/ 72 | develop-eggs/ 73 | dist/ 74 | downloads/ 75 | eggs/ 76 | .eggs/ 77 | lib/ 78 | lib64/ 79 | parts/ 80 | sdist/ 81 | var/ 82 | wheels/ 83 | *.egg-info/ 84 | .installed.cfg 85 | *.egg 86 | MANIFEST 87 | 88 | # PyInstaller 89 | # Usually these files are written by a python script from a template 90 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
91 | *.manifest 92 | *.spec 93 | 94 | # Installer logs 95 | pip-log.txt 96 | pip-delete-this-directory.txt 97 | 98 | # Unit test / coverage reports 99 | htmlcov/ 100 | .tox/ 101 | .coverage 102 | .coverage.* 103 | .cache 104 | nosetests.xml 105 | coverage.xml 106 | *.cover 107 | .hypothesis/ 108 | .pytest_cache/ 109 | pytest-report.xml 110 | 111 | # Translations 112 | *.mo 113 | *.pot 114 | 115 | # Django stuff: 116 | *.log 117 | local_settings.py 118 | db.sqlite3 119 | 120 | # Flask stuff: 121 | instance/ 122 | .webassets-cache 123 | 124 | # Scrapy stuff: 125 | .scrapy 126 | 127 | # Sphinx documentation 128 | docs/build/ 129 | 130 | # PyBuilder 131 | target/ 132 | 133 | # Jupyter Notebook 134 | .ipynb_checkpoints 135 | 136 | # pyenv 137 | .python-version 138 | 139 | # celery beat schedule file 140 | celerybeat-schedule 141 | 142 | # SageMath parsed files 143 | *.sage.py 144 | 145 | # Environments 146 | .env 147 | .venv 148 | env/ 149 | venv/ 150 | ENV/ 151 | env.bak/ 152 | venv.bak/ 153 | devenv/ 154 | 155 | # Spyder project settings 156 | .spyderproject 157 | .spyproject 158 | 159 | # Rope project settings 160 | .ropeproject 161 | 162 | # mkdocs documentation 163 | /site 164 | 165 | # mypy 166 | .mypy_cache/ 167 | 168 | ### VirtualEnv template 169 | # Virtualenv 170 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 171 | [Bb]in 172 | [Ii]nclude 173 | [Ll]ib 174 | [Ll]ib64 175 | [Ll]ocal 176 | [Ss]cripts 177 | pyvenv.cfg 178 | pip-selfcheck.json 179 | 180 | ### macOS template 181 | # General 182 | .DS_Store 183 | .AppleDouble 184 | .LSOverride 185 | 186 | # Icon must end with two \r 187 | Icon 188 | 189 | # Thumbnails 190 | ._* 191 | 192 | # Files that might appear in the root of a volume 193 | .DocumentRevisions-V100 194 | .fseventsd 195 | .Spotlight-V100 196 | .TemporaryItems 197 | .Trashes 198 | .VolumeIcon.icns 199 | .com.apple.timemachine.donotpresent 200 | 201 | # Directories potentially created on remote AFP share 202 | .AppleDB 203 | .AppleDesktop 
204 | Network Trash Folder 205 | Temporary Items 206 | .apdisk 207 | 208 | ### Linux template 209 | *~ 210 | 211 | # temporary files which can be created if a process still has a handle open of a deleted file 212 | .fuse_hidden* 213 | 214 | # KDE directory preferences 215 | .directory 216 | 217 | # Linux trash folder which might appear on any partition or disk 218 | .Trash-* 219 | 220 | # .nfs files are created when an open file is removed but is still being accessed 221 | .nfs* 222 | 223 | ### Windows template 224 | # Windows thumbnail cache files 225 | Thumbs.db 226 | Thumbs.db:encryptable 227 | ehthumbs.db 228 | ehthumbs_vista.db 229 | 230 | # Dump file 231 | *.stackdump 232 | 233 | # Folder config file 234 | [Dd]esktop.ini 235 | 236 | # Recycle Bin used on file shares 237 | $RECYCLE.BIN/ 238 | 239 | # Windows Installer files 240 | *.cab 241 | *.msi 242 | *.msix 243 | *.msm 244 | *.msp 245 | 246 | # Windows shortcuts 247 | *.lnk 248 | 249 | # Example models and datasets 250 | examples/example_data/* 251 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | default_language_version: 2 | python: python3.9 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.4.0 6 | hooks: 7 | - id: check-added-large-files 8 | - id: check-toml 9 | - id: check-yaml 10 | args: 11 | - --unsafe 12 | - id: end-of-file-fixer 13 | - id: trailing-whitespace 14 | - repo: https://github.com/charliermarsh/ruff-pre-commit 15 | rev: v0.6.8 16 | hooks: 17 | - id: ruff 18 | args: 19 | - --fix 20 | - id: ruff-format 21 | ci: 22 | autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks 23 | autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate 24 | -------------------------------------------------------------------------------- /.readthedocs.yaml: 
-------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the OS, Python version and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.10" 13 | 14 | # Build documentation in the "docs/" directory with Sphinx 15 | sphinx: 16 | configuration: docs/source/conf.py 17 | 18 | # Optionally build your docs in additional formats such as PDF and ePub 19 | formats: 20 | - pdf 21 | # - epub 22 | 23 | # Optional but recommended, declare the Python requirements required 24 | # to build your documentation 25 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 26 | python: 27 | install: 28 | - requirements: docs/requirements.txt 29 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # SecMLT: Contribution Guide 2 | 3 | SecMLT is an open-source Python library for Adversarial Machine Learning and robustness evaluation. We welcome contributions from the research community to expand its capabilities, improve its functionality, or add new features. In this guide, we will discuss how to contribute to SecMLT through forks, pull requests, and code formatting using Ruff. 4 | 5 | ## Prerequisites 6 | 7 | Before contributing to SecMLT: 8 | 9 | 1. Familiarize yourself with the library by reviewing the [official documentation](https://secml-torch.readthedocs.io/en/latest/) and exploring the existing codebase. 10 | 2. Install the required dependencies (refer to [the installation guide](https://secml-torch.readthedocs.io/en/latest/#installation)). 11 | 12 | ## Setting up your development environment 13 | 14 | To contribute to SecMLT, follow these steps: 15 | 16 | 1. 
**Fork the repository**: Go to the [SecMLT GitHub page](https://github.com/pralab/secml-torch) and click "Fork" in the upper-right corner. This will create a 17 | copy of the SecMLT repository under your GitHub account. 18 | 19 | 1. **Clone your forked repository**: Clone your forked repository to your local machine using `git clone` command: 20 | ```bash 21 | git clone https://github.com/<your-username>/secml-torch.git secmlt 22 | ``` 23 | 2. **Set up remote repositories**: Add the original SecMLT repository as an upstream remote and set the tracking branch to be `main`: 24 | ```bash 25 | cd secmlt 26 | git remote add upstream https://github.com/pralab/secml-torch.git 27 | git fetch upstream 28 | git checkout main --track upstream/main 29 | ``` 30 | 31 | ## Making changes 32 | 33 | 1. Create a new branch for your feature, bug fix, or documentation update: 34 | ```bash 35 | git checkout -b <your-branch-name> 36 | ``` 37 | 2. Make the necessary changes to the codebase (add features, fix bugs, improve documentation, etc.). Be sure to write clear and descriptive commit messages. 38 | 3. Test your changes locally using appropriate testing frameworks and tools. 39 | 40 | ## Formatting your code 41 | 42 | In our project, we leverage [Ruff](https://docs.astral.sh/ruff/) and [pre-commit](https://pre-commit.com) to enhance code quality and streamline the development process. 43 | Ruff is a static code linter, while Pre-commit is a framework for defining pre-commit hooks. 44 | 45 | ## Documentation 46 | 47 | When adding new functionalities, modules, or packages to SecMLT, it's essential to document them properly. This includes generating reStructuredText (RST) files, which are used by Sphinx to build the documentation. 48 | 49 | To generate documentation RST files for new modules, follow these steps: 50 | 51 | 1. **Install the documentation requirements**: Ensure you have Sphinx and the docs dependencies installed. You can install it via pip: 52 | 53 | ```bash 54 | cd docs 55 | pip install -r requirements.txt 56 | ``` 57 | 58 | 2. 
**Write Docstrings**: Ensure your modules and functions/classes have docstrings following the reStructuredText format. This allows Sphinx to parse and generate documentation from your code. The following steps will automatically find the modules if they are properly documented. 59 | 60 | 3. **Run Autodoc**: Use the `sphinx-apidoc` command to automatically generate RST files for your modules: 61 | 62 | ```bash 63 | sphinx-apidoc -o <output_dir> <module_dir> -f 64 | ``` 65 | 66 | Replace `<output_dir>` with the directory where you want the RST files to be generated, and `<module_dir>` with the directory containing your modules. The `-f` parameter makes it rewrite existing files (so that the new functions are added). 67 | 68 | Specifically, it's easy to do it from the docs folder: 69 | 70 | ```bash 71 | cd docs # skip if you are already in the docs folder from the previous step 72 | sphinx-apidoc -o ../docs/source ../src/secmlt -f 73 | ``` 74 | 75 | 4. **Build Documentation**: Finally, build the documentation using Sphinx: 76 | 77 | ```bash 78 | sphinx-build -b html <source_dir> <build_dir> 79 | ``` 80 | 81 | Replace `<source_dir>` with the directory containing your Sphinx source files (including the generated RST files), and `<build_dir>` with the directory where you want the HTML documentation to be built. 82 | 83 | Specifically, you can use: 84 | 85 | ```bash 86 | cd .. # skip if you are already in the main project folder 87 | sphinx-build -M html ./docs/source ./docs/build 88 | ``` 89 | 90 | By following these steps, you can ensure that your new modules are properly documented and integrated into the SecMLT documentation. A preview will be generated when you create the pull request. 91 | 92 | 93 | ### Using Ruff 94 | 95 | Ruff is integrated into our project to perform code linting. 96 | It helps ensure adherence to coding standards, identifies potential bugs, and enhances overall code quality. Here's how to use Ruff: 97 | 98 | 1. **Installation**: Make sure you have Ruff installed in your development environment. 
You can install it via pip: 99 | ``` 100 | pip install ruff 101 | ``` 102 | 103 | 2. **Running Ruff**: To analyze your codebase using Ruff, navigate to the project directory and run the following command: 104 | ``` 105 | ruff check 106 | ``` 107 | Ruff will analyze the codebase and provide feedback on potential issues and areas for improvement. 108 | 109 | ### Using Pre-commit 110 | 111 | Pre-commit is employed to automate various tasks such as code formatting, linting, and ensuring code consistency across different environments. We use it to enforce Ruff formatting *before* commit. 112 | Here's how to utilize Pre-commit: 113 | 114 | 1. **Installation**: Ensure that Pre-commit is installed in your environment. You can install it using pip: 115 | ``` 116 | pip install pre-commit 117 | ``` 118 | 119 | 2. **Configuration**: The project includes a `.pre-commit-config.yaml` file that specifies the hooks to be executed by Pre-commit. These hooks can include tasks such as code formatting, static analysis, and more. 120 | 121 | 3. **Installation of Hooks**: Run the following command in the project directory to install the Pre-commit hooks: 122 | ``` 123 | pre-commit install 124 | ``` 125 | This command will set up the hooks specified in the configuration file to run automatically before each commit. 126 | 127 | 4. **Running Pre-commit**: Whenever you make changes and attempt to commit them, Pre-commit will automatically execute the configured hooks. If any issues are found, Pre-commit will prevent the commit from proceeding and provide feedback on the detected issues. 128 | 129 | ### Contributing with your Code 130 | 131 | When contributing code to the project, follow these guidelines to ensure a smooth and efficient contribution process: 132 | 133 | 1. **Run Ruff and Pre-commit Locally**: Before making a pull request, run Ruff and Pre-commit locally to identify and fix potential issues in your code. 134 | 135 | 2. 
**Address Ruff and Pre-commit Warnings**: If Ruff or Pre-commit identifies any issues, address them before submitting your code for review. This ensures that the codebase maintains high standards of quality and consistency. 136 | 137 | 3. **Document Changes**: Clearly document any changes you make, including the rationale behind the changes and any potential impact on existing functionality. 138 | 139 | 4. If there are no issues with your code, commit the changes using the `git add` command and push them to your forked repository: 140 | ```bash 141 | git add . 142 | git commit -m "Your commit message" 143 | git push origin <your-branch-name> 144 | ``` 145 | 146 | ## Submitting a pull request 147 | 148 | 1. Go to your forked repository on GitHub and click the "New pull request" button. 149 | 2. Choose the branch you've created as the source branch, and select `main` as the target branch. 150 | 3. Review the changes you're submitting and write a clear and descriptive pull request title and description. 151 | 4. Submit your pull request by clicking "Create pull request". 152 | 5. The SecMLT maintainers will review your pull request, provide feedback, or merge it into the main repository as appropriate. 153 | 154 | We appreciate your contributions to SecMLT! If you have any questions or need assistance during the process, please don't hesitate to reach out to us on GitHub or other communication channels. 155 | 156 | ## Useful tips 157 | 158 | You can use [```act```](https://nektosact.com/usage/index.html) to try out the actions. We recommend using it only for the linter, as the other workflows can be tested with common tools (e.g., pytest). 
159 | 160 | ```bash 161 | act -W '.github/workflows/lint.yml' 162 | ``` 163 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Pattern Recognition and Applications Laboratory (PRALab) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SecML-Torch: A Library for Robustness Evaluation of Deep Learning Models 2 | 3 | [![pypi](https://img.shields.io/badge/pypi-latest-blue)](https://pypi.org/pypi/secml-torch/) 4 | [![py\_versions](https://img.shields.io/badge/python-3.8%2B-blue)](https://pypi.org/pypi/secml-torch/) 5 | [![coverage](https://codecov.io/gh/pralab/secml-torch/branch/main/graph/badge.svg)](https://app.codecov.io/gh/pralab/secml-torch) 6 | [![docs](https://readthedocs.org/projects/secml-torch/badge/?version=latest)](https://secml-torch.readthedocs.io/en/latest/#) 7 | 8 | SecML-Torch (SecMLT) is an open-source Python library designed to facilitate research in the area of Adversarial Machine Learning (AML) and robustness evaluation. 9 | The library provides a simple yet powerful interface for generating various types of adversarial examples, as well as tools for evaluating the robustness of machine learning models against such attacks. 10 | 11 | ## Installation 12 | 13 | You can install SecMLT via pip: 14 | ```bash 15 | pip install secml-torch 16 | ``` 17 | 18 | This will install the core version of SecMLT, including only the main functionalities such as native implementation of attacks and PyTorch wrappers. 19 | 20 | ### Install with extras 21 | 22 | The library can be installed together with other plugins that enable further functionalities. 23 | 24 | * [Foolbox](https://github.com/bethgelab/foolbox), a Python toolbox to create adversarial examples. 25 | * [Tensorboard](https://www.tensorflow.org/tensorboard), a visualization toolkit for machine learning experimentation. 26 | * [Adversarial Library](https://github.com/jeromerony/adversarial-library), a powerful library of various adversarial attacks resources in PyTorch. 
27 | 28 | 29 | Install one or more extras with the command: 30 | ```bash 31 | pip install secml-torch[foolbox,tensorboard, adv_lib] 32 | ``` 33 | 34 | ## Key Features 35 | 36 | - **Built for Deep Learning:** SecMLT is compatible with the popular machine learning framework PyTorch. 37 | - **Various types of adversarial attacks:** SecMLT includes support for a wide range of attack methods (evasion, poisoning, ...) such as different implementations imported from popular AML libraries (Foolbox, Adversarial Library). 38 | - **Customizable attacks:** SecMLT offers several levels of analysis for the models, including modular implementations of existing attacks to extend with different loss functions, optimizers, and more. 39 | - **Attack debugging:** Built-in debugging of evaluations by logging events and metrics along the attack runs (even on Tensorboard). 40 | 41 | ## Usage 42 | 43 | Here's a brief example of using SecMLT to evaluate the robustness of a trained classifier: 44 | 45 | ```python 46 | from secmlt.adv.evasion.pgd import PGD 47 | from secmlt.metrics.classification import Accuracy 48 | from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier 49 | 50 | 51 | model = ... 52 | torch_data_loader = ... 53 | 54 | # Wrap model 55 | model = BasePytorchClassifier(model) 56 | 57 | # create and run attack 58 | attack = PGD( 59 | perturbation_model="l2", 60 | epsilon=0.4, 61 | num_steps=100, 62 | step_size=0.01, 63 | ) 64 | 65 | adversarial_loader = attack(model, torch_data_loader) 66 | 67 | # Test accuracy on adversarial examples 68 | robust_accuracy = Accuracy()(model, adversarial_loader) 69 | ``` 70 | 71 | For more detailed usage instructions and examples, please refer to the [official documentation](https://secml-torch.readthedocs.io/en/latest/) or to the [examples](https://github.com/pralab/secml-torch/tree/main/examples). 
72 | 73 | ## Contributing 74 | 75 | We welcome contributions from the research community to expand the library's capabilities or add new features. 76 | If you would like to contribute to SecMLT, please follow our [contribution guidelines](https://github.com/pralab/secml-torch/blob/main/CONTRIBUTING.md). 77 | 78 | ### Contributors 79 | 80 | 81 | 82 | 89 | 90 | 97 | 104 | 105 |
83 | 84 | Maura 85 |
86 | maurapintor 87 |
88 |
91 | 92 | zangobot/ 93 |
94 | zangobot 95 |
96 |
98 | 99 | lucascionis/ 100 |
101 | lucascionis 102 |
103 |
106 | 107 | ## Acknowledgements 108 | SecML has been partially developed with the support of European Union’s [ELSA – European Lighthouse on Secure and Safe AI](https://elsa-ai.eu), Horizon Europe, grant agreement No. 101070617, and [Sec4AI4Sec - Cybersecurity for AI-Augmented Systems](https://www.sec4ai4sec-project.eu), Horizon Europe, grant agreement No. 101120393. 109 | 110 | sec4ai4sec    111 | elsa    112 | europe -------------------------------------------------------------------------------- /_static/assets/logos/FundedbytheEU.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/_static/assets/logos/FundedbytheEU.png -------------------------------------------------------------------------------- /_static/assets/logos/elsa.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/_static/assets/logos/elsa.jpg -------------------------------------------------------------------------------- /_static/assets/logos/sec4AI4sec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/_static/assets/logos/sec4AI4sec.png -------------------------------------------------------------------------------- /assets/logos/FundedbytheEU.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/assets/logos/FundedbytheEU.png -------------------------------------------------------------------------------- /assets/logos/elsa.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/assets/logos/elsa.jpg -------------------------------------------------------------------------------- /assets/logos/sec4AI4sec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/assets/logos/sec4AI4sec.png -------------------------------------------------------------------------------- /conftest.py: -------------------------------------------------------------------------------- 1 | """Configuration for tests.""" 2 | 3 | pytest_plugins = [ 4 | "secmlt.tests.fixtures", 5 | ] 6 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx_rtd_theme 3 | numpydoc 4 | myst-parser 5 | sphinx-autodoc-typehints 6 | sphinx-copybutton 7 | -------------------------------------------------------------------------------- /docs/source/_static/assets/logos/FundedbytheEU.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/docs/source/_static/assets/logos/FundedbytheEU.png -------------------------------------------------------------------------------- /docs/source/_static/assets/logos/elsa.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/docs/source/_static/assets/logos/elsa.jpg -------------------------------------------------------------------------------- /docs/source/_static/assets/logos/sec4AI4sec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pralab/secml-torch/44bbca14f74b508c3f5d27ec95ce2d5450306a4b/docs/source/_static/assets/logos/sec4AI4sec.png -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | 9 | import os 10 | import pathlib 11 | import sys 12 | 13 | sys.path.insert(0, os.path.abspath("../../src")) 14 | 15 | 16 | project = "SecML-Torch" 17 | copyright = "2024, Maura Pintor, Luca Demetrio" 18 | author = "Maura Pintor, Luca Demetrio" 19 | 20 | 21 | version_path = pathlib.Path(__file__).parent.parent.parent / "src/secmlt" / "VERSION" 22 | 23 | 24 | # Get the version file from VERSION file 25 | with version_path.open() as f: 26 | version = f.read() 27 | release = version 28 | 29 | # -- General configuration --------------------------------------------------- 30 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 31 | 32 | extensions = [ 33 | "sphinx.ext.autodoc", 34 | "sphinx.ext.autosummary", 35 | "sphinx.ext.doctest", 36 | "sphinx.ext.intersphinx", 37 | "sphinx.ext.todo", 38 | 
"sphinx.ext.coverage", 39 | "sphinx.ext.napoleon", 40 | "sphinx.ext.viewcode", 41 | "sphinx.ext.autosectionlabel", 42 | "sphinx_copybutton", 43 | "myst_parser", 44 | ] 45 | 46 | autosummary_generate = True 47 | 48 | 49 | # Napoleon settings 50 | napoleon_google_docstring = False 51 | napoleon_numpy_docstring = True 52 | napoleon_include_init_with_doc = True 53 | napoleon_include_private_with_doc = True 54 | napoleon_include_special_with_doc = True 55 | napoleon_use_admonition_for_examples = False 56 | napoleon_use_admonition_for_notes = False 57 | napoleon_use_admonition_for_references = False 58 | napoleon_use_ivar = True 59 | napoleon_use_param = True 60 | napoleon_use_rtype = True 61 | 62 | autodoc_mock_imports = ["foolbox", "torch", "tensorboard", "adv_lib"] 63 | 64 | templates_path = ["_templates"] 65 | exclude_patterns = ["*tests*"] 66 | 67 | # -- Options for HTML output ------------------------------------------------- 68 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 69 | 70 | html_theme = "sphinx_rtd_theme" 71 | html_static_path = ["_static"] 72 | -------------------------------------------------------------------------------- /docs/source/contributing_link.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../CONTRIBUTING.md 2 | ``` 3 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. SecML-Torch documentation master file, created by 2 | sphinx-quickstart on Sun Mar 17 21:27:29 2024. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to SecML-Torch's documentation! 7 | ======================================= 8 | 9 | .. include:: readme_link.md 10 | :parser: myst 11 | 12 | 13 | .. 
toctree:: 14 | :maxdepth: 2 15 | :caption: Intro: 16 | :hidden: 17 | 18 | readme_link 19 | 20 | .. toctree:: 21 | :maxdepth: 2 22 | :caption: APIs: 23 | :hidden: 24 | 25 | secmlt.adv 26 | secmlt.data 27 | secmlt.manipulations 28 | secmlt.metrics 29 | secmlt.models 30 | secmlt.optimization 31 | secmlt.trackers 32 | secmlt.utils 33 | 34 | .. toctree:: 35 | :maxdepth: 2 36 | :caption: Contribution guide: 37 | :hidden: 38 | 39 | contributing_link 40 | 41 | Indices and tables 42 | ================== 43 | 44 | * :ref:`genindex` 45 | * :ref:`modindex` 46 | * :ref:`search` 47 | -------------------------------------------------------------------------------- /docs/source/modules.rst: -------------------------------------------------------------------------------- 1 | secmlt 2 | ====== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | secmlt 8 | -------------------------------------------------------------------------------- /docs/source/readme_link.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../README.md 2 | ``` 3 | -------------------------------------------------------------------------------- /docs/source/secmlt.adv.evasion.advlib_attacks.rst: -------------------------------------------------------------------------------- 1 | secmlt.adv.evasion.advlib\_attacks package 2 | ========================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.adv.evasion.advlib\_attacks.advlib\_base module 8 | ------------------------------------------------------ 9 | 10 | .. automodule:: secmlt.adv.evasion.advlib_attacks.advlib_base 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.adv.evasion.advlib\_attacks.advlib\_pgd module 16 | ----------------------------------------------------- 17 | 18 | .. automodule:: secmlt.adv.evasion.advlib_attacks.advlib_pgd 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. 
automodule:: secmlt.adv.evasion.advlib_attacks 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/source/secmlt.adv.evasion.aggregators.rst: -------------------------------------------------------------------------------- 1 | secmlt.adv.evasion.aggregators package 2 | ====================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.adv.evasion.aggregators.ensemble module 8 | ---------------------------------------------- 9 | 10 | .. automodule:: secmlt.adv.evasion.aggregators.ensemble 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: secmlt.adv.evasion.aggregators 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /docs/source/secmlt.adv.evasion.foolbox_attacks.rst: -------------------------------------------------------------------------------- 1 | secmlt.adv.evasion.foolbox\_attacks package 2 | =========================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.adv.evasion.foolbox\_attacks.foolbox\_base module 8 | -------------------------------------------------------- 9 | 10 | .. automodule:: secmlt.adv.evasion.foolbox_attacks.foolbox_base 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.adv.evasion.foolbox\_attacks.foolbox\_pgd module 16 | ------------------------------------------------------- 17 | 18 | .. automodule:: secmlt.adv.evasion.foolbox_attacks.foolbox_pgd 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. 
automodule:: secmlt.adv.evasion.foolbox_attacks 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/source/secmlt.adv.evasion.rst: -------------------------------------------------------------------------------- 1 | secmlt.adv.evasion package 2 | ========================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | secmlt.adv.evasion.advlib_attacks 11 | secmlt.adv.evasion.aggregators 12 | secmlt.adv.evasion.foolbox_attacks 13 | 14 | Submodules 15 | ---------- 16 | 17 | secmlt.adv.evasion.base\_evasion\_attack module 18 | ----------------------------------------------- 19 | 20 | .. automodule:: secmlt.adv.evasion.base_evasion_attack 21 | :members: 22 | :undoc-members: 23 | :show-inheritance: 24 | 25 | secmlt.adv.evasion.modular\_attack module 26 | ----------------------------------------- 27 | 28 | .. automodule:: secmlt.adv.evasion.modular_attack 29 | :members: 30 | :undoc-members: 31 | :show-inheritance: 32 | 33 | secmlt.adv.evasion.perturbation\_models module 34 | ---------------------------------------------- 35 | 36 | .. automodule:: secmlt.adv.evasion.perturbation_models 37 | :members: 38 | :undoc-members: 39 | :show-inheritance: 40 | 41 | secmlt.adv.evasion.pgd module 42 | ----------------------------- 43 | 44 | .. automodule:: secmlt.adv.evasion.pgd 45 | :members: 46 | :undoc-members: 47 | :show-inheritance: 48 | 49 | Module contents 50 | --------------- 51 | 52 | .. 
automodule:: secmlt.adv.evasion 53 | :members: 54 | :undoc-members: 55 | :show-inheritance: 56 | -------------------------------------------------------------------------------- /docs/source/secmlt.adv.poisoning.rst: -------------------------------------------------------------------------------- 1 | secmlt.adv.poisoning package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.adv.poisoning.backdoor module 8 | ------------------------------------ 9 | 10 | .. automodule:: secmlt.adv.poisoning.backdoor 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.adv.poisoning.base\_data\_poisoning module 16 | ------------------------------------------------- 17 | 18 | .. automodule:: secmlt.adv.poisoning.base_data_poisoning 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: secmlt.adv.poisoning 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/source/secmlt.adv.rst: -------------------------------------------------------------------------------- 1 | secmlt.adv package 2 | ================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | secmlt.adv.evasion 11 | secmlt.adv.poisoning 12 | 13 | Submodules 14 | ---------- 15 | 16 | secmlt.adv.backends module 17 | -------------------------- 18 | 19 | .. automodule:: secmlt.adv.backends 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. 
automodule:: secmlt.adv 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/source/secmlt.data.rst: -------------------------------------------------------------------------------- 1 | secmlt.data package 2 | =================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.data.distributions module 8 | -------------------------------- 9 | 10 | .. automodule:: secmlt.data.distributions 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.data.lp\_uniform\_sampling module 16 | ---------------------------------------- 17 | 18 | .. automodule:: secmlt.data.lp_uniform_sampling 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: secmlt.data 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/source/secmlt.manipulations.rst: -------------------------------------------------------------------------------- 1 | secmlt.manipulations package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.manipulations.manipulation module 8 | ---------------------------------------- 9 | 10 | .. automodule:: secmlt.manipulations.manipulation 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: secmlt.manipulations 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /docs/source/secmlt.metrics.rst: -------------------------------------------------------------------------------- 1 | secmlt.metrics package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.metrics.classification module 8 | ------------------------------------ 9 | 10 | .. 
automodule:: secmlt.metrics.classification 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: secmlt.metrics 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /docs/source/secmlt.models.data_processing.rst: -------------------------------------------------------------------------------- 1 | secmlt.models.data\_processing package 2 | ====================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.models.data\_processing.data\_processing module 8 | ------------------------------------------------------ 9 | 10 | .. automodule:: secmlt.models.data_processing.data_processing 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.models.data\_processing.identity\_data\_processing module 16 | ---------------------------------------------------------------- 17 | 18 | .. automodule:: secmlt.models.data_processing.identity_data_processing 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: secmlt.models.data_processing 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/source/secmlt.models.pytorch.rst: -------------------------------------------------------------------------------- 1 | secmlt.models.pytorch package 2 | ============================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.models.pytorch.base\_pytorch\_nn module 8 | ---------------------------------------------- 9 | 10 | .. automodule:: secmlt.models.pytorch.base_pytorch_nn 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.models.pytorch.base\_pytorch\_trainer module 16 | --------------------------------------------------- 17 | 18 | .. 
automodule:: secmlt.models.pytorch.base_pytorch_trainer 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: secmlt.models.pytorch 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/source/secmlt.models.rst: -------------------------------------------------------------------------------- 1 | secmlt.models package 2 | ===================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | secmlt.models.data_processing 11 | secmlt.models.pytorch 12 | 13 | Submodules 14 | ---------- 15 | 16 | secmlt.models.base\_model module 17 | -------------------------------- 18 | 19 | .. automodule:: secmlt.models.base_model 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | secmlt.models.base\_trainer module 25 | ---------------------------------- 26 | 27 | .. automodule:: secmlt.models.base_trainer 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: secmlt.models 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/source/secmlt.optimization.rst: -------------------------------------------------------------------------------- 1 | secmlt.optimization package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.optimization.constraints module 8 | -------------------------------------- 9 | 10 | .. automodule:: secmlt.optimization.constraints 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.optimization.gradient\_processing module 16 | ----------------------------------------------- 17 | 18 | .. 
automodule:: secmlt.optimization.gradient_processing 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | secmlt.optimization.initializer module 24 | -------------------------------------- 25 | 26 | .. automodule:: secmlt.optimization.initializer 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | secmlt.optimization.optimizer\_factory module 32 | --------------------------------------------- 33 | 34 | .. automodule:: secmlt.optimization.optimizer_factory 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | secmlt.optimization.random\_perturb module 40 | ------------------------------------------ 41 | 42 | .. automodule:: secmlt.optimization.random_perturb 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | Module contents 48 | --------------- 49 | 50 | .. automodule:: secmlt.optimization 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | -------------------------------------------------------------------------------- /docs/source/secmlt.rst: -------------------------------------------------------------------------------- 1 | secmlt package 2 | ============== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | secmlt.adv 11 | secmlt.data 12 | secmlt.manipulations 13 | secmlt.metrics 14 | secmlt.models 15 | secmlt.optimization 16 | secmlt.tests 17 | secmlt.trackers 18 | secmlt.utils 19 | 20 | Module contents 21 | --------------- 22 | 23 | .. automodule:: secmlt 24 | :members: 25 | :undoc-members: 26 | :show-inheritance: 27 | -------------------------------------------------------------------------------- /docs/source/secmlt.tests.rst: -------------------------------------------------------------------------------- 1 | secmlt.tests package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.tests.fixtures module 8 | ---------------------------- 9 | 10 | .. 
automodule:: secmlt.tests.fixtures 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.tests.mocks module 16 | ------------------------- 17 | 18 | .. automodule:: secmlt.tests.mocks 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | secmlt.tests.test\_aggregators module 24 | ------------------------------------- 25 | 26 | .. automodule:: secmlt.tests.test_aggregators 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | secmlt.tests.test\_attacks module 32 | --------------------------------- 33 | 34 | .. automodule:: secmlt.tests.test_attacks 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | secmlt.tests.test\_backdoors module 40 | ----------------------------------- 41 | 42 | .. automodule:: secmlt.tests.test_backdoors 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | secmlt.tests.test\_constants module 48 | ----------------------------------- 49 | 50 | .. automodule:: secmlt.tests.test_constants 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | secmlt.tests.test\_constraints module 56 | ------------------------------------- 57 | 58 | .. automodule:: secmlt.tests.test_constraints 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | secmlt.tests.test\_data module 64 | ------------------------------ 65 | 66 | .. automodule:: secmlt.tests.test_data 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | secmlt.tests.test\_manipulations module 72 | --------------------------------------- 73 | 74 | .. automodule:: secmlt.tests.test_manipulations 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | secmlt.tests.test\_metrics module 80 | --------------------------------- 81 | 82 | .. automodule:: secmlt.tests.test_metrics 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | secmlt.tests.test\_trackers module 88 | ---------------------------------- 89 | 90 | .. 
automodule:: secmlt.tests.test_trackers 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | secmlt.tests.test\_trainer module 96 | --------------------------------- 97 | 98 | .. automodule:: secmlt.tests.test_trainer 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | secmlt.tests.test\_utils module 104 | ------------------------------- 105 | 106 | .. automodule:: secmlt.tests.test_utils 107 | :members: 108 | :undoc-members: 109 | :show-inheritance: 110 | 111 | Module contents 112 | --------------- 113 | 114 | .. automodule:: secmlt.tests 115 | :members: 116 | :undoc-members: 117 | :show-inheritance: 118 | -------------------------------------------------------------------------------- /docs/source/secmlt.trackers.rst: -------------------------------------------------------------------------------- 1 | secmlt.trackers package 2 | ======================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.trackers.image\_trackers module 8 | -------------------------------------- 9 | 10 | .. automodule:: secmlt.trackers.image_trackers 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | secmlt.trackers.tensorboard\_tracker module 16 | ------------------------------------------- 17 | 18 | .. automodule:: secmlt.trackers.tensorboard_tracker 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | secmlt.trackers.trackers module 24 | ------------------------------- 25 | 26 | .. automodule:: secmlt.trackers.trackers 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. 
automodule:: secmlt.trackers 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /docs/source/secmlt.utils.rst: -------------------------------------------------------------------------------- 1 | secmlt.utils package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | secmlt.utils.tensor\_utils module 8 | --------------------------------- 9 | 10 | .. automodule:: secmlt.utils.tensor_utils 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: secmlt.utils 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /examples/backdoor_example.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision.datasets 3 | from models.mnist_net import MNISTNet 4 | from secmlt.adv.poisoning.backdoor import BackdoorDatasetPyTorch 5 | from secmlt.metrics.classification import Accuracy, AttackSuccessRate 6 | from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier 7 | from secmlt.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer 8 | from torch.optim import Adam 9 | from torch.utils.data import DataLoader 10 | 11 | 12 | def apply_patch(x: torch.Tensor) -> torch.Tensor: 13 | x[:, 0, 24:28, 24:28] = 1.0 14 | return x 15 | 16 | 17 | dataset_path = "example_data/datasets/" 18 | device = "cpu" 19 | net = MNISTNet() 20 | net.to(device) 21 | optimizer = Adam(lr=1e-3, params=net.parameters()) 22 | training_dataset = torchvision.datasets.MNIST( 23 | transform=torchvision.transforms.ToTensor(), 24 | train=True, 25 | root=dataset_path, 26 | download=True, 27 | ) 28 | target_label = 1 29 | backdoored_mnist = BackdoorDatasetPyTorch( 30 | training_dataset, 31 | data_manipulation_func=apply_patch, 32 | trigger_label=target_label, 33 | 
portion=0.1, 34 | ) 35 | 36 | training_data_loader = DataLoader(backdoored_mnist, batch_size=20, shuffle=False) 37 | test_dataset = torchvision.datasets.MNIST( 38 | transform=torchvision.transforms.ToTensor(), 39 | train=False, 40 | root=dataset_path, 41 | download=True, 42 | ) 43 | test_data_loader = DataLoader(test_dataset, batch_size=20, shuffle=False) 44 | 45 | trainer = BasePyTorchTrainer(optimizer, epochs=5) 46 | model = BasePytorchClassifier(net, trainer=trainer) 47 | model.train(training_data_loader) 48 | 49 | # test accuracy without backdoor 50 | accuracy = Accuracy()(model, test_data_loader) 51 | print("test accuracy: ", accuracy) 52 | 53 | # test accuracy on backdoored dataset 54 | backdoored_test_set = BackdoorDatasetPyTorch( 55 | test_dataset, data_manipulation_func=apply_patch 56 | ) 57 | backdoored_loader = DataLoader(backdoored_test_set, batch_size=20, shuffle=False) 58 | 59 | asr = AttackSuccessRate(y_target=target_label)(model, backdoored_loader) 60 | print(f"asr: {asr}") 61 | -------------------------------------------------------------------------------- /examples/label_flipping_example.py: -------------------------------------------------------------------------------- 1 | import torchvision.datasets 2 | from models.mnist_net import MNISTNet 3 | from secmlt.adv.poisoning.base_data_poisoning import PoisoningDatasetPyTorch 4 | from secmlt.metrics.classification import Accuracy 5 | from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier 6 | from secmlt.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer 7 | from torch.optim import Adam 8 | from torch.utils.data import DataLoader 9 | 10 | 11 | def flip_label(label): 12 | return 0 if label != 0 else 1 13 | 14 | 15 | dataset_path = "example_data/datasets/" 16 | device = "cpu" 17 | net = MNISTNet() 18 | net.to(device) 19 | optimizer = Adam(lr=1e-3, params=net.parameters()) 20 | training_dataset = torchvision.datasets.MNIST( 21 | transform=torchvision.transforms.ToTensor(), 22 
| train=True, 23 | root=dataset_path, 24 | download=True, 25 | ) 26 | target_label = 1 27 | poisoned_mnist = PoisoningDatasetPyTorch( 28 | training_dataset, 29 | label_manipulation_func=flip_label, 30 | portion=0.4, 31 | ) 32 | 33 | training_data_loader = DataLoader(training_dataset, batch_size=20, shuffle=False) 34 | poisoned_data_loader = DataLoader(poisoned_mnist, batch_size=20, shuffle=False) 35 | 36 | test_dataset = torchvision.datasets.MNIST( 37 | transform=torchvision.transforms.ToTensor(), 38 | train=False, 39 | root=dataset_path, 40 | download=True, 41 | ) 42 | test_data_loader = DataLoader(test_dataset, batch_size=20, shuffle=False) 43 | 44 | for k, data_loader in { 45 | "normal": training_data_loader, 46 | "poisoned": poisoned_data_loader, 47 | }.items(): 48 | trainer = BasePyTorchTrainer(optimizer, epochs=3) 49 | model = BasePytorchClassifier(net, trainer=trainer) 50 | model.train(data_loader) 51 | # test accuracy without backdoor 52 | accuracy = Accuracy()(model, test_data_loader) 53 | print(f"test accuracy on {k} data: {accuracy.item():.3f}") 54 | -------------------------------------------------------------------------------- /examples/loaders/get_loaders.py: -------------------------------------------------------------------------------- 1 | import torchvision 2 | from torch.utils.data import DataLoader, Subset 3 | 4 | 5 | def get_mnist_loader(path): 6 | test_dataset = torchvision.datasets.MNIST( 7 | transform=torchvision.transforms.ToTensor(), 8 | train=False, 9 | root=path, 10 | download=True, 11 | ) 12 | test_dataset = Subset(test_dataset, list(range(10))) 13 | return DataLoader(test_dataset, batch_size=10, shuffle=False) 14 | -------------------------------------------------------------------------------- /examples/mnist_example.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from loaders.get_loaders import get_mnist_loader 3 | from models.mnist_net import get_mnist_model 4 | from 
import torch
from loaders.get_loaders import get_mnist_loader
from models.mnist_net import get_mnist_model
from secmlt.adv.backends import Backends
from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.adv.evasion.pgd import PGD
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
from secmlt.trackers.trackers import (
    LossTracker,
    PerturbationNormTracker,
    PredictionTracker,
)

# Load the pretrained model and a small MNIST test loader.
device = "cpu"
model_path = "example_data/models/mnist"
dataset_path = "example_data/datasets/"
net = get_mnist_model(model_path).to(device)
test_loader = get_mnist_loader(dataset_path)

# Wrap model
model = BasePytorchClassifier(net)

# Test accuracy on original data
accuracy = Accuracy()(model, test_loader)
print(f"test accuracy: {accuracy.item():.2f}")

# Create and run attack
epsilon = 1
num_steps = 10
step_size = 0.05
perturbation_model = LpPerturbationModels.LINF
y_target = None

trackers = [
    LossTracker(),
    PredictionTracker(),
    PerturbationNormTracker(perturbation_model),
]

native_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=False,
    y_target=y_target,
    backend=Backends.NATIVE,
    trackers=trackers,
)

native_adv_ds = native_attack(model, test_loader)

for tracker in trackers:
    print(tracker.name)
    print(tracker.get())

# Test accuracy on adversarial examples
n_robust_accuracy = Accuracy()(model, native_adv_ds)
# fix: print the scalar value, not the raw tensor repr (consistent with the
# clean-accuracy print above)
print("robust accuracy native: ", n_robust_accuracy.item())

# Create and run attack
foolbox_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=False,
    y_target=y_target,
    backend=Backends.FOOLBOX,
)
f_adv_ds = foolbox_attack(model, test_loader)

advlib_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=False,
    loss_function="dlr",
    y_target=y_target,
    backend=Backends.ADVLIB,
)
al_adv_ds = advlib_attack(model, test_loader)

# Test accuracy on foolbox
f_robust_accuracy = Accuracy()(model, f_adv_ds)
print("robust accuracy foolbox: ", f_robust_accuracy.item())

# Test accuracy on adv lib
al_robust_accuracy = Accuracy()(model, al_adv_ds)
print("robust accuracy AdvLib: ", al_robust_accuracy.item())

native_data, native_labels = next(iter(native_adv_ds))
f_data, f_labels = next(iter(f_adv_ds))
real_data, real_labels = next(iter(test_loader))


# Per-sample Lp distance between the native and foolbox solutions.
distance = torch.linalg.norm(
    native_data.detach().cpu().flatten(start_dim=1)
    - f_data.detach().cpu().flatten(start_dim=1),
    ord=LpPerturbationModels.pert_models[perturbation_model],
    dim=1,
)
print("Solutions are :", distance, f"{perturbation_model} distant")
dataset_path = "example_data/datasets/"
net = get_mnist_model(model_path).to(device)
test_loader = get_mnist_loader(dataset_path)

# Wrap the network into the SecML-Torch model interface.
model = BasePytorchClassifier(net)

# Baseline accuracy on clean data.
accuracy = Accuracy()(model, test_loader)
print(f"test accuracy: {accuracy.item():.2f}")

# PGD hyperparameters; random_start=True makes every run begin from a
# different random perturbation, so repeated runs explore different paths.
epsilon = 0.15
num_steps = 3
step_size = 0.05
perturbation_model = LpPerturbationModels.LINF
y_target = None

pgd_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=True,
    y_target=y_target,
    backend=Backends.NATIVE,
)

# Run the randomized attack three times and ensemble the outcomes.
multiple_attack_results = [pgd_attack(model, test_loader) for _ in range(3)]
criterion = FixedEpsilonEnsemble(loss_fn=torch.nn.CrossEntropyLoss())
best_advs = criterion(model, test_loader, multiple_attack_results)

# Accuracy on the per-sample best adversarial examples.
n_robust_accuracy = Accuracy()(model, best_advs)
print(f"RA best advs: {n_robust_accuracy.item():.2f}")

# Worst-case accuracy over the whole ensemble of runs.
n_robust_accuracy = AccuracyEnsemble()(model, multiple_attack_results)
print(f"RA ensemble: {n_robust_accuracy.item():.2f}")

n_asr = EnsembleSuccessRate(y_target=y_target)(model, multiple_attack_results)
print(f"ASR ensemble: {n_asr.item():.2f}")

# Per-run robust accuracy and attack success rate.
for i, res in enumerate(multiple_attack_results):
    n_robust_accuracy = Accuracy()(model, res)
    print(f"RA attack: {i}: {n_robust_accuracy.item():.2f}")

    asr = AttackSuccessRate(y_target=y_target)(model, res)
    print(f"ASR attack: {i}: {asr.item():.2f}")
from loaders.get_loaders import get_mnist_loader
from models.mnist_net import get_mnist_model
from secmlt.adv.backends import Backends
from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.adv.evasion.pgd import PGD
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier

# Load the pretrained model and a small MNIST test loader.
device = "cpu"
model_path = "example_data/models/mnist"
dataset_path = "example_data/datasets/"
net = get_mnist_model(model_path).to(device)
test_loader = get_mnist_loader(dataset_path)

# Wrap the network into the SecML-Torch model interface.
model = BasePytorchClassifier(net)

# Baseline accuracy on clean data.
accuracy = Accuracy()(model, test_loader)
print(f"test accuracy: {accuracy.item():.2f}")


# Shared PGD hyperparameters for both stages of the chain.
epsilon = 0.3
num_steps = 10
step_size = 0.05
perturbation_model = LpPerturbationModels.LINF
y_target = None

pgd_kwargs = {
    "perturbation_model": perturbation_model,
    "epsilon": epsilon,
    "num_steps": num_steps,
    "step_size": step_size,
    "random_start": False,
    "y_target": y_target,
    "backend": Backends.NATIVE,
}
attack_1 = PGD(**pgd_kwargs)
attack_2 = PGD(**pgd_kwargs)

# Chain the attacks: the second stage is initialized with the first
# stage's output instead of the clean samples.
attack_2.initializer = attack_1


adv_ds = attack_2(model, test_loader)


# Accuracy on the adversarial examples produced by the chained attack.
n_robust_accuracy = Accuracy()(model, adv_ds)
print("robust accuracy: ", n_robust_accuracy.item())
from loaders.get_loaders import get_mnist_loader
from models.mnist_net import get_mnist_model
from secmlt.adv.backends import Backends
from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.adv.evasion.pgd import PGD
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
from secmlt.trackers import (
    GradientNormTracker,
    GradientsTracker,
    LossTracker,
    PerturbationNormTracker,
    PredictionTracker,
    SampleTracker,
    ScoresTracker,
    TensorboardTracker,
)

# Load the pretrained model and a small MNIST test loader.
device = "cpu"
model_path = "example_data/models/mnist"
dataset_path = "example_data/datasets/"
net = get_mnist_model(model_path).to(device)
test_loader = get_mnist_loader(dataset_path)

# Wrap the network into the SecML-Torch model interface.
model = BasePytorchClassifier(net)

# Baseline accuracy on clean data.
accuracy = Accuracy()(model, test_loader)
print(f"test accuracy: {accuracy.item():.2f}")

# PGD hyperparameters.
epsilon = 0.2
num_steps = 200
step_size = 0.01
perturbation_model = LpPerturbationModels.LINF
y_target = None

# Per-iteration quantities to log; TensorboardTracker writes them all
# into the given log directory.
logged_trackers = [
    LossTracker(),
    PredictionTracker(),
    PerturbationNormTracker("linf"),
    GradientNormTracker(),
    SampleTracker(),
    ScoresTracker(),
    GradientsTracker(),
]

tensorboard_tracker = TensorboardTracker("example_data/logs/pgd", logged_trackers)

native_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=False,
    y_target=y_target,
    backend=Backends.NATIVE,
    trackers=tensorboard_tracker,
)
native_adv_ds = native_attack(model, test_loader)
class MNISTNet(torch.nn.Module):
    """Small fully-connected classifier for 28x28 MNIST digits."""

    def __init__(self):
        """Create the three-layer MLP (784 -> 200 -> 200 -> 10)."""
        super().__init__()
        self.fc1 = torch.nn.Linear(784, 200)
        self.fc2 = torch.nn.Linear(200, 200)
        self.fc3 = torch.nn.Linear(200, 10)

    def forward(self, x):
        """Return the class scores (logits) for a batch of images."""
        x = x.flatten(1)  # (batch, 1, 28, 28) -> (batch, 784)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)


def get_mnist_model(path):
    """
    Return a pretrained MNISTNet, downloading its weights into *path* if missing.

    The module is returned in eval mode with weights loaded on CPU.
    """
    net = MNISTNet()
    path = Path(path)
    model_weights_path = path / "mnist_model.pt"
    if not model_weights_path.exists():
        path.mkdir(exist_ok=True, parents=True)
        model_id = "12h1tXK442jHSE7wtsPpt8tU8f04R4nHM"
        download_gdrive(model_id, model_weights_path)

    # fix: local was misspelled ("model_weigths"); also load the state dict
    # before switching to eval mode so the returned module is fully set up.
    model_weights = torch.load(model_weights_path, map_location="cpu")
    net.load_state_dict(model_weights)
    net.eval()
    return net
accuracy = Accuracy()(model, test_data_loader)
print("Accuracy:", accuracy.item())

# Create and run attack
epsilon = 0.5
num_steps = 10
step_size = 0.005
perturbation_model = LpPerturbationModels.LINF
y_target = None
native_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=False,
    y_target=y_target,
    backend=Backends.NATIVE,
)
native_adv_ds = native_attack(model, test_data_loader)

# Test accuracy on adversarial examples
n_robust_accuracy = Accuracy()(model, native_adv_ds)
print("Robust Accuracy (PGD Native): ", n_robust_accuracy.item())

# Create and run attack
foolbox_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=False,
    y_target=y_target,
    backend=Backends.FOOLBOX,
)
f_adv_ds = foolbox_attack(model, test_data_loader)

# Test accuracy on adversarial examples
# fix: this printed n_robust_accuracy (the native result) instead of the
# foolbox result computed on the line above.
f_robust_accuracy = Accuracy()(model, f_adv_ds)
print("Robust Accuracy (PGD Foolbox): ", f_robust_accuracy.item())

# Create and run attack
advlib_attack = PGD(
    perturbation_model=perturbation_model,
    epsilon=epsilon,
    num_steps=num_steps,
    step_size=step_size,
    random_start=False,
    loss_function="dlr",
    y_target=y_target,
    backend=Backends.ADVLIB,
)
al_adv_ds = advlib_attack(model, test_data_loader)

# Test accuracy on adversarial examples
# fix: result was stored in the misnamed f_robust_accuracy and the print
# again showed the native result.
al_robust_accuracy = Accuracy()(model, al_adv_ds)
print("Robust Accuracy (PGD AdvLib): ", al_robust_accuracy.item())
from pathlib import Path

import torch
import torchvision.datasets
from models.mnist_net import MNISTNet
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
from secmlt.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer
from torch.optim import Adam
from torch.utils.data import DataLoader

dataset_path = "example_data/datasets/"
device = "cpu"
net = MNISTNet()
net.to(device)
optimizer = Adam(lr=1e-3, params=net.parameters())
training_dataset = torchvision.datasets.MNIST(
    transform=torchvision.transforms.ToTensor(),
    train=True,
    root=dataset_path,
    download=True,
)
training_data_loader = DataLoader(training_dataset, batch_size=64, shuffle=False)
test_dataset = torchvision.datasets.MNIST(
    transform=torchvision.transforms.ToTensor(),
    train=False,
    root=dataset_path,
    download=True,
)
test_data_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# Training MNIST model
trainer = BasePyTorchTrainer(optimizer, epochs=1)
model = BasePytorchClassifier(net, trainer=trainer)
model.train(training_data_loader)

# Test MNIST model
accuracy = Accuracy()(model, test_data_loader)
print("test accuracy: ", accuracy)

model_path = Path("example_data/models/mnist")
# fix: torch.save does not create parent directories, so saving failed with
# FileNotFoundError on a fresh checkout; create the directory first.
model_path.mkdir(parents=True, exist_ok=True)
torch.save(model.model.state_dict(), model_path / "mnist_model.pt")
-------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-cov 3 | foolbox 4 | adv-lib 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch 2 | torchvision 3 | scikit-learn 4 | -------------------------------------------------------------------------------- /ruff.toml: -------------------------------------------------------------------------------- 1 | # same as black 2 | line-length = 88 3 | indent-width = 4 4 | 5 | # assume Python 3.10 6 | target-version = "py310" 7 | src = ["../src"] 8 | 9 | [lint] 10 | select = ["ALL", "D107"] 11 | ignore = [ 12 | "FA102", # dynamic type hinting 13 | "S101", # use of assert 14 | "ARG001", # unused function argument 15 | "ARG002", # unused method argument 16 | "ANN101", # annotation for self 17 | "ANN102", # annotation of cls 18 | "PLR0913", # too many arguments 19 | "ANN002", # type annotations for args 20 | "ANN003", # type annotations for kwargs 21 | "ARG004", # unused kwargs 22 | "PLW2901", # for loop variable overwritten 23 | "SLF001", # use of private methods, 24 | "FBT001", # boolean type positional argument 25 | "FBT002", # boolean type default argument 26 | "COM812", # flake8-commas "Trailing comma missing" 27 | "ISC001", # implicitly concatenated string literals on one line 28 | "UP007", # conflict non-pep8 annotations 29 | "S311" # random generator not suitable for cryptographic purposes 30 | ] 31 | 32 | [lint.per-file-ignores] 33 | "test_*.py" = [ 34 | "D", # force docstrings 35 | "ANN", # annotations for tests 36 | "PT006", # mark parametrize 37 | ] 38 | "*/tests/*.py" = ["D104"] 39 | "setup.py" = ["D"] 40 | "examples/*" = [ 41 | "D", # docstrings 42 | "INP001", # init file in folder 43 | "ANN", # annotations 44 | "T20" # print 45 
import pathlib

from setuptools import find_packages, setup

here = pathlib.Path.cwd()
readme_path = here / "README.md"
version_path = here / "src/secmlt" / "VERSION"


# Get the long description from the README file
with readme_path.open() as f:
    long_description = f.read()

# Get the version from VERSION file
# fix: strip the trailing newline so the distribution version is "1.2.2",
# not "1.2.2\n".
with version_path.open() as f:
    version = f.read().strip()


# fix: the code base uses PEP 604 unions evaluated at runtime (e.g.
# ``int | None`` in secmlt signatures) and ruff targets py310, so the
# package cannot run on Python < 3.10; classifiers and python_requires
# are aligned accordingly.
CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development
Topic :: Scientific/Engineering
"""


setup(
    name="secml-torch",
    version=version,
    description="SecML-Torch Library",
    classifiers=[_f for _f in CLASSIFIERS.split("\n") if _f],
    long_description=long_description,
    long_description_content_type="text/markdown",
    package_dir={"": "src"},
    packages=find_packages(
        where="src",
        exclude=[
            "*.tests",
            "*.tests.*",
            "tests.*",
            "tests",
        ],
    ),
    data_files=[("src/secmlt/VERSION", ["src/secmlt/VERSION"])],
    include_package_data=True,
    url="https://secml-torch.readthedocs.io/en/latest/",
    license="MIT",
    author="Maura Pintor, Luca Demetrio",
    author_email="maura.pintor@unica.it, luca.demetrio@unige.it",
    install_requires=["torch>=1.4,!=1.5.*", "torchvision>=0.5,!=0.6.*"],
    extras_require={
        "foolbox": ["foolbox>=3.3.0"],
        "tensorboard": ["tensorboard"],
        "adv_lib": ["adv_lib"],
    },
    python_requires=">=3.10",
)
# noqa: D104

import pathlib

version_path = pathlib.Path(__file__).parent / "VERSION"

# fix: strip the trailing newline from the VERSION file so that
# __version__ is "1.2.2" rather than "1.2.2\n" and compares cleanly.
with version_path.open() as f:
    __version__ = f.read().strip()
"""Generic wrapper for Adversarial Library evasion attacks."""

from collections.abc import Callable
from typing import Literal

import torch
from secmlt.adv.evasion.base_evasion_attack import TRACKER_TYPE, BaseEvasionAttack
from secmlt.models.base_model import BaseModel
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier


class BaseAdvLibEvasionAttack(BaseEvasionAttack):
    """Generic wrapper for Adversarial Library Evasion attacks."""

    def __init__(
        self,
        advlib_attack: Callable[..., torch.Tensor],
        epsilon: float = torch.inf,
        y_target: int | None = None,
        loss_function: str = "ce",
        lb: float = 0.0,
        ub: float = 1.0,
        trackers: type[TRACKER_TYPE] | None = None,
    ) -> None:
        """
        Wrap Adversarial Library attacks.

        Parameters
        ----------
        advlib_attack : Callable[..., torch.Tensor]
            The Adversarial Library attack function to wrap.
            The function returns the adversarial examples.
        epsilon : float, optional
            The perturbation constraint. The default value is
            torch.inf, which means no constraint.
        y_target : int | None, optional
            The target label for the attack. If None, the attack is
            untargeted. The default value is None.
        loss_function : str, optional
            The loss function to be used for the attack. The default value is "ce".
        lb : float, optional
            The lower bound for the perturbation. The default value is 0.0.
        ub : float, optional
            The upper bound for the perturbation. The default value is 1.0.
        trackers : type[TRACKER_TYPE] | None, optional
            Trackers for the attack (unallowed in Adversarial Library), by default None.
        """
        self.advlib_attack = advlib_attack
        self.lb = lb
        self.ub = ub
        self.epsilon = epsilon
        self.y_target = y_target
        self.trackers = trackers
        self.loss_function = loss_function
        super().__init__()

    @classmethod
    def _trackers_allowed(cls) -> Literal[False]:
        return False

    def _run(
        self,
        model: BaseModel,
        samples: torch.Tensor,
        labels: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # fix: the return annotation said torch.Tensor, but two tensors are
        # returned: the adversarial examples and their deltas.
        """
        Run the wrapped Adversarial Library attack on a batch.

        Parameters
        ----------
        model : BaseModel
            Model to attack. Must be a BasePytorchClassifier, since the
            wrapped attacks operate on PyTorch modules.
        samples : torch.Tensor
            Input samples.
        labels : torch.Tensor
            Labels (or targets, if the attack is targeted) for the samples.

        Returns
        -------
        tuple[torch.Tensor, torch.Tensor]
            The adversarial examples and the applied perturbations (deltas).

        Raises
        ------
        NotImplementedError
            If the model is not a BasePytorchClassifier.
        """
        if not isinstance(model, BasePytorchClassifier):
            msg = "Model type not supported."
            raise NotImplementedError(msg)
        device = model._get_device()
        samples = samples.to(device)
        labels = labels.to(device)
        # NOTE: the ``ε`` keyword is the Adversarial Library spelling of the
        # perturbation budget.
        advx = self.advlib_attack(
            model=model,
            inputs=samples,
            labels=labels,
            ε=self.epsilon,
            targeted=(self.y_target is not None),
            loss_function=self.loss_function,
        )

        delta = advx - samples
        return advx, delta
"""Wrapper of the PGD attack implemented in Adversarial Library."""

from functools import partial

from adv_lib.attacks import pgd_linf
from secmlt.adv.evasion.advlib_attacks.advlib_base import BaseAdvLibEvasionAttack
from secmlt.adv.evasion.perturbation_models import LpPerturbationModels


class PGDAdvLib(BaseAdvLibEvasionAttack):
    """Wrapper of the Adversarial Library implementation of the PGD attack."""

    def __init__(
        self,
        perturbation_model: str,
        epsilon: float,
        num_steps: int,
        random_start: bool,
        step_size: float,
        restarts: int = 1,
        loss_function: str = "ce",
        y_target: int | None = None,
        lb: float = 0.0,
        ub: float = 1.0,
        **kwargs,
    ) -> None:
        """
        Initialize a PGD attack with the Adversarial Library backend.

        Parameters
        ----------
        perturbation_model : str
            The perturbation model to be used for the attack.
        epsilon : float
            The maximum perturbation allowed.
        num_steps : int
            The number of iterations for the attack.
        random_start : bool
            If True, the perturbation will be randomly initialized.
        step_size : float
            The attack step size.
        restarts : int, optional
            The number of attack restarts. The default value is 1.
        loss_function : str, optional
            The loss function to be used for the attack. The default value is "ce".
        y_target : int | None, optional
            The target label for the attack. If None, the attack is
            untargeted. The default value is None.
        lb : float, optional
            The lower bound for the perturbation. The default value is 0.0.
        ub : float, optional
            The upper bound for the perturbation. The default value is 1.0.

        Raises
        ------
        ValueError
            If the provided `loss_function` is not supported by the PGD attack
            using the Adversarial Library backend.
        """
        perturbation_models = {
            LpPerturbationModels.LINF: pgd_linf,
        }
        losses = ["ce", "dl", "dlr"]
        if isinstance(loss_function, str):
            if loss_function not in losses:
                msg = f"PGD AdvLib supports only these losses: {losses}"
                raise ValueError(msg)
        else:
            # non-string values fall back to the default cross-entropy loss
            loss_function = losses[0]

        advlib_attack_func = perturbation_models.get(perturbation_model)
        advlib_attack = partial(
            advlib_attack_func,
            steps=num_steps,
            random_init=random_start,
            restarts=restarts,
            loss_function=loss_function,
            absolute_step_size=step_size,
        )

        super().__init__(
            advlib_attack=advlib_attack,
            epsilon=epsilon,
            y_target=y_target,
            # fix: forward the validated loss function to the base class.
            # Without this, BaseAdvLibEvasionAttack._run passes its own
            # default ("ce") as a call-time keyword, which overrides the
            # partial's keyword and silently discards the user's choice
            # (e.g. "dlr").
            loss_function=loss_function,
            lb=lb,
            ub=ub,
        )

    @staticmethod
    def get_perturbation_models() -> set[str]:
        """
        Check the perturbation models implemented for this attack.

        Returns
        -------
        set[str]
            The list of perturbation models implemented for this attack.
        """
        return {
            LpPerturbationModels.LINF,
        }
"""Ensemble metrics for getting best results across multiple attacks."""

from abc import ABC, abstractmethod
from typing import Union

import torch
from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.models.base_model import BaseModel
from secmlt.utils.tensor_utils import atleast_kd
from torch.utils.data import DataLoader, TensorDataset


class Ensemble(ABC):
    """Abstract class for creating an ensemble metric."""

    def __call__(
        self,
        model: BaseModel,
        data_loader: DataLoader,
        adv_loaders: list[DataLoader],
    ) -> DataLoader[tuple[torch.Tensor]]:
        """
        Get the worst-case of the metric with the given implemented criterion.

        Parameters
        ----------
        model : BaseModel
            Model to use for predictions.
        data_loader : DataLoader
            Test dataloader.
        adv_loaders : list[DataLoader]
            List of dataloaders returned by multiple attacks.

        Returns
        -------
        DataLoader[torch.Tuple[torch.Tensor]]
            The worst-case metric computed on the multiple attacks.
        """
        chosen_batches = []
        label_batches = []
        # Advance every attack loader in lockstep with the clean loader.
        attack_iterators = [iter(loader) for loader in adv_loaders]
        for samples, labels in data_loader:
            incumbent = samples.clone()
            for attack_iterator in attack_iterators:
                candidate, _ = next(attack_iterator)
                incumbent = self._get_best(model, samples, labels, candidate, incumbent)
            chosen_batches.append(incumbent)
            label_batches.append(labels)
        best_dataset = TensorDataset(
            torch.vstack(chosen_batches),
            torch.hstack(label_batches),
        )
        return DataLoader(best_dataset, batch_size=data_loader.batch_size)

    @abstractmethod
    def _get_best(
        self,
        model: BaseModel,
        samples: torch.Tensor,
        labels: torch.Tensor,
        x_adv: torch.Tensor,
        best_x_adv: torch.Tensor,
    ) -> torch.Tensor:
        """
        Get the best result from multiple attacks.

        Parameters
        ----------
        model : BaseModel
            Model to use to predict.
        samples : torch.Tensor
            Input samples.
        labels : torch.Tensor
            Labels for the samples.
        x_adv : torch.Tensor
            Adversarial examples.
        best_x_adv : torch.Tensor
            Best adversarial examples found so far.

        Returns
        -------
        torch.Tensor
            Best adversarial examples between the current x_adv
            and the ones already tested on the given model.
        """
        ...
103 | """ 104 | self.perturbation_model = perturbation_model 105 | 106 | def _get_best( 107 | self, 108 | model: BaseModel, 109 | samples: torch.Tensor, 110 | labels: torch.Tensor, 111 | x_adv: torch.Tensor, 112 | best_x_adv: torch.Tensor, 113 | ) -> torch.Tensor: 114 | """ 115 | Get the adversarial examples with minimal perturbation. 116 | 117 | Parameters 118 | ---------- 119 | model : BaseModel 120 | Model to use to predict. 121 | samples : torch.Tensor 122 | Input samples. 123 | labels : torch.Tensor 124 | Labels for the samples. 125 | x_adv : torch.Tensor 126 | Adversarial examples. 127 | best_x_adv : torch.Tensor 128 | Best adversarial examples found so far. 129 | 130 | Returns 131 | ------- 132 | torch.Tensor 133 | The minimum-distance adversarial examples found so far. 134 | """ 135 | preds = model(x_adv).argmax(dim=1) 136 | is_adv = preds.type(labels.dtype) == labels 137 | norms = ( 138 | (samples - x_adv) 139 | .flatten(start_dim=1) 140 | .norm(LpPerturbationModels.get_p(self.perturbation_model), dim=-1) 141 | ) 142 | best_adv_norms = ( 143 | (samples - best_x_adv) 144 | .flatten(start_dim=1) 145 | .norm(LpPerturbationModels.get_p(self.perturbation_model)) 146 | ) 147 | is_best = torch.logical_and(norms < best_adv_norms, is_adv) 148 | 149 | return torch.where( 150 | atleast_kd(is_best, len(x_adv.shape)), 151 | x_adv, 152 | best_x_adv, 153 | ) 154 | 155 | 156 | class FixedEpsilonEnsemble(Ensemble): 157 | """Wrapper for ensembling results of multiple fixed-epsilon attacks.""" 158 | 159 | def __init__( 160 | self, 161 | loss_fn: torch.nn.Module, 162 | maximize: bool = True, 163 | y_target: Union[torch.Tensor, None] = None, 164 | ) -> None: 165 | """ 166 | Create fixed epsilon ensemble. 167 | 168 | Parameters 169 | ---------- 170 | loss_fn : torch.nn.Module 171 | Loss function to maximize (or minimize). 172 | maximize : bool, optional 173 | If True maximizes the loss otherwise it minimizes it, by default True. 
174 | y_target : torch.Tensor | None, optional 175 | Target label for targeted attacks, None for untargeted, by default None. 176 | """ 177 | self.maximize = maximize 178 | self.loss_fn = loss_fn 179 | self.y_target = y_target 180 | 181 | def _get_best( 182 | self, 183 | model: BaseModel, 184 | samples: torch.Tensor, 185 | labels: torch.Tensor, 186 | x_adv: torch.Tensor, 187 | best_x_adv: torch.Tensor, 188 | ) -> torch.Tensor: 189 | """ 190 | Get the adversarial examples with maximum (or minimum) loss. 191 | 192 | Parameters 193 | ---------- 194 | model : BaseModel 195 | Model to use to predict. 196 | samples : torch.Tensor 197 | Input samples. 198 | labels : torch.Tensor 199 | Labels for the samples. 200 | x_adv : torch.Tensor 201 | Adversarial examples. 202 | best_x_adv : torch.Tensor 203 | Best adversarial examples found so far. 204 | 205 | Returns 206 | ------- 207 | torch.Tensor 208 | The maximum-loss adversarial examples found so far. 209 | """ 210 | if self.y_target is None: 211 | targets = labels 212 | else: 213 | targets = torch.ones_like(labels) * self.y_target 214 | loss = self.loss_fn(model(x_adv), targets) 215 | best_adv_loss = self.loss_fn(model(best_x_adv), targets) 216 | if self.maximize is True: 217 | is_best = loss > best_adv_loss 218 | else: 219 | is_best = loss < best_adv_loss 220 | return torch.where( 221 | atleast_kd(is_best, len(x_adv.shape)), 222 | x_adv, 223 | best_x_adv, 224 | ) 225 | -------------------------------------------------------------------------------- /src/secmlt/adv/evasion/base_evasion_attack.py: -------------------------------------------------------------------------------- 1 | """Base classes for implementing attacks and wrapping backends.""" 2 | 3 | import importlib.util 4 | from abc import abstractmethod 5 | from typing import Literal 6 | 7 | import torch 8 | from secmlt.adv.backends import Backends 9 | from secmlt.models.base_model import BaseModel 10 | from torch.utils.data import DataLoader, TensorDataset 11 | 12 | # 
class BaseEvasionAttackCreator:
    """Generic creator for attacks."""

    @classmethod
    def get_implementation(cls, backend: str) -> "BaseEvasionAttack":
        """
        Return the attack implementation for the requested backend.

        Parameters
        ----------
        backend : str
            Backend identifier (see secmlt.adv.backends).

        Returns
        -------
        BaseEvasionAttack
            The attack implementation registered for that backend.
        """
        cls.check_backend_available(backend)
        getters = {
            Backends.FOOLBOX: cls.get_foolbox_implementation,
            Backends.ADVLIB: cls.get_advlib_implementation,
            Backends.NATIVE: cls._get_native_implementation,
        }
        return getters[backend]()

    @classmethod
    def check_backend_available(cls, backend: str) -> bool:
        """
        Validate that the attack supports the requested backend.

        Parameters
        ----------
        backend : str
            Backend identifier to validate.

        Returns
        -------
        bool
            True when the backend is supported.

        Raises
        ------
        NotImplementedError
            If the backend is not listed by `get_backends()`
            (see secmlt.adv.backends for the known identifiers).
        """
        if backend not in cls.get_backends():
            msg = "Unsupported or not-implemented backend."
            raise NotImplementedError(msg)
        return True

    @classmethod
    def get_foolbox_implementation(cls) -> "BaseEvasionAttack":
        """
        Return the Foolbox implementation of the attack.

        Returns
        -------
        BaseEvasionAttack
            Foolbox-backed attack implementation.

        Raises
        ------
        ImportError
            If the optional Foolbox dependency is not installed.
        """
        if importlib.util.find_spec("foolbox", None) is None:
            msg = "Foolbox extra not installed."
            raise ImportError(msg)
        return cls._get_foolbox_implementation()

    @staticmethod
    def _get_foolbox_implementation() -> "BaseEvasionAttack":
        # Hook overridden by subclasses that provide a Foolbox wrapper.
        msg = "Foolbox implementation not available."
        raise NotImplementedError(msg)

    @classmethod
    def get_advlib_implementation(cls) -> "BaseEvasionAttack":
        """
        Return the Adversarial Library implementation of the attack.

        Returns
        -------
        BaseEvasionAttack
            Adversarial-Library-backed attack implementation.

        Raises
        ------
        ImportError
            If the optional adv_lib dependency is not installed.
        """
        if importlib.util.find_spec("adv_lib", None) is None:
            msg = "Adversarial Library extra not installed."
            raise ImportError(msg)
        return cls._get_advlib_implementation()

    @staticmethod
    def _get_advlib_implementation() -> "BaseEvasionAttack":
        # Hook overridden by subclasses that provide an adv_lib wrapper.
        msg = "Adversarial Library implementation not available."
        raise NotImplementedError(msg)

    @staticmethod
    def _get_native_implementation() -> "BaseEvasionAttack":
        # Hook overridden by subclasses that provide a native implementation.
        msg = "Native implementation not available."
        raise NotImplementedError(msg)

    @staticmethod
    @abstractmethod
    def get_backends() -> set[str]:
        """
        List the backends implemented for this attack.

        Returns
        -------
        set[str]
            Backend identifiers supported by the concrete attack.

        Raises
        ------
        NotImplementedError
            Always, unless overridden by the inheriting attack class.
        """
        msg = "Backends should be specified in inherited class."
        raise NotImplementedError(msg)
197 | raise NotImplementedError(msg) 198 | 199 | @classmethod 200 | @abstractmethod 201 | def _trackers_allowed(cls) -> Literal[False]: 202 | return False 203 | 204 | @classmethod 205 | def check_perturbation_model_available(cls, perturbation_model: str) -> bool: 206 | """ 207 | Check whether the given perturbation model is available for the attack. 208 | 209 | Parameters 210 | ---------- 211 | perturbation_model : str 212 | A perturbation model. 213 | 214 | Returns 215 | ------- 216 | bool 217 | True if the attack implements the given perturbation model. 218 | 219 | Raises 220 | ------ 221 | NotImplementedError 222 | Raises NotImplementedError if not implemented in the inherited class. 223 | """ 224 | if perturbation_model in cls.get_perturbation_models(): 225 | return True 226 | msg = "Unsupported or not-implemented perturbation model." 227 | raise NotImplementedError(msg) 228 | 229 | @staticmethod 230 | @abstractmethod 231 | def get_perturbation_models() -> set[str]: 232 | """ 233 | Check the perturbation models implemented for the given attack. 234 | 235 | Returns 236 | ------- 237 | set[str] 238 | The set of perturbation models for which the attack is implemented. 239 | 240 | Raises 241 | ------ 242 | NotImplementedError 243 | Raises NotImplementedError if not implemented in the inherited class. 244 | """ 245 | msg = "Perturbation models should be specified in inherited class." 246 | raise NotImplementedError(msg) 247 | 248 | @abstractmethod 249 | def _run( 250 | self, 251 | model: BaseModel, 252 | samples: torch.Tensor, 253 | labels: torch.Tensor, 254 | ) -> torch.Tensor: ... 
class BaseFoolboxEvasionAttack(BaseEvasionAttack):
    """Generic wrapper for Foolbox Evasion attacks."""

    def __init__(
        self,
        foolbox_attack: type[Attack],
        epsilon: float = torch.inf,
        y_target: int | None = None,
        lb: float = 0.0,
        ub: float = 1.0,
        trackers: type[TRACKER_TYPE] | None = None,
    ) -> None:
        """
        Wrap Foolbox attacks.

        Parameters
        ----------
        foolbox_attack : Type[Attack]
            Foolbox attack class to wrap.
        epsilon : float, optional
            Perturbation constraint, by default torch.inf.
        y_target : int | None, optional
            Target label for the attack, None if untargeted, by default None.
        lb : float, optional
            Lower bound of the input space, by default 0.0.
        ub : float, optional
            Upper bound of the input space, by default 1.0.
        trackers : type[TRACKER_TYPE] | None, optional
            Trackers for the attack (unallowed in Foolbox), by default None.
        """
        self.foolbox_attack = foolbox_attack
        self.lb = lb
        self.ub = ub
        self.epsilon = epsilon
        self.y_target = y_target
        self.trackers = trackers
        super().__init__()

    @classmethod
    def _trackers_allowed(cls) -> Literal[False]:
        # Foolbox runs its optimization internally; per-step tracking is
        # not available for wrapped attacks.
        return False

    def _run(
        self,
        model: BaseModel,
        samples: torch.Tensor,
        labels: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # BUGFIX: return annotation corrected to tuple; this method returns
        # both the adversarial examples and their additive perturbation.
        if not isinstance(model, BasePytorchClassifier):
            msg = "Model type not supported."
            raise NotImplementedError(msg)
        device = model._get_device()
        samples = samples.to(device)
        labels = labels.to(device)
        foolbox_model = PyTorchModel(model.model, (self.lb, self.ub), device=device)
        if self.y_target is None:
            criterion = Misclassification(labels)
        else:
            # y_target is guaranteed non-None on this branch; the previous
            # inner `if self.y_target is not None ... else labels` was dead code.
            target = (torch.zeros_like(labels) + self.y_target).type(labels.dtype)
            target = target.to(device)
            criterion = TargetedMisclassification(target)

        _, advx, _ = self.foolbox_attack(
            model=foolbox_model,
            inputs=samples,
            criterion=criterion,
            epsilons=self.epsilon,
        )
        # foolbox deals only with additive perturbations
        delta = advx - samples
        return advx, delta
secmlt.adv.evasion.foolbox_attacks.foolbox_base import BaseFoolboxEvasionAttack 9 | from secmlt.adv.evasion.perturbation_models import LpPerturbationModels 10 | 11 | 12 | class PGDFoolbox(BaseFoolboxEvasionAttack): 13 | """Wrapper of the Foolbox implementation of the PGD attack.""" 14 | 15 | def __init__( 16 | self, 17 | perturbation_model: str, 18 | epsilon: float, 19 | num_steps: int, 20 | step_size: float, 21 | random_start: bool, 22 | y_target: int | None = None, 23 | lb: float = 0.0, 24 | ub: float = 1.0, 25 | **kwargs, 26 | ) -> None: 27 | """ 28 | Create PGD attack with Foolbox backend. 29 | 30 | Parameters 31 | ---------- 32 | perturbation_model : str 33 | Perturbation model for the attack. 34 | epsilon : float 35 | Maximum perturbation allowed. 36 | num_steps : int 37 | Number of iterations for the attack. 38 | step_size : float 39 | Attack step size. 40 | random_start : bool 41 | True for randomly initializing the perturbation. 42 | y_target : int | None, optional 43 | Target label for the attack, None for untargeted, by default None. 44 | lb : float, optional 45 | Lower bound of the input space, by default 0.0. 46 | ub : float, optional 47 | Upper bound of the input space, by default 1.0. 48 | """ 49 | perturbation_models = { 50 | LpPerturbationModels.L1: L1ProjectedGradientDescentAttack, 51 | LpPerturbationModels.L2: L2ProjectedGradientDescentAttack, 52 | LpPerturbationModels.LINF: LinfProjectedGradientDescentAttack, 53 | } 54 | foolbox_attack_cls = perturbation_models.get(perturbation_model) 55 | 56 | foolbox_attack = foolbox_attack_cls( 57 | abs_stepsize=step_size, 58 | steps=num_steps, 59 | random_start=random_start, 60 | ) 61 | 62 | super().__init__( 63 | foolbox_attack=foolbox_attack, 64 | epsilon=epsilon, 65 | y_target=y_target, 66 | lb=lb, 67 | ub=ub, 68 | ) 69 | 70 | @staticmethod 71 | def get_perturbation_models() -> set[str]: 72 | """ 73 | Check the perturbation models implemented for this attack. 
74 | 75 | Returns 76 | ------- 77 | set[str] 78 | The list of perturbation models implemented for this attack. 79 | """ 80 | return { 81 | LpPerturbationModels.L1, 82 | LpPerturbationModels.L2, 83 | LpPerturbationModels.LINF, 84 | } 85 | -------------------------------------------------------------------------------- /src/secmlt/adv/evasion/modular_attack.py: -------------------------------------------------------------------------------- 1 | """Implementation of modular iterative attacks with customizable components.""" 2 | 3 | from functools import partial 4 | from typing import Literal, Union 5 | 6 | import torch.nn 7 | from secmlt.adv.evasion.base_evasion_attack import BaseEvasionAttack 8 | from secmlt.adv.evasion.perturbation_models import LpPerturbationModels 9 | from secmlt.manipulations.manipulation import Manipulation 10 | from secmlt.models.base_model import BaseModel 11 | from secmlt.optimization.constraints import Constraint 12 | from secmlt.optimization.gradient_processing import GradientProcessing 13 | from secmlt.optimization.initializer import Initializer 14 | from secmlt.optimization.optimizer_factory import OptimizerFactory 15 | from secmlt.trackers.trackers import Tracker 16 | from secmlt.utils.tensor_utils import atleast_kd 17 | from torch.nn import CrossEntropyLoss 18 | from torch.optim import Optimizer 19 | 20 | CE_LOSS = "ce_loss" 21 | LOGIT_LOSS = "logit_loss" 22 | 23 | LOSS_FUNCTIONS = { 24 | CE_LOSS: CrossEntropyLoss, 25 | } 26 | 27 | 28 | class ModularEvasionAttackFixedEps(BaseEvasionAttack): 29 | """Modular evasion attack for fixed-epsilon attacks.""" 30 | 31 | def __init__( 32 | self, 33 | y_target: int | None, 34 | num_steps: int, 35 | step_size: float, 36 | loss_function: Union[str, torch.nn.Module], 37 | optimizer_cls: str | partial[Optimizer], 38 | manipulation_function: Manipulation, 39 | initializer: Initializer, 40 | gradient_processing: GradientProcessing, 41 | trackers: list[Tracker] | Tracker | None = None, 42 | ) -> None: 43 
| """ 44 | Create modular evasion attack. 45 | 46 | Parameters 47 | ---------- 48 | y_target : int | None 49 | Target label for the attack, None for untargeted. 50 | num_steps : int 51 | Number of iterations for the attack. 52 | step_size : float 53 | Attack step size. 54 | loss_function : str | torch.nn.Module 55 | Loss function to minimize. 56 | optimizer_cls : str | partial[Optimizer] 57 | Algorithm for solving the attack optimization problem. 58 | manipulation_function : Manipulation 59 | Manipulation function to perturb the inputs. 60 | initializer : Initializer 61 | Initialization for the perturbation delta. 62 | gradient_processing : GradientProcessing 63 | Gradient transformation function. 64 | trackers : list[Tracker] | Tracker | None, optional 65 | Trackers for logging, by default None. 66 | 67 | Raises 68 | ------ 69 | ValueError 70 | Raises ValueError if the loss is not in allowed 71 | list of loss functions. 72 | """ 73 | self.y_target = y_target 74 | self.num_steps = num_steps 75 | self.step_size = step_size 76 | self.trackers = trackers 77 | if isinstance(loss_function, str): 78 | if loss_function in LOSS_FUNCTIONS: 79 | self.loss_function = LOSS_FUNCTIONS[loss_function](reduction="none") 80 | else: 81 | msg = ( 82 | f"Loss function not found. Use one among {LOSS_FUNCTIONS.values()}" 83 | ) 84 | raise ValueError(msg) 85 | else: 86 | self.loss_function = loss_function 87 | 88 | if isinstance(optimizer_cls, str): 89 | optimizer_cls = OptimizerFactory.create_from_name( 90 | optimizer_cls, 91 | lr=step_size, 92 | ) 93 | 94 | self.optimizer_cls = optimizer_cls 95 | 96 | self._manipulation_function = manipulation_function 97 | self.initializer = initializer 98 | self.gradient_processing = gradient_processing 99 | 100 | super().__init__() 101 | 102 | @property 103 | def manipulation_function(self) -> Manipulation: 104 | """ 105 | Get the manipulation function for the attack. 
106 | 107 | Returns 108 | ------- 109 | Manipulation 110 | The manipulation function used in the attack. 111 | """ 112 | return self._manipulation_function 113 | 114 | @manipulation_function.setter 115 | def manipulation_function(self, manipulation_function: Manipulation) -> None: 116 | """ 117 | Set the manipulation function for the attack. 118 | 119 | Parameters 120 | ---------- 121 | manipulation_function : Manipulation 122 | The manipulation function to be used in the attack. 123 | """ 124 | self._manipulation_function = manipulation_function 125 | 126 | @classmethod 127 | def get_perturbation_models(cls) -> set[str]: 128 | """ 129 | Check if a given perturbation model is implemented. 130 | 131 | Returns 132 | ------- 133 | set[str] 134 | Set of perturbation models available for this attack. 135 | """ 136 | return { 137 | LpPerturbationModels.L1, 138 | LpPerturbationModels.L2, 139 | LpPerturbationModels.LINF, 140 | } 141 | 142 | @classmethod 143 | def _trackers_allowed(cls) -> Literal[True]: 144 | return True 145 | 146 | def _init_perturbation_constraints(self) -> list[Constraint]: 147 | msg = "Must be implemented accordingly" 148 | raise NotImplementedError(msg) 149 | 150 | def _create_optimizer(self, delta: torch.Tensor, **kwargs) -> Optimizer: 151 | return self.optimizer_cls([delta], lr=self.step_size, **kwargs) 152 | 153 | def forward_loss( 154 | self, model: BaseModel, x: torch.Tensor, target: torch.Tensor 155 | ) -> tuple[torch.Tensor, torch.Tensor]: 156 | """ 157 | Compute the forward for the loss function. 158 | 159 | Parameters 160 | ---------- 161 | model : BaseModel 162 | Model used by the attack run. 163 | x : torch.Tensor 164 | Input sample. 165 | target : torch.Tensor 166 | Target for computing the loss. 167 | 168 | Returns 169 | ------- 170 | tuple[torch.Tensor, torch.Tensor] 171 | Output scores and loss. 
172 | """ 173 | scores = model.decision_function(x) 174 | target = target.to(scores.device) 175 | losses = self.loss_function(scores, target) 176 | return scores, losses 177 | 178 | def _run( 179 | self, 180 | model: BaseModel, 181 | samples: torch.Tensor, 182 | labels: torch.Tensor, 183 | init_deltas: torch.Tensor = None, 184 | optim_kwargs: dict | None = None, 185 | ) -> tuple[torch.Tensor, torch.Tensor]: 186 | if optim_kwargs is None: 187 | optim_kwargs = {} 188 | multiplier = 1 if self.y_target is not None else -1 189 | target = ( 190 | torch.zeros_like(labels) + self.y_target 191 | if self.y_target is not None 192 | else labels 193 | ).type(labels.dtype) 194 | 195 | if init_deltas is not None: 196 | delta = init_deltas.data 197 | elif isinstance(self.initializer, BaseEvasionAttack): 198 | _, delta = self.initializer._run(model, samples, target) 199 | else: 200 | delta = self.initializer(samples.data) 201 | delta.requires_grad = True 202 | 203 | optimizer = self._create_optimizer(delta, **optim_kwargs) 204 | x_adv, delta = self.manipulation_function(samples, delta) 205 | x_adv.data, delta.data = self.manipulation_function(samples.data, delta.data) 206 | best_losses = torch.zeros(samples.shape[0]).fill_(torch.inf) 207 | best_delta = torch.zeros_like(samples) 208 | 209 | for i in range(self.num_steps): 210 | scores, losses = self.forward_loss(model=model, x=x_adv, target=target) 211 | losses *= multiplier 212 | loss = losses.sum() 213 | optimizer.zero_grad() 214 | loss.backward() 215 | grad_before_processing = delta.grad.data 216 | delta.grad.data = self.gradient_processing(delta.grad.data) 217 | optimizer.step() 218 | x_adv.data, delta.data = self.manipulation_function( 219 | samples.data, 220 | delta.data, 221 | ) 222 | if self.trackers is not None: 223 | for tracker in self.trackers: 224 | tracker.track( 225 | i, 226 | losses.detach().cpu().data, 227 | scores.detach().cpu().data, 228 | x_adv.detach().cpu().data, 229 | delta.detach().cpu().data, 230 | 
grad_before_processing.detach().cpu().data, 231 | ) 232 | 233 | # keep perturbation with highest loss 234 | best_delta.data = torch.where( 235 | atleast_kd(losses.detach().cpu() < best_losses, len(samples.shape)), 236 | delta.data, 237 | best_delta.data, 238 | ) 239 | best_losses.data = torch.where( 240 | losses.detach().cpu() < best_losses, 241 | losses.detach().cpu(), 242 | best_losses.data, 243 | ) 244 | x_adv, _ = self.manipulation_function(samples.data, best_delta.data) 245 | return x_adv, best_delta 246 | -------------------------------------------------------------------------------- /src/secmlt/adv/evasion/perturbation_models.py: -------------------------------------------------------------------------------- 1 | """Implementation of perturbation models for perturbations of adversarial examples.""" 2 | 3 | from typing import ClassVar 4 | 5 | 6 | class LpPerturbationModels: 7 | """Lp perturbation models.""" 8 | 9 | L0 = "l0" 10 | L1 = "l1" 11 | L2 = "l2" 12 | LINF = "linf" 13 | pert_models: ClassVar[dict[str, float]] = {L0: 0, L1: 1, L2: 2, LINF: float("inf")} 14 | 15 | @classmethod 16 | def is_perturbation_model_available(cls, perturbation_model: str) -> bool: 17 | """ 18 | Check availability of the perturbation model requested. 19 | 20 | Parameters 21 | ---------- 22 | perturbation_model : str 23 | A perturbation model as a string. 24 | 25 | Returns 26 | ------- 27 | bool 28 | True if the perturbation model is found in PerturbationModels.pert_models. 29 | """ 30 | return perturbation_model in (cls.pert_models) 31 | 32 | @classmethod 33 | def get_p(cls, perturbation_model: str) -> float: 34 | """ 35 | Get the float representation of p from the given string. 36 | 37 | Parameters 38 | ---------- 39 | perturbation_model : str 40 | One of the strings defined in PerturbationModels.pert_models. 41 | 42 | Returns 43 | ------- 44 | float 45 | The float representation of p, to use. e.g., in torch.norm(p=...). 
class PGD(BaseEvasionAttackCreator):
    """Creator for the Projected Gradient Descent (PGD) attack."""

    def __new__(
        cls,
        perturbation_model: str,
        epsilon: float,
        num_steps: int,
        step_size: float,
        random_start: bool = False,
        y_target: int | None = None,
        lb: float = 0.0,
        ub: float = 1.0,
        backend: str = Backends.FOOLBOX,
        trackers: list[Tracker] | None = None,
        **kwargs,
    ) -> BaseEvasionAttack:
        """
        Create the PGD attack.

        Parameters
        ----------
        perturbation_model : str
            Perturbation model for the attack. Available: 1, 2, inf.
        epsilon : float
            Radius of the constraint for the Lp ball.
        num_steps : int
            Number of iterations for the attack.
        step_size : float
            Attack step size.
        random_start : bool, optional
            Whether to use a random initialization onto the Lp ball, by
            default False.
        y_target : int | None, optional
            Target label for a targeted attack, None
            for untargeted attack, by default None.
        lb : float, optional
            Lower bound of the input space, by default 0.0.
        ub : float, optional
            Upper bound of the input space, by default 1.0.
        backend : str, optional
            Backend to use to run the attack, by default Backends.FOOLBOX
        trackers : list[Tracker] | None, optional
            Trackers to check various attack metrics (see secmlt.trackers),
            available only for native implementation, by default None.

        Returns
        -------
        BaseEvasionAttack
            PGD attack instance.
        """
        # get_implementation already validates the backend, so the previous
        # explicit check_backend_available call here was redundant.
        implementation = cls.get_implementation(backend)
        implementation.check_perturbation_model_available(perturbation_model)
        return implementation(
            perturbation_model=perturbation_model,
            epsilon=epsilon,
            num_steps=num_steps,
            step_size=step_size,
            random_start=random_start,
            y_target=y_target,
            lb=lb,
            ub=ub,
            trackers=trackers,
            **kwargs,
        )

    @staticmethod
    def get_backends() -> list[str]:
        """
        Get available implementations for the PGD attack.

        Returns
        -------
        list[str]
            Backends implementing this attack.

        NOTE(review): the base class declares ``get_backends() -> set[str]``;
        this returns a list. Membership tests work either way, but confirm no
        caller relies on ordering before aligning the types.
        """
        return [Backends.FOOLBOX, Backends.ADVLIB, Backends.NATIVE]

    @staticmethod
    def _get_foolbox_implementation() -> type["PGDFoolbox"]:  # noqa: F821
        # Import lazily so the foolbox extra remains optional.
        if importlib.util.find_spec("foolbox", None) is None:
            msg = "Foolbox extra not installed"
            raise ImportError(msg)
        from secmlt.adv.evasion.foolbox_attacks.foolbox_pgd import PGDFoolbox

        return PGDFoolbox

    @staticmethod
    def _get_advlib_implementation() -> type["PGDAdvLib"]:  # noqa: F821
        # Import lazily so the adv_lib extra remains optional.
        if importlib.util.find_spec("adv_lib", None) is None:
            msg = "Adversarial Library extra not installed"
            raise ImportError(msg)
        from secmlt.adv.evasion.advlib_attacks import PGDAdvLib

        return PGDAdvLib

    @staticmethod
    def _get_native_implementation() -> type["PGDNative"]:
        # Native implementation defined below in this module.
        return PGDNative


class PGDNative(ModularEvasionAttackFixedEps):
    """Native implementation of the Projected Gradient Descent attack."""

    def __init__(
        self,
        perturbation_model: str,
        epsilon: float,
        num_steps: int,
        step_size: float,
        random_start: bool,
        y_target: int | None = None,
        lb: float = 0.0,
        ub: float = 1.0,
        trackers: list[Tracker] | None = None,
        **kwargs,
    ) -> None:
        """
        Create Native PGD attack.

        Parameters
        ----------
        perturbation_model : str
            Perturbation model for the attack. Available: 1, 2, inf.
        epsilon : float
            Radius of the constraint for the Lp ball.
        num_steps : int
            Number of iterations for the attack.
        step_size : float
            Attack step size.
        random_start : bool
            Whether to use a random initialization onto the Lp ball.
        y_target : int | None, optional
            Target label for a targeted attack, None
            for untargeted attack, by default None.
        lb : float, optional
            Lower bound of the input space, by default 0.0.
        ub : float, optional
            Upper bound of the input space, by default 1.0.
        trackers : list[Tracker] | None, optional
            Trackers to check various attack metrics (see secmlt.trackers),
            available only for native implementation, by default None.
        """
        # Map the perturbation model onto the matching Lp-ball constraint.
        perturbation_models = {
            LpPerturbationModels.L1: L1Constraint,
            LpPerturbationModels.L2: L2Constraint,
            LpPerturbationModels.LINF: LInfConstraint,
        }

        if random_start:
            initializer = RandomLpInitializer(
                perturbation_model=perturbation_model,
                radius=epsilon,
            )
        else:
            initializer = Initializer()
        self.epsilon = epsilon
        gradient_processing = LinearProjectionGradientProcessing(perturbation_model)
        perturbation_constraints = [
            perturbation_models[perturbation_model](radius=self.epsilon),
        ]
        domain_constraints = [ClipConstraint(lb=lb, ub=ub)]
        manipulation_function = AdditiveManipulation(
            domain_constraints=domain_constraints,
            perturbation_constraints=perturbation_constraints,
        )
        super().__init__(
            y_target=y_target,
            num_steps=num_steps,
            step_size=step_size,
            loss_function=CE_LOSS,
            optimizer_cls=OptimizerFactory.create_sgd(step_size),
            manipulation_function=manipulation_function,
            gradient_processing=gradient_processing,
            initializer=initializer,
            trackers=trackers,
        )
class BackdoorDatasetPyTorch(PoisoningDatasetPyTorch):
    """Dataset class for adding triggers for backdoor attacks."""

    def __init__(
        self,
        dataset: Dataset,
        data_manipulation_func: callable,
        trigger_label: int = 0,
        portion: float | None = None,
        poisoned_indexes: Union[list[int], torch.Tensor, None] = None,
    ) -> None:
        """
        Create the backdoored dataset.

        Parameters
        ----------
        dataset : torch.utils.data.Dataset
            PyTorch dataset.
        data_manipulation_func : callable
            Function to manipulate the data and add the backdoor.
        trigger_label : int, optional
            Label to associate with the backdoored data (default 0).
        portion : float | None, optional
            Percentage of samples on which the backdoor will be injected.
            If both ``portion`` and ``poisoned_indexes`` are None (the
            defaults), the parent class poisons every sample.
        poisoned_indexes : list[int] | torch.Tensor | None, optional
            Specific indexes of samples to perturb. Alternative to portion.
        """
        # The label manipulation discards the original label and always
        # returns the fixed trigger label for poisoned samples.
        super().__init__(
            dataset=dataset,
            data_manipulation_func=data_manipulation_func,
            label_manipulation_func=lambda _: trigger_label,
            portion=portion,
            poisoned_indexes=poisoned_indexes,
        )
45 | raise ValueError(msg) 46 | # calculate number of samples to poison 47 | num_poisoned_samples = int(portion * self.data_len) 48 | 49 | # randomly select indices to poison 50 | self.poisoned_indexes = set( 51 | random.sample(range(self.data_len), num_poisoned_samples) 52 | ) 53 | elif poisoned_indexes is not None: 54 | self.poisoned_indexes = poisoned_indexes 55 | else: 56 | self.poisoned_indexes = range(self.data_len) 57 | 58 | self.data_manipulation_func = data_manipulation_func 59 | self.label_manipulation_func = label_manipulation_func 60 | 61 | def __len__(self) -> int: 62 | """Get number of samples.""" 63 | return self.data_len 64 | 65 | def __getitem__(self, idx: int) -> tuple[torch.Tensor, int]: 66 | """ 67 | Get item from the dataset. 68 | 69 | Parameters 70 | ---------- 71 | idx : int 72 | Index of the item to return 73 | 74 | Returns 75 | ------- 76 | tuple[torch.Tensor, int] 77 | Item at position specified by idx. 78 | """ 79 | x, label = self.dataset[idx] 80 | # poison portion of the data 81 | if idx in self.poisoned_indexes: 82 | x = self.data_manipulation_func(x=x.unsqueeze(0)).squeeze(0) 83 | target_label = self.label_manipulation_func(label) 84 | label = ( 85 | target_label 86 | if isinstance(label, int) 87 | else torch.Tensor(target_label).type(label.dtype) 88 | ) 89 | return x, label 90 | -------------------------------------------------------------------------------- /src/secmlt/data/__init__.py: -------------------------------------------------------------------------------- 1 | """Functionalities for handling data.""" 2 | -------------------------------------------------------------------------------- /src/secmlt/data/distributions.py: -------------------------------------------------------------------------------- 1 | """Implementation for uncommon distributions.""" 2 | 3 | from abc import ABC, abstractmethod 4 | 5 | import torch 6 | from torch.distributions.gamma import Gamma 7 | 8 | 9 | class Distribution(ABC): 10 | """Abstract class for 
distributions.""" 11 | 12 | @abstractmethod 13 | def sample(self, shape: torch.Size) -> torch.Tensor: 14 | """ 15 | Sample from the distribution. 16 | 17 | This method generates a sample from the distribution, with the specified shape. 18 | If no shape is specified, a single sample is returned. 19 | 20 | Parameters 21 | ---------- 22 | shape : torch.Size, optional 23 | The shape of the sample to be generated. Default is torch.Size(), which 24 | corresponds to a single sample. 25 | 26 | Returns 27 | ------- 28 | torch.Tensor 29 | A tensor of samples from the distribution, with the specified shape. 30 | """ 31 | ... 32 | 33 | 34 | class Rademacher(Distribution): 35 | """Samples from Rademacher distribution (-1, 1) with equal probability.""" 36 | 37 | def sample(self, shape: torch.Size) -> torch.Tensor: 38 | """ 39 | Sample from the Rademacher distribution. 40 | 41 | This method generates a sample from the Rademacher distribution, where each 42 | sample is either -1 or 1 with equal probability. The shape of the output 43 | is determined by the `shape` parameter. 44 | 45 | Parameters 46 | ---------- 47 | shape : torch.Size 48 | The shape of the sample to be generated. 49 | 50 | Returns 51 | ------- 52 | torch.Tensor 53 | A tensor of samples from the Rademacher distribution, with values -1 or 1. 54 | 55 | Examples 56 | -------- 57 | >>> dist = Rademacher() 58 | >>> sample = dist.sample((3, 4)) 59 | """ 60 | _prob = 0.5 61 | return torch.where((torch.rand(size=shape) < _prob), -1, 1) 62 | 63 | 64 | class GeneralizedNormal(Distribution): 65 | r""" 66 | Generalized normal distribution. 67 | 68 | .. math:: 69 | f(x; \mu, \alpha, \beta) = \frac{\beta}{2 \alpha 70 | \Gamma(1 / \beta)} e^{-(|x-\mu| / \alpha)^\beta} 71 | 72 | where `\mu` is the location parameter, `\alpha` is the scale 73 | parameter, and `\beta` is the shape parameter. 
74 | """ 75 | 76 | def sample(self, shape: torch.Size, p: float = 2) -> torch.Tensor: 77 | """ 78 | Sample from the generalized normal distribution. 79 | 80 | This method generates a sample from the generalized normal 81 | distribution, with shape parameter `p`. The shape of the 82 | output is determined by the `shape` parameter. 83 | 84 | Parameters 85 | ---------- 86 | shape : torch.Size 87 | The shape of the sample to be generated. 88 | p : float, optional 89 | The shape parameter of the generalized normal distribution. Default is 2. 90 | 91 | Returns 92 | ------- 93 | torch.Tensor 94 | A tensor of samples from the generalized normal distribution. 95 | 96 | Examples 97 | -------- 98 | >>> dist = GeneralizedNormal() 99 | >>> sample = dist.sample((3, 4)) 100 | """ 101 | g = Gamma(concentration=1 / p, rate=1).sample(sample_shape=shape) 102 | r = Rademacher().sample(shape=shape) 103 | return r * g ** (1 / p) 104 | -------------------------------------------------------------------------------- /src/secmlt/data/lp_uniform_sampling.py: -------------------------------------------------------------------------------- 1 | """Implementation of Lp uniform sampling.""" 2 | 3 | import torch 4 | from secmlt.adv.evasion.perturbation_models import LpPerturbationModels 5 | from secmlt.data.distributions import GeneralizedNormal 6 | from torch.distributions.exponential import Exponential 7 | 8 | 9 | class LpUniformSampling: 10 | """ 11 | Uniform sampling from the unit Lp ball. 12 | 13 | This class provides a method for sampling uniformly from the 14 | unit Lp ball, where Lp is a norm defined by a parameter `p`. 15 | The class supports sampling from the L0, L2, and Linf norms. 16 | 17 | The sampling method is based on the following reference: 18 | https://arxiv.org/abs/math/0503650 19 | 20 | Attributes 21 | ---------- 22 | p : str 23 | The norm to use for sampling. Must be one of 'l0', 'l1', 'l2', 'linf'. 
class LpUniformSampling:
    """
    Uniform sampling from the unit Lp ball.

    Provides a method for sampling uniformly from the unit Lp ball,
    where Lp is a norm defined by a parameter `p`. The class supports
    sampling from the L0, L2, and Linf norms.

    The sampling method is based on the following reference:
    https://arxiv.org/abs/math/0503650

    Attributes
    ----------
    p : str
        The norm to use for sampling. Must be one of 'l0', 'l1', 'l2', 'linf'.
    """

    def __init__(self, p: str = LpPerturbationModels.L2) -> None:
        """
        Initialize the LpUniformSampling object.

        Parameters
        ----------
        p : str, optional
            The norm to use for sampling. Must be one
            of 'L0', 'L2', or 'Linf'. Default is 'L2'.
        """
        self.p = p

    def sample_like(self, x: torch.Tensor) -> torch.Tensor:
        """
        Sample from the unit Lp ball with the same shape as a given tensor.

        Parameters
        ----------
        x : torch.Tensor
            The input tensor whose shape is used to determine the shape of the samples.

        Returns
        -------
        torch.Tensor
            A tensor of samples from the unit Lp ball, with the
            same shape as the input tensor `x`.
        """
        batch_size, n_dims = x.flatten(1).shape
        return self.sample(batch_size, n_dims).reshape(x.shape)

    def sample(self, num_samples: int = 1, dim: int = 2) -> torch.Tensor:
        """
        Sample uniformly from the unit Lp ball.

        Generates `num_samples` points of dimension `dim` from the unit Lp
        ball, following the algorithm referenced in the class docstring.

        Parameters
        ----------
        num_samples : int
            The number of samples to generate.
        dim : int
            The dimension of the samples.

        Returns
        -------
        torch.Tensor
            A tensor of samples from the unit Lp ball, with shape `(num_samples, dim)`.
        """
        size = torch.Size((num_samples, dim))
        p_value = LpPerturbationModels.get_p(self.p)

        if self.p == LpPerturbationModels.LINF:
            # each coordinate uniform in [-1, 1)
            ball = torch.rand(size=size) * 2 - 1
        elif self.p == LpPerturbationModels.L0:
            # NOTE(review): torch.rand is non-negative, so .sign() is 1
            # almost surely — confirm this is the intended L0 sampling.
            ball = torch.rand(size=size).sign()
        else:
            # general Lp case: normalize generalized-normal draws by an
            # exponentially-augmented radius (see class docstring reference)
            gauss = GeneralizedNormal().sample(size)
            exp_noise = Exponential(rate=1).sample(sample_shape=(num_samples,))
            radius = ((torch.abs(gauss) ** p_value).sum(-1) + exp_noise) ** (
                1 / p_value
            )
            ball = gauss / radius.unsqueeze(-1)

        return ball
34 | 35 | Returns 36 | ------- 37 | list[Constraint] 38 | List of domain constraints for the manipulation. 39 | """ 40 | return self._domain_constraints 41 | 42 | @domain_constraints.setter 43 | def domain_constraints(self, domain_constraints: list[Constraint]) -> None: 44 | self._domain_constraints = domain_constraints 45 | 46 | @property 47 | def perturbation_constraints(self) -> list[Constraint]: 48 | """ 49 | Get the perturbation constraints for the manipulation. 50 | 51 | Returns 52 | ------- 53 | list[Constraint] 54 | List of perturbation constraints for the manipulation. 55 | """ 56 | return self._perturbation_constraints 57 | 58 | @perturbation_constraints.setter 59 | def perturbation_constraints( 60 | self, perturbation_constraints: list[Constraint] 61 | ) -> None: 62 | self._perturbation_constraints = perturbation_constraints 63 | 64 | def _apply_domain_constraints(self, x: torch.Tensor) -> torch.Tensor: 65 | for constraint in self.domain_constraints: 66 | x = constraint(x) 67 | return x 68 | 69 | def _apply_perturbation_constraints(self, delta: torch.Tensor) -> torch.Tensor: 70 | for constraint in self.perturbation_constraints: 71 | delta = constraint(delta) 72 | return delta 73 | 74 | @abstractmethod 75 | def _apply_manipulation( 76 | self, 77 | x: torch.Tensor, 78 | delta: torch.Tensor, 79 | ) -> torch.Tensor: 80 | """ 81 | Apply the manipulation. 82 | 83 | Parameters 84 | ---------- 85 | x : torch.Tensor 86 | Input samples. 87 | delta : torch.Tensor 88 | Manipulation to apply. 89 | 90 | Returns 91 | ------- 92 | torch.Tensor 93 | Perturbed samples. 94 | """ 95 | ... 96 | 97 | def __call__( 98 | self, 99 | x: torch.Tensor, 100 | delta: torch.Tensor, 101 | ) -> tuple[torch.Tensor, torch.Tensor]: 102 | """ 103 | Apply the manipulation to the input data. 104 | 105 | Parameters 106 | ---------- 107 | x : torch.Tensor 108 | Input data. 109 | delta : torch.Tensor 110 | Perturbation to apply. 
def accuracy(y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
    """
    Compute the accuracy on a batch of predictions and targets.

    Parameters
    ----------
    y_pred : torch.Tensor
        Predictions from the model.
    y_true : torch.Tensor
        Target labels.

    Returns
    -------
    torch.Tensor
        The fraction of predictions that match the targets.
    """
    # BUGFIX: cast the boolean comparison to float before averaging;
    # torch raises "Can only calculate the mean of floating types" when
    # .mean() is called directly on a bool tensor.
    return (y_pred.type(y_true.dtype) == y_true).float().mean()
75 | 76 | Parameters 77 | ---------- 78 | y_target : float | torch.Tensor | None, optional 79 | Target label for the attack, None for untargeted, by default None 80 | """ 81 | super().__init__() 82 | self.y_target = y_target 83 | 84 | def _accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None: 85 | if self.y_target is None: 86 | super()._accumulate(y_pred, y_true) 87 | else: 88 | super()._accumulate(y_pred, torch.ones_like(y_true) * self.y_target) 89 | 90 | def _compute(self) -> torch.Tensor: 91 | if self.y_target is None: 92 | return 1 - super()._compute() 93 | return super()._compute() 94 | 95 | 96 | class AccuracyEnsemble(Accuracy): 97 | """Robust accuracy of a model on multiple attack runs.""" 98 | 99 | def __call__(self, model: BaseModel, dataloaders: list[DataLoader]) -> torch.Tensor: 100 | """ 101 | Compute the metric on an ensemble of attacks from their results. 102 | 103 | Parameters 104 | ---------- 105 | model : BaseModel 106 | Model to use for prediction. 107 | dataloaders : list[DataLoader] 108 | List of loaders returned from multiple attack runs. 109 | 110 | Returns 111 | ------- 112 | torch.Tensor 113 | The metric computed across multiple attack runs. 
114 | """ 115 | for advs in zip(*dataloaders, strict=False): 116 | y_pred = [] 117 | for x, y in advs: 118 | y_pred.append(model.predict(x).cpu().detach()) 119 | # verify that the samples order correspond 120 | assert (y - advs[0][1]).sum() == 0 121 | y_pred = torch.vstack(y_pred) 122 | self._accumulate(y_pred, advs[0][1]) 123 | return self._compute() 124 | 125 | def _accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None: 126 | self._num_samples += y_true.shape[0] 127 | self._accumulated_accuracy += torch.sum( 128 | # take worst over predictions 129 | (y_pred.type(y_true.dtype).cpu() == y_true.cpu()).min(dim=0).values, 130 | ) 131 | 132 | 133 | class EnsembleSuccessRate(AccuracyEnsemble): 134 | """Worst-case success rate of multiple attack runs.""" 135 | 136 | def __init__(self, y_target: Union[float, torch.Tensor, None] = None) -> None: 137 | """ 138 | Create ensemble success rate metric. 139 | 140 | Parameters 141 | ---------- 142 | y_target : float | torch.Tensor | None, optional 143 | Target label for the attack, None for untargeted,, by default None 144 | """ 145 | super().__init__() 146 | self.y_target = y_target 147 | 148 | def _accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None: 149 | if self.y_target is None: 150 | super()._accumulate(y_pred, y_true) 151 | else: 152 | self._num_samples += y_true.shape[0] 153 | self._accumulated_accuracy += torch.sum( 154 | # take worst over predictions 155 | ( 156 | y_pred.type(y_true.dtype).cpu() 157 | == (torch.ones_like(y_true) * self.y_target).cpu() 158 | ) 159 | .max(dim=0) 160 | .values, 161 | ) 162 | 163 | def _compute(self) -> torch.Tensor: 164 | if self.y_target is None: 165 | return 1 - super()._compute() 166 | return super()._compute() 167 | -------------------------------------------------------------------------------- /src/secmlt/models/__init__.py: -------------------------------------------------------------------------------- 1 | """Machine learning models and wrappers.""" 
class BaseModel(ABC):
    """Basic model wrapper."""

    def __init__(
        self,
        preprocessing: DataProcessing = None,
        postprocessing: DataProcessing = None,
    ) -> None:
        """
        Create base model.

        Parameters
        ----------
        preprocessing : DataProcessing, optional
            Preprocessing to apply before the forward, by default None
            (falls back to the identity transformation).
        postprocessing : DataProcessing, optional
            Postprocessing to apply after the forward, by default None
            (falls back to the identity transformation).
        """
        if preprocessing is None:
            preprocessing = IdentityDataProcessing()
        if postprocessing is None:
            postprocessing = IdentityDataProcessing()
        self._preprocessing = preprocessing
        self._postprocessing = postprocessing

    @abstractmethod
    def predict(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Return output predictions for given model.

        Parameters
        ----------
        x : torch.Tensor
            Input samples.

        Returns
        -------
        torch.Tensor
            Predictions from the model.
        """
        ...

    def decision_function(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Return the decision function from the model.

        Requires override to specify custom args and kwargs passing.

        Parameters
        ----------
        x : torch.Tensor
            Input samples.

        Returns
        -------
        torch.Tensor
            Model output scores.
        """
        # preprocess -> model-specific scores -> postprocess
        preprocessed = self._preprocessing(x)
        scores = self._decision_function(preprocessed)
        return self._postprocessing(scores)

    @abstractmethod
    def _decision_function(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Specific decision function of the model (data already preprocessed).

        Parameters
        ----------
        x : torch.Tensor
            Preprocessed input samples.

        Returns
        -------
        torch.Tensor
            Model output scores.
        """
        ...

    @abstractmethod
    def gradient(self, x: torch.Tensor, y: int, *args, **kwargs) -> torch.Tensor:
        """
        Compute gradients of the score y w.r.t. x.

        Parameters
        ----------
        x : torch.Tensor
            Input samples.
        y : int
            Target score.

        Returns
        -------
        torch.Tensor
            Input gradients of the target score y.
        """
        ...

    @abstractmethod
    def train(self, dataloader: DataLoader) -> "BaseModel":
        """
        Train the model with the given dataloader.

        Parameters
        ----------
        dataloader : DataLoader
            Train data loader.
        """
        ...

    def __call__(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Forward function of the model.

        Parameters
        ----------
        x : torch.Tensor
            Input samples.

        Returns
        -------
        torch.Tensor
            Model output scores.
        """
        return self.decision_function(x, *args, **kwargs)
136 | """ 137 | return self.decision_function(x, *args, **kwargs) 138 | -------------------------------------------------------------------------------- /src/secmlt/models/base_trainer.py: -------------------------------------------------------------------------------- 1 | """Model trainers.""" 2 | 3 | from abc import ABCMeta, abstractmethod 4 | 5 | from secmlt.models.base_model import BaseModel 6 | from torch.utils.data import DataLoader 7 | 8 | 9 | class BaseTrainer(metaclass=ABCMeta): 10 | """Abstract class for model trainers.""" 11 | 12 | @abstractmethod 13 | def train(self, model: BaseModel, dataloader: DataLoader) -> BaseModel: 14 | """ 15 | Train a model with the given dataloader. 16 | 17 | Parameters 18 | ---------- 19 | model : BaseModel 20 | Model to train. 21 | dataloader : DataLoader 22 | Training dataloader. 23 | 24 | Returns 25 | ------- 26 | BaseModel 27 | The trained model. 28 | """ 29 | ... 30 | -------------------------------------------------------------------------------- /src/secmlt/models/data_processing/__init__.py: -------------------------------------------------------------------------------- 1 | """Functionalities for data transformations.""" 2 | -------------------------------------------------------------------------------- /src/secmlt/models/data_processing/data_processing.py: -------------------------------------------------------------------------------- 1 | """Interface for the data processing functionalities.""" 2 | 3 | from abc import ABC, abstractmethod 4 | 5 | import torch 6 | 7 | 8 | class DataProcessing(ABC): 9 | """Abstract data processing class.""" 10 | 11 | @abstractmethod 12 | def _process(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: ... 13 | 14 | @abstractmethod 15 | def invert(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: 16 | """ 17 | Apply the inverted transform (if defined). 18 | 19 | Parameters 20 | ---------- 21 | x : torch.Tensor 22 | Input samples. 
class IdentityDataProcessing(DataProcessing):
    """Identity transformation."""

    def _process(self, x: torch.Tensor) -> torch.Tensor:
        """
        Apply the identity transformation (no-op).

        Parameters
        ----------
        x : torch.Tensor
            Input samples.

        Returns
        -------
        torch.Tensor
            The samples, unchanged.
        """
        return x

    def invert(self, x: torch.Tensor) -> torch.Tensor:
        """
        Invert the identity transformation (also a no-op).

        Parameters
        ----------
        x : torch.Tensor
            Input samples.

        Returns
        -------
        torch.Tensor
            The samples, unchanged.
        """
        return x
47 | """ 48 | return self._model 49 | 50 | def _get_device(self) -> torch.device: 51 | return next(self._model.parameters()).device 52 | 53 | def predict(self, x: torch.Tensor) -> torch.Tensor: 54 | """ 55 | Return the predicted class for the given samples. 56 | 57 | Parameters 58 | ---------- 59 | x : torch.Tensor 60 | Input samples. 61 | 62 | Returns 63 | ------- 64 | torch.Tensor 65 | Predicted class for the samples. 66 | """ 67 | scores = self.decision_function(x) 68 | return torch.argmax(scores, dim=-1) 69 | 70 | def _decision_function(self, x: torch.Tensor) -> torch.Tensor: 71 | """ 72 | Compute decision function of the model. 73 | 74 | Parameters 75 | ---------- 76 | x : torch.Tensor 77 | Input samples. 78 | 79 | Returns 80 | ------- 81 | torch.Tensor 82 | Output scores from the model. 83 | """ 84 | x = x.to(device=self._get_device()) 85 | return self._model(x) 86 | 87 | def gradient(self, x: torch.Tensor, y: int) -> torch.Tensor: 88 | """ 89 | Compute batch gradients of class y w.r.t. x. 90 | 91 | Parameters 92 | ---------- 93 | x : torch.Tensor 94 | Input samples. 95 | y : int 96 | Class label. 97 | 98 | Returns 99 | ------- 100 | torch.Tensor 101 | Gradient of class y w.r.t. input x. 102 | """ 103 | x = x.clone().requires_grad_() 104 | if x.grad is not None: 105 | x.grad.zero_() 106 | output = self.decision_function(x) 107 | output = output[:, y].sum() 108 | output.backward() 109 | return x.grad 110 | 111 | def train(self, dataloader: DataLoader) -> torch.nn.Module: 112 | """ 113 | Train the model with given dataloader, if trainer is set. 114 | 115 | Parameters 116 | ---------- 117 | dataloader : DataLoader 118 | Training PyTorch dataloader to use for training. 119 | 120 | Returns 121 | ------- 122 | torch.nn.Module 123 | Trained PyTorch model. 124 | 125 | Raises 126 | ------ 127 | ValueError 128 | Raises ValueError if the trainer is not set. 129 | """ 130 | if self._trainer is None: 131 | msg = "Cannot train without a trainer." 
132 | raise ValueError(msg) 133 | return self._trainer.train(self._model, dataloader) 134 | -------------------------------------------------------------------------------- /src/secmlt/models/pytorch/base_pytorch_trainer.py: -------------------------------------------------------------------------------- 1 | """PyTorch model trainers.""" 2 | 3 | import torch.nn 4 | from secmlt.models.base_trainer import BaseTrainer 5 | from torch.optim.lr_scheduler import _LRScheduler 6 | from torch.utils.data import DataLoader 7 | 8 | 9 | class BasePyTorchTrainer(BaseTrainer): 10 | """Trainer for PyTorch models.""" 11 | 12 | def __init__( 13 | self, 14 | optimizer: torch.optim.Optimizer, 15 | epochs: int = 5, 16 | loss: torch.nn.Module = None, 17 | scheduler: _LRScheduler = None, 18 | ) -> None: 19 | """ 20 | Create PyTorch trainer. 21 | 22 | Parameters 23 | ---------- 24 | optimizer : torch.optim.Optimizer 25 | Optimizer to use for training the model. 26 | epochs : int, optional 27 | Number of epochs, by default 5. 28 | loss : torch.nn.Module, optional 29 | Loss to minimize, by default None. 30 | scheduler : _LRScheduler, optional 31 | Scheduler for the optimizer, by default None. 32 | """ 33 | self._epochs = epochs 34 | self._optimizer = optimizer 35 | self._loss = loss if loss is not None else torch.nn.CrossEntropyLoss() 36 | self._scheduler = scheduler 37 | 38 | def train(self, model: torch.nn.Module, dataloader: DataLoader) -> torch.nn.Module: 39 | """ 40 | Train model with given loader. 41 | 42 | Parameters 43 | ---------- 44 | model : torch.nn.Module 45 | Pytorch model to be trained. 46 | dataloader : DataLoader 47 | Train data loader. 48 | 49 | Returns 50 | ------- 51 | torch.nn.Module 52 | Trained model. 
53 | """ 54 | device = next(model.parameters()).device 55 | model = model.train() 56 | for _ in range(self._epochs): 57 | for _, (x, y) in enumerate(dataloader): 58 | x, y = x.to(device), y.to(device) 59 | self._optimizer.zero_grad() 60 | outputs = model(x) 61 | loss = self._loss(outputs, y) 62 | loss.backward() 63 | self._optimizer.step() 64 | if self._scheduler is not None: 65 | self._scheduler.step() 66 | return model 67 | -------------------------------------------------------------------------------- /src/secmlt/optimization/__init__.py: -------------------------------------------------------------------------------- 1 | """Optimization functionalities.""" 2 | -------------------------------------------------------------------------------- /src/secmlt/optimization/gradient_processing.py: -------------------------------------------------------------------------------- 1 | """Processing functions for gradients.""" 2 | 3 | from abc import ABC, abstractmethod 4 | 5 | import torch.linalg 6 | from secmlt.adv.evasion.perturbation_models import LpPerturbationModels 7 | from torch.nn.functional import normalize 8 | 9 | 10 | def lin_proj_l1(x: torch.Tensor) -> torch.Tensor: 11 | """Return the linear projection of x onto an L1 unit ball. 12 | 13 | Parameters 14 | ---------- 15 | x : torch.Tensor 16 | Input tensor to project. 17 | 18 | Returns 19 | ------- 20 | torch.Tensor 21 | Linear projection of x onto unit L1 ball. 22 | """ 23 | w = abs(x) 24 | num_max = (w == w.max()).sum() 25 | w = torch.where(w == w.max(), 1 / num_max, 0) 26 | return w * x.sign() 27 | 28 | 29 | class GradientProcessing(ABC): 30 | """Gradient processing base class.""" 31 | 32 | @abstractmethod 33 | def __call__(self, grad: torch.Tensor) -> torch.Tensor: 34 | """ 35 | Process the gradient with the given transformation. 36 | 37 | Parameters 38 | ---------- 39 | grad : torch.Tensor 40 | Input gradients. 41 | 42 | Returns 43 | ------- 44 | torch.Tensor 45 | The processed gradients. 46 | """ 47 | ... 
class LinearProjectionGradientProcessing(GradientProcessing):
    """Linear projection of the gradient onto Lp balls."""

    def __init__(self, perturbation_model: str = LpPerturbationModels.L2) -> None:
        """
        Create linear projection for the gradient.

        Parameters
        ----------
        perturbation_model : str, optional
            Perturbation model for the Lp ball, by default LpPerturbationModels.L2.

        Raises
        ------
        ValueError
            Raises ValueError if the perturbation model is not implemented.
            Available: l1, l2, linf.
        """
        perturbations_models = {
            LpPerturbationModels.L1: 1,
            LpPerturbationModels.L2: 2,
            LpPerturbationModels.LINF: float("inf"),
        }
        if perturbation_model not in perturbations_models:
            # Show the accepted *names* (keys), not the numeric p values,
            # since the parameter takes names such as "l1".
            msg = (
                f"{perturbation_model} not available. "
                f"Use one of: {list(perturbations_models)}"
            )
            raise ValueError(msg)
        self.p = perturbations_models[perturbation_model]

    def __call__(self, grad: torch.Tensor) -> torch.Tensor:
        """
        Process gradient with linear projection onto the Lp ball.

        Sets the direction by maximizing the scalar product with the
        gradient over the Lp ball.

        Parameters
        ----------
        grad : torch.Tensor
            Input gradients.

        Returns
        -------
        torch.Tensor
            The gradient linearly projected onto the Lp ball.

        Raises
        ------
        NotImplementedError
            Raises NotImplementedError if the norm is not in 1, 2, inf.
            (Defensive only: the constructor already restricts self.p.)
        """
        original_shape = grad.data.shape
        if self.p == LpPerturbationModels.get_p(LpPerturbationModels.L2):
            return normalize(grad.data.flatten(start_dim=1), p=self.p, dim=1).view(
                original_shape
            )
        if self.p == LpPerturbationModels.get_p(LpPerturbationModels.L1):
            return lin_proj_l1(grad.data.flatten(start_dim=1)).view(original_shape)
        if self.p == LpPerturbationModels.get_p(LpPerturbationModels.LINF):
            return torch.sign(grad)
        # Fixed message: L1 *is* implemented above; the old text omitted it.
        msg = "Only L1, L2 and LInf norms implemented now"
        raise NotImplementedError(msg)
45 | """ 46 | self.radius = radius 47 | self.perturbation_model = perturbation_model 48 | self.initializer = RandomPerturb(p=self.perturbation_model, epsilon=self.radius) 49 | 50 | def __call__(self, x: torch.Tensor) -> torch.Tensor: 51 | """ 52 | Get random perturbations. 53 | 54 | Parameters 55 | ---------- 56 | x : torch.Tensor 57 | Input samples. 58 | 59 | Returns 60 | ------- 61 | torch.Tensor 62 | Initialized random perturbations. 63 | """ 64 | return self.initializer(x) 65 | -------------------------------------------------------------------------------- /src/secmlt/optimization/optimizer_factory.py: -------------------------------------------------------------------------------- 1 | """Optimizer creation tools.""" 2 | 3 | import functools 4 | from typing import ClassVar 5 | 6 | import torch 7 | from torch.optim import SGD, Adam 8 | 9 | ADAM = "adam" 10 | StochasticGD = "sgd" 11 | 12 | 13 | class OptimizerFactory: 14 | """Creator class for optimizers.""" 15 | 16 | OPTIMIZERS: ClassVar[dict[str, torch.optim.Optimizer]] = { 17 | ADAM: Adam, 18 | StochasticGD: SGD, 19 | } 20 | 21 | @staticmethod 22 | def create_from_name( 23 | optimizer_name: str, 24 | lr: float, 25 | **kwargs, 26 | ) -> functools.partial[Adam] | functools.partial[SGD]: 27 | """ 28 | Create an optimizer. 29 | 30 | Parameters 31 | ---------- 32 | optimizer_name : str 33 | One of the available optimizer names. Available: `adam`, `sgd`. 34 | lr : float 35 | Learning rate. 36 | 37 | Returns 38 | ------- 39 | functools.partial[Adam] | functools.partial[SGD] 40 | The created optimizer. 41 | 42 | Raises 43 | ------ 44 | ValueError 45 | Raises ValueError when the requested optimizer is not in the list 46 | of implemented optimizers. 47 | """ 48 | if optimizer_name == ADAM: 49 | return OptimizerFactory.create_adam(lr) 50 | if optimizer_name == StochasticGD: 51 | return OptimizerFactory.create_sgd(lr) 52 | msg = f"Optimizer not found. 
Use one of: \ 53 | {list(OptimizerFactory.OPTIMIZERS.keys())}" 54 | raise ValueError(msg) 55 | 56 | @staticmethod 57 | def create_adam(lr: float) -> functools.partial[Adam]: 58 | """ 59 | Create the Adam optimizer. 60 | 61 | Parameters 62 | ---------- 63 | lr : float 64 | Learning rate. 65 | 66 | Returns 67 | ------- 68 | functools.partial[Adam] 69 | Adam optimizer. 70 | """ 71 | return functools.partial(Adam, lr=lr) 72 | 73 | @staticmethod 74 | def create_sgd(lr: float) -> functools.partial[SGD]: 75 | """ 76 | Create the SGD optimizer. 77 | 78 | Parameters 79 | ---------- 80 | lr : float 81 | Learning rate. 82 | 83 | Returns 84 | ------- 85 | functools.partial[SGD] 86 | SGD optimizer. 87 | """ 88 | return functools.partial(SGD, lr=lr) 89 | -------------------------------------------------------------------------------- /src/secmlt/optimization/random_perturb.py: -------------------------------------------------------------------------------- 1 | """Random pertubations in Lp balls.""" 2 | 3 | from abc import ABC, abstractmethod 4 | 5 | import torch 6 | from secmlt.adv.evasion.perturbation_models import LpPerturbationModels 7 | from secmlt.data.lp_uniform_sampling import LpUniformSampling 8 | from secmlt.optimization.constraints import ( 9 | L0Constraint, 10 | L1Constraint, 11 | L2Constraint, 12 | LInfConstraint, 13 | LpConstraint, 14 | ) 15 | 16 | 17 | class RandomPerturbBase(ABC): 18 | """Class implementing the random perturbations in Lp balls.""" 19 | 20 | def __init__(self, epsilon: float) -> None: 21 | """ 22 | Create random perturbation object. 23 | 24 | Parameters 25 | ---------- 26 | epsilon : float 27 | Constraint radius. 28 | """ 29 | self.epsilon = epsilon 30 | 31 | def __call__(self, x: torch.Tensor) -> torch.Tensor: 32 | """ 33 | Get the perturbations for the given samples. 34 | 35 | Parameters 36 | ---------- 37 | x : torch.Tensor 38 | Input samples to perturb. 
class RandomPerturbLinf(RandomPerturbBase):
    """Random Perturbations for Linf norm."""

    def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
        """
        Generate random perturbation for the Linf norm.

        Parameters
        ----------
        x : torch.Tensor
            Input samples to perturb.

        Returns
        -------
        torch.Tensor
            Perturbed samples.
        """
        noise = LpUniformSampling(p=LpPerturbationModels.LINF).sample_like(x)
        return x + noise * self.epsilon

    @property
    def _constraint(self) -> type[LInfConstraint]:
        return LInfConstraint


class RandomPerturbL1(RandomPerturbBase):
    """Random Perturbations for L1 norm."""

    def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
        """
        Generate random perturbation for the L1 norm.

        Parameters
        ----------
        x : torch.Tensor
            Input samples to perturb.

        Returns
        -------
        torch.Tensor
            Perturbed samples.
        """
        noise = LpUniformSampling(p=LpPerturbationModels.L1).sample_like(x)
        return x + noise * self.epsilon

    @property
    def _constraint(self) -> type[L1Constraint]:
        return L1Constraint


class RandomPerturbL2(RandomPerturbBase):
    """Random Perturbations for L2 norm."""

    def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
        """
        Generate random perturbation for the L2 norm.

        Parameters
        ----------
        x : torch.Tensor
            Input samples to perturb.

        Returns
        -------
        torch.Tensor
            Perturbed samples.
        """
        noise = LpUniformSampling(p=LpPerturbationModels.L2).sample_like(x)
        return x + noise * self.epsilon

    @property
    def _constraint(self) -> type[L2Constraint]:
        return L2Constraint


class RandomPerturbL0(RandomPerturbBase):
    """Random Perturbations for L0 norm."""

    def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
        """
        Generate random perturbation for the L0 norm.

        Parameters
        ----------
        x : torch.Tensor
            Input samples to perturb.

        Returns
        -------
        torch.Tensor
            Perturbed samples.
        """
        noise = LpUniformSampling(p=LpPerturbationModels.L0).sample_like(x)
        return x + noise * self.epsilon

    @property
    def _constraint(self) -> type[L0Constraint]:
        return L0Constraint


class RandomPerturb:
    """Random perturbation creator."""

    def __new__(cls, p: str, epsilon: float) -> RandomPerturbBase:
        """
        Creator for random perturbation in Lp norms.

        Parameters
        ----------
        p : str
            p-norm used for the random perturbation shape.
        epsilon : float
            Radius of the random perturbation constraint.

        Returns
        -------
        RandomPerturbBase
            Random perturbation object.

        Raises
        ------
        ValueError
            Raises ValueError if the norm is not in 0, 1, 2, inf.
        """
        perturb_classes = {
            LpPerturbationModels.L0: RandomPerturbL0,
            LpPerturbationModels.L1: RandomPerturbL1,
            LpPerturbationModels.L2: RandomPerturbL2,
            LpPerturbationModels.LINF: RandomPerturbLinf,
        }
        chosen = perturb_classes.get(p)
        if chosen is None:
            msg = "Perturbation model not available."
            raise ValueError(msg)
        return chosen(epsilon=epsilon)
@pytest.fixture
def adv_loaders() -> list[DataLoader[tuple[torch.Tensor, ...]]]:
    """
    Create fake adversarial loaders.

    Returns
    -------
    list[DataLoader[Tuple[torch.Tensor, ...]]]
        A list of multiple loaders (with same ordered labels).
    """
    # Create a list of dummy adversarial example loaders for testing;
    # the labels are shared so the loaders stay aligned sample-by-sample.
    loaders = []
    adv_labels = torch.randint(0, 10, (100,))
    for _ in range(3):
        adv_data = torch.randn(100, 3, 32, 32)
        adv_dataset = TensorDataset(adv_data, adv_labels)
        loaders.append(DataLoader(adv_dataset, batch_size=10))
    return loaders


@pytest.fixture
def model() -> torch.nn.Module:
    """
    Create fake model.

    Returns
    -------
    torch.nn.Module
        Fake model.
    """
    return BasePytorchClassifier(model=MockModel())


@pytest.fixture
def data() -> torch.Tensor:
    """
    Get random samples.

    Returns
    -------
    torch.Tensor
        A fake tensor with samples.
    """
    return torch.randn(10, 3, 32, 32).clamp(0.0, 1.0)


@pytest.fixture
def labels() -> torch.Tensor:
    """
    Get random labels.

    Returns
    -------
    torch.Tensor
        A fake tensor with labels.
    """
    # Bug fix: torch.randint requires the size as a tuple; the bare `10`
    # previously raised TypeError when the fixture was used.
    return torch.randint(0, 9, (10,))


@pytest.fixture
def loss_values() -> torch.Tensor:
    """
    Get random model outputs.

    Returns
    -------
    torch.Tensor
        A fake tensor with model outputs.
    """
    return torch.randn(10)
119 | """ 120 | return torch.randn(10, 10) 121 | -------------------------------------------------------------------------------- /src/secmlt/tests/mocks.py: -------------------------------------------------------------------------------- 1 | """Mock classes for testing.""" 2 | 3 | from collections.abc import Iterator 4 | 5 | import torch 6 | 7 | 8 | class MockLayer(torch.autograd.Function): 9 | """Fake layer that returns the input.""" 10 | 11 | @staticmethod 12 | def forward(ctx, inputs: torch.Tensor) -> torch.Tensor: # noqa: ANN001 13 | """Fake forward, returns 10 scores.""" 14 | ctx.save_for_backward(inputs) 15 | return torch.randn(inputs.size(0), 10) 16 | 17 | @staticmethod 18 | def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: # noqa: ANN001 19 | """Fake backward, returns inputs.""" 20 | (inputs,) = ctx.saved_tensors 21 | return inputs 22 | 23 | 24 | class MockModel(torch.nn.Module): 25 | """Mock class for torch model.""" 26 | 27 | @staticmethod 28 | def parameters() -> Iterator[torch.Tensor]: 29 | """Return fake parameters.""" 30 | params = torch.rand(10, 10) 31 | return iter( 32 | [ 33 | params, 34 | ], 35 | ) 36 | 37 | def forward(self, x: torch.Tensor) -> torch.Tensor: 38 | """Return random outputs for classification and add fake gradients to x.""" 39 | # Mock output shape (batch_size, 10) 40 | fake_layer = MockLayer.apply 41 | return fake_layer(x) 42 | 43 | def decision_function(self, *args, **kwargs) -> torch.Tensor: 44 | """Return random outputs for classification and add fake gradients to x.""" 45 | return self.forward(*args, **kwargs) 46 | 47 | 48 | class MockLoss(torch.nn.Module): 49 | """Fake loss function.""" 50 | 51 | def forward(*args) -> torch.Tensor: 52 | """Override forward.""" 53 | x = torch.rand(10) 54 | x.backward = lambda: x 55 | return x 56 | -------------------------------------------------------------------------------- /src/secmlt/tests/test_aggregators.py: 
import torch
from secmlt.adv.evasion.aggregators.ensemble import (
    FixedEpsilonEnsemble,
    MinDistanceEnsemble,
)


def test_min_distance_ensemble(model, data_loader, adv_loaders) -> None:
    """Check shapes returned by the min-distance ensemble loader."""
    result_loader = MinDistanceEnsemble("l2")(model, data_loader, adv_loaders)
    for batch in result_loader:
        # adversarial examples keep the input shape, labels stay per-batch
        assert batch[0].shape == (10, 3, 32, 32)
        assert batch[1].shape == (10,)


def test_fixed_epsilon_ensemble(model, data_loader, adv_loaders) -> None:
    """Check shapes returned by the fixed-epsilon ensemble loader."""
    ensemble = FixedEpsilonEnsemble(torch.nn.CrossEntropyLoss())
    for batch in ensemble(model, data_loader, adv_loaders):
        # adversarial examples keep the input shape, labels stay per-batch
        assert batch[0].shape == (10, 3, 32, 32)
        assert batch[1].shape == (10,)
def add_trigger(x: torch.Tensor) -> torch.Tensor:
    """Identity trigger: return the input unchanged (placeholder manipulation)."""
    return x
def test_backends() -> None:
    """Check that the expected backend identifiers are exposed."""
    for attr, value in (("FOOLBOX", "foolbox"), ("NATIVE", "native")):
        assert hasattr(Backends, attr)
        assert getattr(Backends, attr) == value


def test_perturbation_models() -> None:
    """Check that all Lp perturbation model names are exposed."""
    for attr in ("L0", "L1", "L2", "LINF"):
        assert hasattr(LpPerturbationModels, attr)
def assert_tensor_equal(actual, expected, msg="") -> None:
    """Assert element-wise closeness of two tensors with a readable message."""
    are_close = torch.allclose(actual, expected)
    assert are_close, f"Expected {expected}, but got {actual}. {msg}"
def test_l0_constraint_invalid_radius():
    """A non-integer radius must be rejected."""
    with pytest.raises(ValueError):  # noqa: PT011
        L0Constraint(radius=2.5)


@pytest.mark.parametrize(
    ("x", "levels", "expected"),
    [
        (
            torch.tensor([[0.2, 0.7], [0.4, 0.8]]),
            5,
            torch.tensor([[0.25, 0.75], [0.5, 0.75]]),
        ),
        (
            torch.tensor([[0.1, 0.9], [0.3, 0.6]]),
            torch.Tensor([0.1, 0.2, 0.5]),
            torch.tensor([[0.1, 0.5], [0.2, 0.5]]),
        ),
        (
            torch.tensor([[0.1, 0.9], [0.3, 0.6]]),
            [0.1, 0.2, 0.5],
            torch.tensor([[0.1, 0.5], [0.2, 0.5]]),
        ),
    ],
)
def test_quantization_constraint(x, levels, expected):
    """Quantization snaps each value to the closest allowed level."""
    projected = QuantizationConstraint(levels=levels)(x)
    assert_tensor_equal(projected, expected)


def test_quantization_constraint_invalid_levels():
    """A non-integer levels value must be rejected."""
    with pytest.raises(ValueError):  # noqa: PT011
        QuantizationConstraint(levels=2.5)


def test_quantization_constraint_not_enough_levels():
    """Fewer than two levels must be rejected."""
    with pytest.raises(ValueError):  # noqa: PT011
        QuantizationConstraint(levels=1)
def test_rademacher_dist_shape():
    """Rademacher samples have the requested shape."""
    shape = torch.Size([3, 4])
    assert Rademacher().sample(shape).shape == shape


def test_rademacher_dist_values():
    """Rademacher samples contain only -1 and +1 values."""
    sample = Rademacher().sample(torch.Size([3, 4]))
    assert (sample == -1).any()
    assert (sample == 1).any()
    assert not (((sample != -1) & (sample != 1)).all())


def test_gnormal_dist_shape():
    """Generalized normal samples have the requested shape."""
    shape = torch.Size([3, 4])
    assert GeneralizedNormal().sample(shape).shape == shape


def test_gnormal_dist_dtype():
    """Generalized normal samples are float32."""
    sample = GeneralizedNormal().sample(torch.Size([3, 4]))
    assert sample.dtype == torch.float32


def test_gnormal_dist_p():
    """Different p values draw different samples."""
    dist = GeneralizedNormal()
    shape = torch.Size([3, 4])
    assert not torch.equal(dist.sample(shape, p=1), dist.sample(shape, p=2))
class MockConstraint(Constraint):
    """Constraint stub that always returns a fixed tensor."""

    def __init__(self, mock_return):
        self.mock_return = mock_return

    def _apply_constraint(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        return self.mock_return


@pytest.fixture
def input_tensor():
    """Sample input tensor."""
    return torch.tensor([[1.0, 2.0], [3.0, 4.0]])


@pytest.fixture
def delta_tensor():
    """Sample perturbation tensor."""
    return torch.tensor([[0.1, 0.2], [0.3, 0.4]])


@pytest.fixture
def domain_constraint():
    """Domain constraint that returns a fixed tensor."""
    return MockConstraint(torch.tensor([[0.5, 0.6], [0.7, 0.8]]))


@pytest.fixture
def perturbation_constraint():
    """Perturbation constraint that returns a fixed tensor."""
    return MockConstraint(torch.tensor([[0.1, 0.2], [0.3, 0.4]]))


@pytest.fixture
def additive_manipulation(domain_constraint, perturbation_constraint):
    """Additive manipulation wired with the mock constraints."""
    return AdditiveManipulation(
        domain_constraints=[domain_constraint],
        perturbation_constraints=[perturbation_constraint],
    )
domain constraint and check if it modifies the input correctly 45 | result = additive_manipulation._apply_domain_constraints(input_tensor) 46 | expected = torch.tensor([[0.5, 0.6], [0.7, 0.8]]) 47 | assert torch.equal(result, expected), f"Expected {expected}, got {result}" 48 | 49 | 50 | def test_apply_perturbation_constraints(additive_manipulation, delta_tensor): 51 | # apply the perturbation constraint and check if it modifies the delta correctly 52 | result = additive_manipulation._apply_perturbation_constraints(delta_tensor) 53 | expected = torch.tensor([[0.1, 0.2], [0.3, 0.4]]) 54 | assert torch.equal(result, expected), f"Expected {expected}, got {result}" 55 | 56 | 57 | def test_additive_manipulation(additive_manipulation, input_tensor, delta_tensor): 58 | # test the additive manipulation 59 | x_adv, delta = additive_manipulation(input_tensor, delta_tensor) 60 | expected_x_adv = torch.tensor([[0.5, 0.6], [0.7, 0.8]]) 61 | expected_delta = torch.tensor([[0.1, 0.2], [0.3, 0.4]]) 62 | 63 | assert torch.equal(x_adv, expected_x_adv), f"Expected {expected_x_adv}, got {x_adv}" 64 | assert torch.equal(delta, expected_delta), f"Expected {expected_delta}, got {delta}" 65 | 66 | 67 | def test_getters_and_setters( 68 | additive_manipulation, domain_constraint, perturbation_constraint 69 | ): 70 | # test getter for domain_constraints 71 | domain_constraints = additive_manipulation.domain_constraints 72 | assert domain_constraints == [ 73 | domain_constraint 74 | ], f"Expected domain constraints {domain_constraint}, got {domain_constraints}" 75 | 76 | # test setter for domain_constraints 77 | new_domain_constraint = MockConstraint(torch.tensor([[0.0, 0.1], [0.2, 0.3]])) 78 | additive_manipulation.domain_constraints = [new_domain_constraint] 79 | assert additive_manipulation.domain_constraints == [ 80 | new_domain_constraint 81 | ], "Domain constraints setter did not update correctly" 82 | 83 | # test getter for perturbation_constraints 84 | perturbation_constraints = 
additive_manipulation.perturbation_constraints 85 | assert perturbation_constraints == [perturbation_constraint], ( 86 | f"Expected perturbation constraints {perturbation_constraint}, " 87 | f"got {perturbation_constraints}" 88 | ) 89 | 90 | # test setter for perturbation_constraints 91 | new_perturbation_constraint = MockConstraint( 92 | torch.tensor([[0.05, 0.15], [0.25, 0.35]]) 93 | ) 94 | additive_manipulation.perturbation_constraints = [new_perturbation_constraint] 95 | assert additive_manipulation.perturbation_constraints == [ 96 | new_perturbation_constraint 97 | ], "Perturbation constraints setter did not update correctly" 98 | -------------------------------------------------------------------------------- /src/secmlt/tests/test_metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from secmlt.metrics.classification import ( 3 | Accuracy, 4 | AccuracyEnsemble, 5 | AttackSuccessRate, 6 | EnsembleSuccessRate, 7 | ) 8 | 9 | 10 | def test_accuracy(model, data_loader) -> None: 11 | acc_metric = Accuracy() 12 | acc = acc_metric(model, data_loader) 13 | assert torch.is_tensor(acc) 14 | 15 | 16 | def test_attack_success_rate(model, adv_loaders): 17 | attack_acc = AttackSuccessRate() 18 | acc = attack_acc(model, adv_loaders[0]) 19 | assert torch.is_tensor(acc) 20 | 21 | 22 | def test_accuracy_ensemble(model, adv_loaders): 23 | acc_ensemble = AccuracyEnsemble() 24 | acc = acc_ensemble(model, adv_loaders) 25 | assert torch.is_tensor(acc) 26 | 27 | 28 | def test_ensemble_success_rate(model, adv_loaders): 29 | ensemble_acc = EnsembleSuccessRate() 30 | acc = ensemble_acc(model, adv_loaders) 31 | assert torch.is_tensor(acc) 32 | -------------------------------------------------------------------------------- /src/secmlt/tests/test_trackers.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | from secmlt.trackers.image_trackers import ( 4 | 
GradientsTracker, 5 | SampleTracker, 6 | ) 7 | from secmlt.trackers.trackers import ( 8 | GradientNormTracker, 9 | LossTracker, 10 | PerturbationNormTracker, 11 | PredictionTracker, 12 | ScoresTracker, 13 | ) 14 | 15 | NUM_STEPS = 5 16 | 17 | 18 | @pytest.mark.parametrize( 19 | "tracker", 20 | [ 21 | GradientsTracker(), 22 | SampleTracker(), 23 | GradientNormTracker(), 24 | LossTracker(), 25 | PerturbationNormTracker(), 26 | PredictionTracker(), 27 | ScoresTracker(y=0), 28 | ScoresTracker(y=None), 29 | ], 30 | ) 31 | def test_tracker(data, loss_values, output_values, tracker) -> None: 32 | for i in range(NUM_STEPS): 33 | tracker.track(i, loss_values, output_values, data, data, data) 34 | assert len(tracker.tracked) == NUM_STEPS 35 | assert all(torch.is_tensor(x) for x in tracker.tracked) 36 | assert torch.is_tensor(tracker.get_last_tracked()) 37 | -------------------------------------------------------------------------------- /src/secmlt/tests/test_trainer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from secmlt.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer 3 | from secmlt.tests.mocks import MockLoss 4 | from torch.optim import SGD 5 | 6 | 7 | def test_pytorch_trainer(model, data_loader) -> None: 8 | pytorch_model = model._model 9 | optimizer = SGD(pytorch_model.parameters(), lr=0.01) 10 | criterion = MockLoss() 11 | 12 | # Create the trainer instance 13 | trainer = BasePyTorchTrainer(optimizer=optimizer, loss=criterion) 14 | 15 | # Train the model 16 | trained_model = trainer.train(pytorch_model, data_loader) 17 | assert isinstance(trained_model, torch.nn.Module) 18 | -------------------------------------------------------------------------------- /src/secmlt/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | from secmlt.utils.tensor_utils import atleast_kd 4 | 5 | 6 | @pytest.mark.parametrize( 7 | 
"input_tensor, desired_dims, expected_shape", 8 | [ 9 | (torch.tensor([1, 2, 3]), 2, (3, 1)), 10 | (torch.tensor([[1, 2], [3, 4]]), 3, (2, 2, 1)), 11 | (torch.tensor([[[1], [2]], [[3], [4]]]), 4, (2, 2, 1, 1)), 12 | ], 13 | ) 14 | def test_atleast_kd(input_tensor, desired_dims, expected_shape): 15 | output_tensor = atleast_kd(input_tensor, desired_dims) 16 | assert output_tensor.shape == expected_shape 17 | 18 | 19 | def test_atleast_kd_raises_error(): 20 | x = torch.tensor([[1, 2], [3, 4]]) 21 | msg = "The number of desired dimensions should be > x.dim()" 22 | with pytest.raises(ValueError, match=msg): 23 | atleast_kd(x, 1) 24 | -------------------------------------------------------------------------------- /src/secmlt/trackers/__init__.py: -------------------------------------------------------------------------------- 1 | """Module implementing trackers for adversarial attacks.""" 2 | 3 | import importlib.util 4 | 5 | if importlib.util.find_spec("tensorboard", None) is not None: 6 | from .tensorboard_tracker import TensorboardTracker # noqa: F401 7 | 8 | from .image_trackers import * # noqa: F403 9 | from .trackers import * # noqa: F403 10 | -------------------------------------------------------------------------------- /src/secmlt/trackers/image_trackers.py: -------------------------------------------------------------------------------- 1 | """Image-specific trackers.""" 2 | 3 | import torch 4 | from secmlt.trackers.trackers import IMAGE, Tracker 5 | 6 | 7 | class SampleTracker(Tracker): 8 | """Tracker for adversarial images.""" 9 | 10 | def __init__(self) -> None: 11 | """Create adversarial image tracker.""" 12 | super().__init__("Sample", IMAGE) 13 | 14 | self.tracked = [] 15 | 16 | def track( 17 | self, 18 | iteration: int, 19 | loss: torch.Tensor, 20 | scores: torch.Tensor, 21 | x_adv: torch.Tensor, 22 | delta: torch.Tensor, 23 | grad: torch.Tensor, 24 | ) -> None: 25 | """ 26 | Track the adversarial examples at the current iteration as images. 
27 | 28 | Parameters 29 | ---------- 30 | iteration : int 31 | The attack iteration number. 32 | loss : torch.Tensor 33 | The value of the (per-sample) loss of the attack. 34 | scores : torch.Tensor 35 | The output scores from the model. 36 | x_adv : torch.tensor 37 | The adversarial examples at the current iteration. 38 | delta : torch.Tensor 39 | The adversarial perturbations at the current iteration. 40 | grad : torch.Tensor 41 | The gradient of delta at the given iteration. 42 | """ 43 | self.tracked.append(x_adv) 44 | 45 | 46 | class GradientsTracker(Tracker): 47 | """Tracker for gradient images.""" 48 | 49 | def __init__(self) -> None: 50 | """Create gradients tracker.""" 51 | super().__init__(name="Grad", tracker_type=IMAGE) 52 | 53 | self.tracked = [] 54 | 55 | def track( 56 | self, 57 | iteration: int, 58 | loss: torch.Tensor, 59 | scores: torch.Tensor, 60 | x_adv: torch.Tensor, 61 | delta: torch.Tensor, 62 | grad: torch.Tensor, 63 | ) -> None: 64 | """ 65 | Track the gradients at the current iteration as images. 66 | 67 | Parameters 68 | ---------- 69 | iteration : int 70 | The attack iteration number. 71 | loss : torch.Tensor 72 | The value of the (per-sample) loss of the attack. 73 | scores : torch.Tensor 74 | The output scores from the model. 75 | x_adv : torch.tensor 76 | The adversarial examples at the current iteration. 77 | delta : torch.Tensor 78 | The adversarial perturbations at the current iteration. 79 | grad : torch.Tensor 80 | The gradient of delta at the given iteration. 
81 | """ 82 | self.tracked.append(grad) 83 | -------------------------------------------------------------------------------- /src/secmlt/trackers/tensorboard_tracker.py: -------------------------------------------------------------------------------- 1 | """Tensorboard tracking utilities.""" 2 | 3 | import torch 4 | from secmlt.trackers.trackers import ( 5 | IMAGE, 6 | MULTI_SCALAR, 7 | SCALAR, 8 | GradientNormTracker, 9 | LossTracker, 10 | Tracker, 11 | ) 12 | from torch.utils.tensorboard import SummaryWriter 13 | 14 | 15 | class TensorboardTracker(Tracker): 16 | """Tracker for Tensorboard. Uses other trackers as subscribers.""" 17 | 18 | def __init__(self, logdir: str, trackers: list[Tracker] | None = None) -> None: 19 | """ 20 | Create tensorboard tracker. 21 | 22 | Parameters 23 | ---------- 24 | logdir : str 25 | Folder to store tensorboard logs. 26 | trackers : list[Tracker] | None, optional 27 | List of trackers subsctibed to the updates, by default None. 28 | """ 29 | super().__init__(name="Tensorboard") 30 | if trackers is None: 31 | trackers = [ 32 | LossTracker(), 33 | GradientNormTracker(), 34 | ] 35 | self.writer = SummaryWriter(log_dir=logdir) 36 | self.trackers = trackers 37 | 38 | def track( 39 | self, 40 | iteration: int, 41 | loss: torch.Tensor, 42 | scores: torch.Tensor, 43 | x_adv: torch.tensor, 44 | delta: torch.Tensor, 45 | grad: torch.Tensor, 46 | ) -> None: 47 | """ 48 | Update all subscribed trackers. 49 | 50 | Parameters 51 | ---------- 52 | iteration : int 53 | The attack iteration number. 54 | loss : torch.Tensor 55 | The value of the (per-sample) loss of the attack. 56 | scores : torch.Tensor 57 | The output scores from the model. 58 | x_adv : torch.tensor 59 | The adversarial examples at the current iteration. 60 | delta : torch.Tensor 61 | The adversarial perturbations at the current iteration. 62 | grad : torch.Tensor 63 | The gradient of delta at the given iteration. 
64 | """ 65 | for tracker in self.trackers: 66 | tracker.track(iteration, loss, scores, x_adv, delta, grad) 67 | tracked_value = tracker.get_last_tracked() 68 | for i, sample in enumerate(tracked_value): 69 | if tracker.tracked_type == SCALAR: 70 | self.writer.add_scalar( 71 | f"Sample #{i}/{tracker.name}", 72 | sample, 73 | global_step=iteration, 74 | ) 75 | elif tracker.tracked_type == MULTI_SCALAR: 76 | self.writer.add_scalars( 77 | main_tag=f"Sample #{i}/{tracker.name}", 78 | tag_scalar_dict={ 79 | f"Sample #{i}/{tracker.name}{j}": v 80 | for j, v in enumerate(sample) 81 | }, 82 | global_step=iteration, 83 | ) 84 | elif tracker.tracked_type == IMAGE: 85 | self.writer.add_image( 86 | f"Sample #{i}/{tracker.name}", 87 | sample, 88 | global_step=iteration, 89 | ) 90 | 91 | def get_last_tracked(self) -> NotImplementedError: 92 | """Not implemented for this tracker.""" 93 | return NotImplementedError( 94 | "Last tracked value is not available for this tracker.", 95 | ) 96 | -------------------------------------------------------------------------------- /src/secmlt/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utilities for the use of the library.""" 2 | -------------------------------------------------------------------------------- /src/secmlt/utils/tensor_utils.py: -------------------------------------------------------------------------------- 1 | """Basic utils for tensor handling.""" 2 | 3 | import torch 4 | 5 | 6 | def atleast_kd(x: torch.Tensor, k: int) -> torch.Tensor: 7 | """ 8 | Add dimensions to the tensor x until it reaches k dimensions. 9 | 10 | Parameters 11 | ---------- 12 | x : torch.Tensor 13 | Input tensor. 14 | k : int 15 | Number of desired dimensions. 16 | 17 | Returns 18 | ------- 19 | torch.Tensor 20 | The input tensor x but with k dimensions. 
21 | """ 22 | if k <= x.dim(): 23 | msg = "The number of desired dimensions should be > x.dim()" 24 | raise ValueError(msg) 25 | shape = x.shape + (1,) * (k - x.ndim) 26 | return x.reshape(shape) 27 | --------------------------------------------------------------------------------