├── .flake8
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   └── workflows
│       └── tests.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── build.sh
├── examples
│   ├── README.md
│   ├── create_synthesizer.py
│   ├── example_pytorch_callback.py
│   ├── extract_mnist_cpu.py
│   └── extract_mnist_gpu.py
├── images
│   ├── cropped.png
│   └── privacyraven_logo.jpeg
├── noxfile.py
├── poetry.lock
├── pyproject.toml
├── src
│   └── privacyraven
│       ├── __init__.py
│       ├── extraction
│       │   ├── attacks.py
│       │   ├── core.py
│       │   ├── metrics.py
│       │   └── synthesis.py
│       ├── inversion
│       │   ├── attacks.py
│       │   └── core.py
│       ├── membership_inf
│       │   ├── core.py
│       │   ├── metric.py
│       │   └── network.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── four_layer.py
│       │   ├── inversion_model.py
│       │   ├── pytorch.py
│       │   └── victim.py
│       ├── run.py
│       ├── utils
│       │   ├── data.py
│       │   ├── model_creation.py
│       │   └── query.py
│       └── version.py
└── tests
    ├── __init__.py
    ├── generate.py
    ├── test_extraction_core.py
    ├── test_extraction_metrics.py
    ├── test_extraction_synthesis.py
    ├── test_utils_data.py
    └── test_utils_query.py
/.flake8:
--------------------------------------------------------------------------------
1 | # .flake8
2 | [flake8]
3 | select = B,B9,BLK,C,E,F,I,S,W #ANN, D
4 | #docstring-convention = google
5 | ignore = E203,E501,W503,F401,D,F841,I100,S101,F811
6 | max-line-length = 100
7 | application-import-names = privacyraven,tests
8 | import-order-style = google
9 | #per-file-ignores = tests/*:S101
10 | #exclude = tests/*.py, examples/*.py, noxfile.py
11 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **System Information:**
27 | Describe as much of the setup details as possible.
28 |
29 | **Additional context**
30 | Add any other context about the problem here.
31 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels:
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | Provide a clear and concise description of what the problem is. If this is related to code quality, style, or efficiency improvements, please make that clear.
12 |
13 | **Describe the solution you'd like.**
14 | Provide a clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered.**
17 | Provide a clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Detail any additional context.**
20 | Add any other context or screenshots about the feature request here. Please link to a research paper if applicable.
21 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 | on: [push, pull_request]
3 | jobs:
4 | tests:
5 | runs-on: ubuntu-latest
6 | strategy:
7 | matrix:
8 | python-version: [3.8, '3.7']
9 | name: Python ${{ matrix.python-version }}
10 | steps:
11 | - uses: actions/checkout@v2
12 | - uses: actions/setup-python@v1
13 | with:
14 | python-version: ${{ matrix.python-version }}
15 | architecture: x64
16 | - run: pip install nox==2020.8.22
17 | - run: pip install poetry==1.0.10
18 | - run: nox
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | # pytype static type analyzer
135 | .pytype/
136 |
137 | # Cython debug symbols
138 | cython_debug/
139 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # .pre-commit-config.yaml
2 | repos:
3 | - repo: https://github.com/pre-commit/pre-commit-hooks
4 | rev: v2.3.0
5 | hooks:
6 | - id: check-yaml
7 | - id: end-of-file-fixer
8 | - id: trailing-whitespace
9 | - repo: local
10 | hooks:
11 | - id: black
12 | name: black
13 | entry: poetry run black
14 | language: system
15 | types: [python]
16 | - id: flake8
17 | name: flake8
18 | entry: poetry run flake8
19 | language: system
20 | types: [python]
21 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Thank you for considering contributing to PrivacyRaven!
7 | Feel free to ask any questions if something is unclear.
8 | We’ve codified a set of guidelines and instructions to make contributing to PrivacyRaven as easy as possible.
9 | Please note that these guidelines are not rigid and can be broken if necessary.
10 |
11 | + Build instructions are contained in the README. Fork this repository on GitHub and rebase your fork against new changes from master.
12 | + Raise or be assigned to an issue before submitting a pull request. After you submit a pull request, one of our core maintainers must approve it before it is merged.
13 | + When reporting a bug, provide as much information as possible regarding your setup.
14 | + When suggesting a feature, feel free to leave it open-ended. Provide links to similar implementations or reference papers as applicable.
15 | + Ensure that the code you’ve contributed passes all tests and formatting checks. PrivacyRaven uses Python black, isort, flake8, and nox.
16 | + Be generous with comments and explanations
17 | + Create tests and documentation for your contribution
18 | + For now, documentation merely entails incorporating docstrings like so:
19 | ```python
20 |
21 | def example(a):
22 | """Does something
23 |
24 | Parameters:
25 | a: data type that represents something
26 |
27 | Returns:
28 | a data type that represents something else"""
29 |
30 | class another(object):
31 | """Does another thing
32 |
33 | Attributes:
34 | a: data type that represents something else"""
35 | ```
36 | + Clearly disclose all known limitations
37 | + Center the user while developing. Simplify the API as much as possible
38 | + Make sure that a user can quickly understand what your code does even without understanding how it works. Reference the [Python API Checklist](https://github.com/vintasoftware/python-api-checklist/blob/master/checklist-en.md) to confirm usability.
39 | + Build from the fundamental building blocks.
40 | + To add a new attack in `attacks.py`, make sure to build functions for any novel synthesizers, robustness metrics, or subset sampling strategies, and ensure that the new attack is included in the run-all-attacks feature.
41 | + To add a new synthesizer in `synthesis.py` or a robustness metric in `metrics.py`, maintain the same function signature.
42 | + When building classes, use [attrs](https://www.attrs.org/en/stable/), as in the sketch below.
43 | + We prefer data-specific code to be written in PyTorch and generally adhere to the [PyTorch Style Guide](https://github.com/IgorSusmelj/pytorch-styleguide).
44 |
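
For illustration, here is a minimal, hypothetical attrs-based class; the name and attributes are illustrative only and not part of the codebase, but it follows the docstring and attrs conventions above:

```python
import attr


@attr.s
class ExampleMetric(object):
    """Computes an illustrative robustness metric

    Attributes:
        query: Function that queries the victim model
        query_limit: Int of the maximum number of queries allowed"""

    # Hypothetical attributes; real classes should mirror ModelExtractionAttack
    query = attr.ib()
    query_limit = attr.ib(default=100)

    def __attrs_post_init__(self):
        """Work that should run immediately after initialization goes here"""
        print(f"Query limit: {self.query_limit}")
```

This mirrors the structure of `ModelExtractionAttack` in `src/privacyraven/extraction/core.py`.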
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6
2 |
3 | RUN curl -sSL https://raw.githubusercontent.com/sdispater/poetry/master/get-poetry.py | python
4 |
5 | ADD . ~/PrivacyRaven
6 | WORKDIR ~/PrivacyRaven
7 |
8 | ENV PATH="${PATH}:/root/.poetry/bin"
9 |
10 | RUN poetry install
11 | #WORKDIR examples/
12 |
13 | #RUN poetry run pip install fsspec
14 | #RUN poetry run python create_synthesizer.py
15 | #RUN poetry run python example_pytorch_callback.py
16 |
17 | #WORKDIR ../
18 |
19 | RUN poetry run pip install nox
20 | RUN poetry update
21 | RUN pip install nox
22 | RUN nox
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 |
4 | Note: This project is on hiatus.
5 |
6 | **PrivacyRaven** is a privacy testing library for deep learning systems.
7 | You can use it to determine the susceptibility of a model to different privacy attacks; evaluate privacy preserving machine learning techniques; develop novel privacy metrics and attacks; and repurpose attacks for data provenance and other use cases.
8 |
9 | PrivacyRaven supports label-only black-box model extraction, membership inference, and (soon) model inversion attacks.
10 | We also plan to include differential privacy verification, automated hyperparameter optimization, more classes of attacks, and other features; see the [GitHub issues](https://github.com/trailofbits/PrivacyRaven/issues) for more information.
11 | PrivacyRaven has been featured at the [OpenMined Privacy Conference](https://www.youtube.com/watch?v=F46lX5VIoas&list=PLUNOsx6Az_ZGKQd_p4StdZRFQkCBwnaY6&t=2h21m50s) and [Empire Hacking](https://www.empirehacking.nyc/), and on the [Trail of Bits blog](https://blog.trailofbits.com/2020/10/08/privacyraven-has-left-the-nest/).
12 |
13 | ## Why use PrivacyRaven?
14 |
15 | Deep learning systems, particularly neural networks, have proliferated in a wide range of applications, including privacy-sensitive use cases such as facial recognition and medical diagnoses.
16 | However, these models are vulnerable to privacy attacks that target both the intellectual property of the model and the confidentiality of the training data.
17 | Recent literature has seen an arms race between privacy attacks and defenses on various systems.
18 | Until now, however, engineers and researchers have not had the privacy analysis tools they need to keep pace with this trend.
19 | Hence, we developed PrivacyRaven, a machine learning assurance tool that aims to be:
20 | + **Usable**: Multiple levels of abstraction allow users to either automate much of the internal mechanics or directly control them, depending on their use case and familiarity with the domain.
21 | + **Flexible**: A modular design makes the attack configurations customizable and interoperable. It also allows new privacy metrics and attacks to be incorporated straightforwardly.
22 | + **Efficient**: PrivacyRaven reduces the boilerplate, affording quick prototyping and fast experimentation. Each attack can be launched in fewer than 15 lines of code.
23 |
24 | ## How does it work?
25 |
26 | PrivacyRaven partitions each attack into multiple customizable and optimizable phases.
27 | Different interfaces are also provided for each attack.
28 | The interface shown below is known as the core interface.
29 | PrivacyRaven also provides wrappers around specific attack configurations found in the literature and a run-all-attacks feature.
30 |
31 | Here is how you would launch a model extraction attack in PrivacyRaven:
32 |
33 | ```python
34 | #examples/extract_mnist_gpu.py
35 | import privacyraven as pr
36 | from privacyraven.utils.data import get_emnist_data
37 | from privacyraven.extraction.core import ModelExtractionAttack
38 | from privacyraven.utils.query import get_target
39 | from privacyraven.models.victim import train_four_layer_mnist_victim
40 | from privacyraven.models.four_layer import FourLayerClassifier
41 |
42 | # Create a query function for a target PyTorch Lightning model
43 | model = train_four_layer_mnist_victim()
44 |
45 |
46 | def query_mnist(input_data):
47 | # PrivacyRaven provides built-in query functions
48 | return get_target(model, input_data, (1, 28, 28, 1))
49 |
50 |
51 | # Obtain seed (or public) data to be used in extraction
52 | emnist_train, emnist_test = get_emnist_data()
53 |
54 | # Run a model extraction attack
55 | attack = ModelExtractionAttack(
56 | query_mnist, # query function
57 | 200, # query limit
58 | (1, 28, 28, 1), # victim input shape
59 | 10, # number of targets
60 | (3, 1, 28, 28), # substitute input shape
61 | "copycat", # synthesizer name
62 | FourLayerClassifier, # substitute model architecture
63 | 784, # substitute input size
64 | emnist_train, # seed train data
65 | emnist_test, # seed test data
66 | )
67 | ```
68 | Since the only requirement of the victim model is a query function, PrivacyRaven can be used to attack a wide range of models regardless of their framework and distribution method.
69 | The other classes of attacks can be launched in a similar fashion. See the `examples` folder for more information.
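
For instance, the same attack can be configured with keyword arguments and run on a CPU; the snippet below condenses `examples/extract_mnist_cpu.py`:

```python
# Condensed from examples/extract_mnist_cpu.py
import privacyraven as pr
from privacyraven.utils.data import get_emnist_data
from privacyraven.extraction.core import ModelExtractionAttack
from privacyraven.utils.query import get_target
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.models.four_layer import FourLayerClassifier

# Train the victim model on the CPU
model = train_four_layer_mnist_victim(gpus=0)


def query_mnist(input_data):
    # PrivacyRaven provides built-in query functions
    return get_target(model, input_data, (1, 28, 28, 1))


emnist_train, emnist_test = get_emnist_data()

attack = ModelExtractionAttack(
    query=query_mnist,
    query_limit=100,
    victim_input_shape=(1, 28, 28, 1),
    victim_output_targets=10,
    substitute_input_shape=(3, 1, 28, 28),
    synthesizer="copycat",
    substitute_model_arch=FourLayerClassifier,
    substitute_input_size=784,
    seed_data_train=emnist_train,
    seed_data_test=emnist_test,
    gpus=0,
)
```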
70 |
71 | ## Want to use PrivacyRaven?
72 |
73 | 1. Install [poetry](https://python-poetry.org/docs/).
74 | 2. Git clone this repository.
75 | 3. Run `poetry update`.
76 | 4. Run `poetry install`.
77 |
78 | If you'd like to use a Jupyter Notebook environment, run `poetry shell` followed by `jupyter notebook`.
79 |
80 | Additionally, if you'd like to run PrivacyRaven in a Docker container, run `chmod +x build.sh` followed by `./build.sh`. Note that depending on the amount of resources you allocate to Docker, PrivacyRaven's performance may be drastically impacted.
81 |
82 | Feel free to join our #privacyraven channel in [Empire Hacking](https://slack.empirehacking.nyc/) if you need help using or extending PrivacyRaven.
83 | The official pip release will arrive soon.
84 |
85 | Please note that PrivacyRaven is still early in development and is undergoing rapid changes. Users are advised to update frequently and avoid applying PrivacyRaven to critical use cases.
86 |
87 | ## Want to contribute to PrivacyRaven?
88 |
89 | PrivacyRaven is still a work-in-progress.
90 | We invite you to contribute however you can, whether you want to incorporate a new synthesis technique or make an attack function more readable.
91 | Please visit [CONTRIBUTING.md](https://github.com/trailofbits/PrivacyRaven/blob/master/CONTRIBUTING.md) to get started.
92 |
93 | ## Why is it called PrivacyRaven?
94 |
95 | The raven has been associated with a variety of concepts in different cultures through time.
96 | Among these, the raven is commonly associated with prophecy and insight.
97 | Naturally, we named the tool PrivacyRaven because it is designed to provide insights into the privacy of deep learning.
98 |
99 | ## Who maintains PrivacyRaven?
100 |
101 | The core maintainers are:
102 | + [Suha S. Hussain](https://github.com/suhacker1)
103 | + [Philip Wang](https://github.com/pwang00)
104 | + [Jim Miller](https://github.com/james-miller-93)
105 |
106 | ## License
107 |
108 | This library is available under the [Apache License 2.0](https://github.com/trailofbits/PrivacyRaven/blob/master/LICENSE).
109 | For an exception to the terms, please [contact us](mailto:opensource@trailofbits.com).
110 |
111 | ## References
112 |
113 | While PrivacyRaven was built upon a [plethora of research](https://github.com/stratosphereips/awesome-ml-privacy-attacks) on attacking machine learning privacy, the works most critical to its development are:
114 |
115 | + [A Survey of Privacy Attacks in Machine Learning](https://arxiv.org/abs/2007.07646)
116 | + [Membership Inference Attacks on Machine Learning: A Survey](https://arxiv.org/abs/2103.07853)
117 | + [Neural Network Inversion in Adversarial Setting via Background Knowledge Alignment](https://dl.acm.org/doi/pdf/10.1145/3319535.3354261?casa_token=lDNQ40-4Wa4AAAAA%3Ap9olQ3qMdDZ0n2sl-nNIgk4sOuLRMBTGVTxycZ5wjGpnFPf5lTz-MYw0e8ISggSseHC9T46it5yX)
118 | + [Copycat CNN: Stealing Knowledge by Persuading Confession with Random Non-Labeled Data](https://ieeexplore.ieee.org/document/8489592)
119 | + [Knockoff Nets: Stealing Functionality of Black-Box Models](https://arxiv.org/abs/1812.02766)
120 |
121 | ## Appearances
122 |
123 | This is a list of publications, presentations, blog posts, and other public-facing media discussing PrivacyRaven.
124 |
125 | + [PrivacyRaven: Implementing a proof of concept for model inversion. Trail of Bits Blog](https://www.trailofbits.com/post/privacyraven-implementing-proof-of-concept-model-inversion)
126 | + [PrivacyRaven: Comprehensive Privacy Testing for Deep Learning. OpenMined Privacy Conference](https://www.youtube.com/watch?v=F46lX5VIoas&list=PLUNOsx6Az_ZGKQd_p4StdZRFQkCBwnaY6&t=8510s)
127 | + [PrivacyRaven: Comprehensive Privacy Testing for Deep Learning. Empire Hacking](https://www.youtube.com/watch?v=tKFc0ZsWNX4)
128 | + [PrivacyRaven: Comprehensive Privacy Testing for Deep Learning. Trail of Bits Blog](https://blog.trailofbits.com/2020/10/08/privacyraven-has-left-the-nest/)
129 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | sudo docker build --no-cache -t praven .
2 | sudo docker run -it -c 8 praven
3 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | Overview:
2 |
3 | + `extract_mnist_cpu.py`: Applies a model extraction attack with the copycat synthesizer to an example MNIST model using a CPU
4 | + `extract_mnist_gpu.py`: Applies a model extraction attack with the copycat synthesizer to an example MNIST model using a GPU
5 | + `example_pytorch_callback.py`: Applies a model extraction attack with a callback from [PyTorch Lightning Bolts](https://pytorch-lightning-bolts.readthedocs.io/en/latest/index.html)
6 | + `create_synthesizer.py`: Applies a model extraction attack with a custom synthesizer
7 |
--------------------------------------------------------------------------------
/examples/create_synthesizer.py:
--------------------------------------------------------------------------------
1 | import privacyraven as pr
2 | import torch
3 | from privacyraven.extraction.synthesis import register_synth
4 | from privacyraven.utils.data import get_emnist_data
5 | from privacyraven.extraction.core import ModelExtractionAttack
6 | from privacyraven.utils.query import get_target
7 | from privacyraven.utils.query import reshape_input
8 | from privacyraven.models.victim import train_four_layer_mnist_victim
9 | from privacyraven.models.four_layer import FourLayerClassifier
10 |
11 |
12 | # Trains a 4-layer fully connected neural network on MNIST data using all of the GPUs
13 | # available to the user, or CPU if no GPUs are available (torch.cuda.device_count handles this).
14 |
15 | model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
16 |
17 |
18 | # Create a query function for a target PyTorch Lightning model
19 | def query_mnist(input_data):
20 | return get_target(model, input_data, (1, 28, 28, 1))
21 |
22 | # Obtain seed (or public) data to be used in extraction
23 | emnist_train, emnist_test = get_emnist_data()
24 |
25 |
26 | # Users may define a function to generate synthetic data via a function of the form
27 | #
28 | # func(seed_data_train, query, query_limit, *args, **kwargs)
29 | #
30 | # This function then can be registered via the @register_synth decorator
31 | # See the following example, which is an aliased version of the copycat synthesizer
32 | # that may be found in privacyraven.extraction.synthesis:
33 |
34 | @register_synth
35 | def custom_synthesizer(data, query, query_limit, art_model, victim_input_shape, substitute_input_shape, victim_output_targets, reshape=True):
36 | """Creates a synthetic dataset by labeling seed data; art_model and victim_output_targets are accepted for signature compatibility but unused here"""
37 | (x_data, y_data) = data
38 | y_data = query(x_data)
39 | if reshape:
40 | x_data = reshape_input(x_data, substitute_input_shape)
41 | return x_data, y_data
42 |
43 | # The attack looks up the synthesizer by its registered function name.
44 | attack = ModelExtractionAttack(
45 | query=query_mnist,
46 | query_limit=100,
47 | victim_input_shape=(1, 28, 28, 1),
48 | victim_output_targets=10,
49 | substitute_input_shape=(3, 1, 28, 28),
50 | synthesizer="custom_synthesizer",
51 | substitute_model_arch=FourLayerClassifier,
52 | substitute_input_size=784,
53 | seed_data_train=emnist_train,
54 | seed_data_test=emnist_test,
55 | gpus=1,
56 | )
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/examples/example_pytorch_callback.py:
--------------------------------------------------------------------------------
1 | """
2 | PrivacyRaven supports user-defined PyTorch Lightning and Lightning Bolts callbacks, which may be passed
3 | as arguments to the currently available attack functions.
4 |
5 | A callback is a function (or, in this case, a class) that is passed as an argument to another
6 | function, which executes the callback at some point during its runtime.
7 |
8 | Users should refer to https://pytorch-lightning-bolts.readthedocs.io/en/latest/callbacks.html to construct
9 | PyTorch Lightning Bolts callbacks.
10 | """
11 | import privacyraven as pr
12 | import torch
13 | from privacyraven.utils.data import get_emnist_data
14 | from pl_bolts.callbacks import PrintTableMetricsCallback
15 | from privacyraven.extraction.core import ModelExtractionAttack
16 | from privacyraven.utils.query import get_target
17 | from privacyraven.models.victim import train_four_layer_mnist_victim
18 | from privacyraven.models.four_layer import FourLayerClassifier
19 | from pytorch_lightning.callbacks import Callback
20 |
21 |
22 | # Trains a 4-layer fully connected neural network on MNIST data using all of the GPUs
23 | # available to the user, or CPU if no GPUs are available (torch.cuda.device_count handles this).
24 |
25 | model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
26 |
27 | # Create a query function for a target PyTorch Lightning model
28 | def query_mnist(input_data):
29 | # PrivacyRaven provides built-in query functions
30 | return get_target(model, input_data, (1, 28, 28, 1))
31 |
32 | emnist_train, emnist_test = get_emnist_data()
33 |
34 | # Below is a user-defined callback that inherits from PyTorch Lightning's Callback class.
35 | # All it does is print "End of epoch" at the end of a training epoch.
36 |
37 | class CustomCallback(Callback):
38 | def on_epoch_end(self, trainer, pl_module):
39 | print('End of epoch')
40 |
41 | # Runs a Model Extraction Attack with the user-defined CustomCallback specified as an argument.
42 | # Note that parentheses are needed while passing in the callback, since
43 | # PyTorch Lightning callbacks are classes that need to be instantiated.
44 |
45 | attack = ModelExtractionAttack(
46 | query=query_mnist,
47 | query_limit=100,
48 | victim_input_shape=(1, 28, 28, 1),
49 | victim_output_targets=10,
50 | substitute_input_shape=(3, 1, 28, 28),
51 | synthesizer="copycat",
52 | substitute_model_arch=FourLayerClassifier,
53 | substitute_input_size=784,
54 | seed_data_train=emnist_train,
55 | seed_data_test=emnist_test,
56 | gpus=1,
57 | callback=CustomCallback()
58 | )
59 |
60 | # Many callbacks built into PyTorch Lightning Bolts are already very useful. Consider the following example, which
61 | # runs the same Model Extraction Attack with the PrintTableMetricsCallback from PyTorch Lightning Bolts specified as an argument.
62 | # After every epoch, a table should be displayed with all of the training metrics (e.g., training loss).
63 | attack = ModelExtractionAttack(
64 | query=query_mnist,
65 | query_limit=100,
66 | victim_input_shape=(1, 28, 28, 1),
67 | victim_output_targets=10,
68 | substitute_input_shape=(3, 1, 28, 28),
69 | synthesizer="copycat",
70 | substitute_model_arch=FourLayerClassifier,
71 | substitute_input_size=784,
72 | seed_data_train=emnist_train,
73 | seed_data_test=emnist_test,
74 | gpus=1,
75 | callback=PrintTableMetricsCallback()
76 | )
77 |
78 |
--------------------------------------------------------------------------------
/examples/extract_mnist_cpu.py:
--------------------------------------------------------------------------------
1 | """
2 | This model extraction attack steals a model trained on MNIST by
3 | using the copycat synthesizer and the EMNIST dataset to train a
4 | FourLayerClassifier substitute. The number of GPUs is set to 0,
5 | and keyword attributes are used for model extraction.
6 | """
7 | import privacyraven as pr
8 |
9 | from privacyraven.utils.data import get_emnist_data
10 | from privacyraven.extraction.core import ModelExtractionAttack
11 | from privacyraven.utils.query import get_target
12 | from privacyraven.models.victim import train_four_layer_mnist_victim
13 | from privacyraven.models.four_layer import FourLayerClassifier
14 |
15 | # Trains a 4-layer fully connected neural network on MNIST data with the user's CPU. See
16 | # src/privacyraven/models/victim.py for a full set of supported parameters.
17 |
18 | model = train_four_layer_mnist_victim(gpus=0)
19 |
20 | # Create a query function for a target PyTorch Lightning model
21 | def query_mnist(input_data):
22 | # PrivacyRaven provides built-in query functions
23 | return get_target(model, input_data, (1, 28, 28, 1))
24 |
25 |
26 | # Obtain seed (or public) data to be used in extraction
27 | emnist_train, emnist_test = get_emnist_data()
28 |
29 | # Run a model extraction attack
30 | attack = ModelExtractionAttack(
31 | query=query_mnist,
32 | query_limit=100,
33 | victim_input_shape=(1, 28, 28, 1), # EMNIST data point shape
34 | victim_output_targets=10,
35 | substitute_input_shape=(3, 1, 28, 28),
36 | synthesizer="copycat",
37 | substitute_model_arch=FourLayerClassifier,
38 | substitute_input_size=784, # 28 * 28, the size of a single MNIST image
39 | seed_data_train=emnist_train,
40 | seed_data_test=emnist_test,
41 | gpus=0,
42 | )
43 |
--------------------------------------------------------------------------------
/examples/extract_mnist_gpu.py:
--------------------------------------------------------------------------------
1 | """
2 | This model extraction attack steals a model trained on MNIST by
3 | using the copycat synthesizer and the EMNIST dataset to train a
4 | FourLayerClassifier substitute. A single GPU is assumed.
5 | """
6 | import privacyraven as pr
7 |
8 | from privacyraven.utils.data import get_emnist_data
9 | from privacyraven.extraction.core import ModelExtractionAttack
10 | from privacyraven.utils.query import get_target
11 | from privacyraven.models.victim import train_four_layer_mnist_victim
12 | from privacyraven.models.four_layer import FourLayerClassifier
13 |
14 | # Trains a 4-layer fully connected neural network on MNIST data using the user's GPUs. See
15 | # src/privacyraven/models/victim.py for a full set of supported parameters.
16 |
17 | model = train_four_layer_mnist_victim(gpus=1)
18 |
19 | # Create a query function for a target PyTorch Lightning model
20 | def query_mnist(input_data):
21 | # PrivacyRaven provides built-in query functions
22 | return get_target(model, input_data, (1, 28, 28, 1))
23 |
24 |
25 | # Obtain seed (or public) data to be used in extraction
26 | emnist_train, emnist_test = get_emnist_data()
27 |
28 | # Run a model extraction attack
29 | attack = ModelExtractionAttack(
30 | query_mnist,
31 | 200, # Less than the number of MNIST data points: 60000
32 | (1, 28, 28, 1), # Victim input shape (shape of an EMNIST data point)
33 | 10, # Number of victim output targets
34 | (3, 1, 28, 28), # Substitute input shape
35 | "copycat",
36 | FourLayerClassifier,
37 | 784, # 28 * 28 or the size of a single image
38 | emnist_train,
39 | emnist_test,
40 | )
41 |
42 | print(attack.__dict__)  # Inspect the attack's full configuration and results
43 | subs = attack.substitute_model.__dict__
44 | print(subs)  # Inspect the attributes of the trained substitute model
45 | tester = subs.get('test_dataloader')
46 | print(tester.__dict__)  # Inspect the dataloader used to test the substitute
47 |
--------------------------------------------------------------------------------
/images/cropped.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/trailofbits/PrivacyRaven/8d5cee834be85ef29502028d3d8788780d393f17/images/cropped.png
--------------------------------------------------------------------------------
/images/privacyraven_logo.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/trailofbits/PrivacyRaven/8d5cee834be85ef29502028d3d8788780d393f17/images/privacyraven_logo.jpeg
--------------------------------------------------------------------------------
/noxfile.py:
--------------------------------------------------------------------------------
1 | # noxfile.py
2 | import tempfile
3 |
4 | import nox
5 |
6 | package = "privacyraven"
7 | nox.options.sessions = "tests", "lint", "black", "isort"
8 | locations = "src", "tests", "noxfile.py"
9 |
10 |
11 | def install_with_constraints(session, *args, **kwargs):
12 | with tempfile.NamedTemporaryFile() as requirements:
13 | session.run(
14 | "poetry",
15 | "export",
16 | "--dev",
17 | "--format=requirements.txt",
18 | "--without-hashes",
19 | f"--output={requirements.name}",
20 | external=True,
21 | )
22 | session.install(f"--constraint={requirements.name}", *args, **kwargs)
23 |
24 |
25 | @nox.session(python=["3.7"])
26 | def tests(session):
27 | args = session.posargs # or ["--cov"]
28 | session.run("poetry", "update", external=True)
29 | session.run("poetry", "install", external=True)
30 | # session.run("poetry", "install", "--no-dev", external=True)
31 | install_with_constraints(
32 | session, "coverage[toml]", "pytest", "pytest-cov", "pytest-mock"
33 | )
34 | session.run("poetry", "run", "pytest", "-s", *args, external=True)
35 |
36 |
37 | @nox.session(python=["3.7"])
38 | def lint(session):
39 | args = session.posargs or locations
40 | # session.install("flake8", "flake8-bugbear", "flake8-bandit", "flake8-import-order")
41 | install_with_constraints(
42 | session,
43 | "flake8",
44 | "flake8-bandit",
45 | # "flake8-black",
46 | "flake8-bugbear",
47 | # "flake8-import-order",
48 | )
49 | session.run("flake8", *args)
50 |
51 |
52 | @nox.session(python="3.7")
53 | def black(session):
54 | args = session.posargs or locations
55 | # session.install("black")
56 | install_with_constraints(session, "black")
57 | session.run("black", *args)
58 |
59 |
60 | @nox.session(python="3.7")
61 | def isort(session):
62 | args = session.posargs or locations
63 | # session.install("black")
64 | install_with_constraints(session, "isort")
65 | session.run("isort", *args)
66 |
67 |
68 | """
69 | @nox.session(python="3.8")
70 | def safety(session):
71 | with tempfile.NamedTemporaryFile() as requirements:
72 | session.run(
73 | "poetry",
74 | "export",
75 | "--dev",
76 | "--format=requirements.txt",
77 | f"--output={requirements.name}",
78 | "--without-hashes",
79 | external=True,
80 | )
81 | session.install("safety")
82 | session.run("safety", "check", f"--file={requirements.name}", "--full-report")
83 | """
84 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "privacyraven"
3 | version = "0.1.0"
4 | description = "privacy testing for deep learning"
5 | readme = "README.md"
6 | homepage = "https://github.com/trailofbits/PrivacyRaven"
7 | repository = "https://github.com/trailofbits/PrivacyRaven"
8 | authors = ["suhacker1 "]
9 |
10 | [tool.poetry.dependencies]
11 | python = ">=3.6.2,<4.0"
12 | torch = "^1.8.0"
13 | torchvision = "^0.9.0"
14 | click = "^7.1.2"
15 | tqdm = "^4.46.1"
16 | matplotlib = "^3.2.1"
17 | pytorch_lightning = "^1.1.0"
18 | ipykernel = "^5.3.0"
19 | attrs = "^19.3.0"
20 | sklearn = "^0.0"
21 | pytest = "^6.0.2"
22 | flake8 = "^3.8.3"
23 | isort = "5.5.3"
24 | fsspec = "0.8.5"
25 | adversarial-robustness-toolbox = "1.4.1"
26 | pytorch-lightning-bolts = "0.2.5"
27 |
28 | hypothesis = "^5.41.1"
29 | jupyter = "^1.0.0"
30 | jupyterlab = "^1.2.21"
31 | wheel = "^0.36.2"
32 | scalene = "^1.1.12"
33 | memory_profiler = "^0.58.0"
34 | pyrsistent = "^0.17.3"
35 | torchmetrics = "^0.3.1"
36 |
37 | [tool.poetry.dev-dependencies]
38 | pytest = "^6.0"
39 | coverage = {extras = ["toml"], version = "5.1"}
40 | pytest-cov = "^2.9.0"
41 | pytest-mock = "^3.1.1"
42 | black = "19.10b0"
43 | flake8 = "^3.8.2"
44 | flake8-bandit = "^2.1.2"
45 | flake8-black = "^0.2.0"
46 | flake8-bugbear = "^20.1.4"
47 | flake8-import-order = "^0.18.1"
48 | safety = "^1.9.0"
49 | flake8-docstrings = "^1.5.0"
50 |
51 | [tool.poetry.scripts]
52 | privacyraven = "privacyraven.console:main"
53 | [tool.coverage.paths]
54 | source = ["src", "*/site-packages"]
55 |
56 | [tool.coverage.run]
57 | branch = true
58 | source = ["privacyraven"]
59 |
60 | [tool.coverage.report]
61 | show_missing = true
62 |
63 | [build-system]
64 | # requires = ["poetry>=0.12"]
65 | # build-backend = "poetry.masonry.api"
66 | requires = ["poetry_core>=1.0.0"]
67 | build-backend = "poetry.core.masonry.api"
--------------------------------------------------------------------------------
/src/privacyraven/__init__.py:
--------------------------------------------------------------------------------
1 | # src/privacyraven/__init__.py
2 | """PrivacyRaven: Comprehensive Privacy Testing for Deep Learning."""
3 |
4 | from privacyraven.version import __version__
5 |
--------------------------------------------------------------------------------
/src/privacyraven/extraction/attacks.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | import attr
4 |
5 | from privacyraven.extraction.core import ModelExtractionAttack
6 |
7 |
8 | def get_extraction_attrs():
9 | """Returns all the attributes of a Model Extraction Attack"""
10 | attributes = ModelExtractionAttack.__dict__["__attrs_attrs__"]
11 | attr_names = (a.name for a in attributes)
12 | return attr_names
13 |
14 |
15 | def copycat_attack(*args, **kwargs):
16 | """Runs the CopyCat model extraction attack
17 |
18 | Arxiv Paper: https://arxiv.org/abs/1806.05476
19 |
20 | Presently, this function excludes subset sampling strategies"""
21 | copy = partial(ModelExtractionAttack, synthesizer="copycat")
22 | return copy(*args, **kwargs)
23 |
24 |
25 | def cloudleak(*args, **kwargs):
26 | """Runs CloudLeak model extraction attacks
27 |
28 | Returns an array of attacks that use synthesis functions
29 | based on adversarial/evasion attacks
30 |
31 | Based upon: https://bit.ly/31Npbgj
32 |
33 | Unlike the paper, this function does not include subset
34 | sampling strategies and relies upon different evasion
35 | attacks in order to comply with the threat model"""
36 |
37 | adv_synths = ["HopSkipJump"]
38 | results = []
39 | for s in adv_synths:
40 | attack = partial(ModelExtractionAttack, synthesizer=s)
41 | result = attack(*args, **kwargs)
42 | results.append(result)
43 | return results
44 |
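
For illustration (not part of the file above), here is a minimal usage sketch of these wrappers; it mirrors the setup in `examples/extract_mnist_cpu.py` and forwards the same keyword arguments accepted by `ModelExtractionAttack`:

```python
from privacyraven.extraction.attacks import copycat_attack, get_extraction_attrs
from privacyraven.models.four_layer import FourLayerClassifier
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target

# List the configurable attributes of ModelExtractionAttack
print(list(get_extraction_attrs()))

# Victim model and query function, as in examples/extract_mnist_cpu.py
model = train_four_layer_mnist_victim(gpus=0)


def query_mnist(input_data):
    return get_target(model, input_data, (1, 28, 28, 1))


emnist_train, emnist_test = get_emnist_data()

# copycat_attack forwards its arguments to ModelExtractionAttack with the
# copycat synthesizer preselected, so no synthesizer argument is needed
attack = copycat_attack(
    query=query_mnist,
    query_limit=100,
    victim_input_shape=(1, 28, 28, 1),
    victim_output_targets=10,
    substitute_input_shape=(3, 1, 28, 28),
    substitute_model_arch=FourLayerClassifier,
    substitute_input_size=784,
    seed_data_train=emnist_train,
    seed_data_test=emnist_test,
    gpus=0,
)
```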
--------------------------------------------------------------------------------
/src/privacyraven/extraction/core.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import attr
4 | import torch
5 | from torch.utils.data import DataLoader
6 |
7 | from privacyraven.extraction.metrics import label_agreement
8 | from privacyraven.extraction.synthesis import synthesize, synths
9 | from privacyraven.models.pytorch import ImagenetTransferLearning
10 | from privacyraven.utils.model_creation import (
11 | convert_to_inference,
12 | set_hparams,
13 | train_and_test,
14 | )
15 | from privacyraven.utils.query import establish_query
16 |
17 |
18 | @attr.s
19 | class ModelExtractionAttack(object):
20 | """Defines and launches a model extraction attack
21 |
22 | This is a black-box label-only extraction attack. A keyword representing
23 | a synthesis functions enables the creation of synthetic data, which is then
24 | used to create a substitute model. Presently, this class does not perform
25 | substitute model retraining.
26 |
27 | Changes to the arguments of this model must be reflected in changes to
28 | 'extraction/attacks.py' as well as 'm_inference/*.py'.
29 |
30 | If your API is rate limited, it is recommended to create a dataset from
31 | querying the API prior to applying PrivacyRaven.
32 |
33 | Attributes:
34 | query: Function that queries deep learning model
35 | query_limit: Int of amount of times the model can be queried
36 | victim_input_shape: Tuple of ints describing the shape of victim inputs
37 | victim_output_targets: Int of number of labels
38 | substitute_input_shape: Tuple of ints describing shape of data accepted
39 | by the substitute model
40 | substitute_model_arch: PyTorch module of substitute architecture.
41 | This can be found in models/pytorch.py
42 | substitute_input_size: Int of input size for the substitute model
43 | seed_data_train: Tuple of tensors or tensors of seed data
44 | seed_data_test: Same as above for test data
45 | transform: A torchvision.transform to be applied to the data
46 | batch_size: Int stating how many samples are in a batch of data
47 | num_workers: Int of the number of workers used in training
48 | max_epochs: Int of the maximum number of epochs used to train the model
49 | learning_rate: Float of the learning rate of the model
50 | art_model: A representation of the classifier for IBM ART
51 | callback: A PytorchLightning CallBack
52 | trainer_args: A list of tuples with keyword arguments for the Trainer
53 | e.g.: [("deterministic", True), ("profiler", "simple")] """
54 |
55 | gpu_availability = torch.cuda.device_count()
56 | query = attr.ib()
57 | query_limit = attr.ib(default=100)
58 | victim_input_shape = attr.ib(default=None)
59 | victim_output_targets = attr.ib(default=None)
60 | substitute_input_shape = attr.ib(default=None)
61 | synthesizer = attr.ib(default="copycat")
62 | substitute_model_arch = attr.ib(default=ImagenetTransferLearning)
63 | substitute_input_size = attr.ib(default=1000)
64 | seed_data_train = attr.ib(default=None)
65 | seed_data_test = attr.ib(default=None)
66 | test_data = attr.ib(default=None)
67 |
68 | transform = attr.ib(default=None)
69 | batch_size = attr.ib(default=100)
70 | num_workers = attr.ib(default=4)
71 | gpus = attr.ib(default=gpu_availability)
72 | max_epochs = attr.ib(default=10)
73 | learning_rate = attr.ib(default=1e-3)
74 | art_model = attr.ib(default=None)
75 | callback = attr.ib(default=None)
76 | trainer_args = attr.ib(default=None)
77 |
78 | # The following attributes are created during class creation
79 | # and are not taken as arguments
80 | synth_train = attr.ib(init=False)
81 | synth_valid = attr.ib(init=False)
82 | synth_test = attr.ib(init=False)
83 | hparams = attr.ib(init=False)
84 | train_dataloader = attr.ib(init=False)
85 | valid_dataloader = attr.ib(init=False)
86 | test_dataloader = attr.ib(init=False)
87 | substitute_model = attr.ib(init=False)
88 |
89 | def __attrs_post_init__(self):
90 | """The attack itself is executed here"""
91 | self.query = establish_query(self.query, self.victim_input_shape)
92 | if self.trainer_args is not None:
93 | self.trainer_args = dict(self.trainer_args)
94 | self.synth_train, self.synth_valid, self.synth_test = self.synthesize_data()
95 | print("Synthetic Data Generated")
96 |
97 | self.hparams = self.set_substitute_hparams()
98 | (
99 | self.train_dataloader,
100 | self.valid_dataloader,
101 | self.test_dataloader,
102 | ) = self.set_dataloaders()
103 |
104 | self.substitute_model = self.get_substitute_model()
105 |
106 | # If separate data is not provided, seed data is used for testing
107 | if self.test_data is None:
108 | self.label_agreement = label_agreement(
109 | self.seed_data_test,
110 | self.substitute_model,
111 | self.query,
112 | self.victim_input_shape,
113 | self.substitute_input_shape,
114 | )
115 | else:
116 | self.label_agreement = label_agreement(
117 | self.test_data,
118 | self.substitute_model,
119 | self.query,
120 | self.victim_input_shape,
121 | self.substitute_input_shape,
122 | )
123 |
124 | def synthesize_data(self):
125 | return synthesize(
126 | self.synthesizer,
127 | self.seed_data_train,
128 | self.seed_data_test,
129 | self.query,
130 | self.query_limit,
131 | self.art_model,
132 | self.victim_input_shape,
133 | self.substitute_input_shape,
134 | self.victim_output_targets,
135 | )
136 |
137 | def set_substitute_hparams(self):
138 | hparams = set_hparams(
139 | self.transform,
140 | self.batch_size,
141 | self.num_workers,
142 | self.gpus,
143 | self.max_epochs,
144 | self.learning_rate,
145 | self.substitute_input_size,
146 | self.victim_output_targets,
147 | )
148 | return hparams
149 |
150 | def set_dataloaders(self):
151 | print("Creating synthetic dataloaders")
152 | train_dataloader = DataLoader(
153 | self.synth_train,
154 | batch_size=self.hparams["batch_size"],
155 | num_workers=self.hparams["num_workers"],
156 | )
157 | valid_dataloader = DataLoader(
158 | self.synth_valid,
159 | batch_size=self.hparams["batch_size"],
160 | num_workers=self.hparams["num_workers"],
161 | )
162 | test_dataloader = DataLoader(
163 | self.synth_test,
164 | batch_size=self.hparams["batch_size"],
165 | num_workers=self.hparams["num_workers"],
166 | )
167 | return train_dataloader, valid_dataloader, test_dataloader
168 |
169 | def get_substitute_model(self):
170 | print("Training the substitute_model")
171 | model = train_and_test(
172 | self.substitute_model_arch,
173 | self.train_dataloader,
174 | self.valid_dataloader,
175 | self.test_dataloader,
176 | self.hparams,
177 | self.callback,
178 | self.trainer_args,
179 | )
180 | # This may limit the attack to PyTorch Lightning substitutes
181 | model = convert_to_inference(model, self.gpus)
182 | return model
183 |
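
For illustration (not part of the file above), a brief sketch of inspecting a finished attack; it assumes an `attack` instance like those constructed in the `examples` scripts, and relies on the fact that `__attrs_post_init__` runs the full pipeline at construction time:

```python
# Assumes `attack` is a ModelExtractionAttack instance, e.g. from
# examples/extract_mnist_cpu.py; the attack executes during construction.
print(attack.label_agreement)         # fidelity: test labels agreed upon by victim and substitute
substitute = attack.substitute_model  # trained substitute model, converted to inference mode
synthetic_train = attack.synth_train  # synthetic dataset used to train the substitute
```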
--------------------------------------------------------------------------------
/src/privacyraven/extraction/metrics.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import tqdm
3 |
4 | from privacyraven.extraction.synthesis import process_data
5 | from privacyraven.utils.query import get_target, query_model
6 |
7 |
8 | def label_agreement(
9 | test_data,
10 | substitute_model,
11 | query_victim,
12 | victim_input_shape,
13 | substitute_input_shape,
14 | ):
15 | """Returns the number of agreed upon data points between victim and substitute,
16 | thereby measuring the fidelity of an extraction attack"""
17 |
18 | limit = int(len(test_data))
19 |
20 | if limit >= 100:
21 | # We limit test data to 100 samples for efficiency
22 | limit = 100
23 | x_data, y_data = process_data(test_data, limit)
24 |
25 | substitute_result = get_target(substitute_model, x_data, substitute_input_shape)
26 | victim_result = query_victim(x_data)
27 |
28 | agreed = torch.sum(torch.eq(victim_result, substitute_result)).item()
29 |
30 | print(f"Fidelity: Out of {limit} data points, the models agreed upon {agreed}.")
31 | return agreed
32 |
--------------------------------------------------------------------------------
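A minimal sketch (not part of the repository) of calling label_agreement on its own. Here a second MNIST victim stands in for a substitute model, the test points are random unlabeled tensors, and the shapes mirror those used in tests/test_extraction_core.py:

import torch

from privacyraven.extraction.metrics import label_agreement
from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.query import get_target

victim = train_four_layer_mnist_victim(gpus=0, max_epochs=1)
substitute = train_four_layer_mnist_victim(gpus=0, max_epochs=1)  # stand-in substitute

# Query function that returns the victim's predicted labels
def query_victim(x):
    return get_target(victim, x, (1, 28, 28, 1))

# 100 unlabeled points suffice; label_agreement caps the comparison at 100 samples
test_images = torch.randn(100, 1, 28, 28)

agreed = label_agreement(
    test_images, substitute, query_victim, (1, 28, 28, 1), (3, 1, 28, 28)
)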
/src/privacyraven/extraction/synthesis.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from art.attacks.evasion import BoundaryAttack, HopSkipJump
4 | from art.estimators.classification import BlackBoxClassifier
5 | from pytorch_lightning.metrics.utils import to_onehot
6 | from sklearn.model_selection import train_test_split
7 | from tqdm import tqdm
8 |
9 | from privacyraven.utils.model_creation import NewDataset # , set_evasion_model
10 | from privacyraven.utils.query import reshape_input
11 |
12 | # Creates an empty dictionary for synthesis functions
13 | synths = dict()
14 |
15 |
16 | def register_synth(func):
17 | """Register a function as a synthesizer"""
18 | synths[func.__name__] = func
19 | return func
20 |
21 |
22 | def synthesize(
23 | func_name,
24 | seed_data_train,
25 | seed_data_test,
26 | query,
27 | query_limit,
28 | art_model,
29 | victim_input_shape,
30 | substitute_input_shape,
31 | victim_output_targets,
32 | ):
33 | """Synthesizes training and testing data for a substitute model
34 |
35 | First, the data is processed. Then, the synthesizer function is called.
36 |
37 | Parameters:
38 | func_name: String of the function name
39 | seed_data_train: Tuple of tensors or tensor of training data
40 |         seed_data_test: Tuple of tensors or tensor of testing data
41 |
42 | Returns:
43 | Three NewDatasets containing synthetic data"""
44 |
45 | if art_model is None:
46 | art_model = BlackBoxClassifier(
47 | predict=query,
48 | input_shape=victim_input_shape,
49 | nb_classes=victim_output_targets,
50 | clip_values=None, # (0, 255),
51 | preprocessing_defences=None,
52 | postprocessing_defences=None,
53 | preprocessing=(0, 1), # None,
54 | )
55 |
56 | func = synths[func_name]
57 |
58 | # We split the query limit in half to account for two datasets.
59 | query_limit = int(0.5 * query_limit)
60 |
61 | seed_data_train = process_data(seed_data_train, query_limit)
62 | seed_data_test = process_data(seed_data_test, query_limit)
63 |
64 | x_train, y_train = func(
65 | seed_data_train,
66 | query,
67 | query_limit,
68 | art_model,
69 | victim_input_shape,
70 | substitute_input_shape,
71 | victim_output_targets,
72 | )
73 | x_test, y_test = func(
74 | seed_data_test,
75 | query,
76 | query_limit,
77 | art_model,
78 | victim_input_shape,
79 | substitute_input_shape,
80 | victim_output_targets,
81 | )
82 |
83 | # Presently, we have hard-coded specific values for the test-train split.
84 | # In the future, this should be automated and/or optimized in some form.
85 | x_train, x_valid, y_train, y_valid = train_test_split(
86 | x_train, y_train, test_size=0.4, random_state=42
87 | )
88 |
89 | # The NewDataset ensures the synthesized data is a valid PL network input.
90 | synth_train = NewDataset(x_train, y_train)
91 | synth_valid = NewDataset(x_valid, y_valid)
92 | synth_test = NewDataset(x_test, y_test)
93 | return synth_train, synth_valid, synth_test
94 |
95 |
96 | def process_data(data, query_limit):
97 | """Returns x and (if given labeled data) y tensors that are shortened
98 | to the length of the query_limit if applicable"""
99 |
100 | try:
101 | # See if the data is labeled regardless of specific representation
102 | labeled = True
103 | x, y = data[0]
104 | except ValueError:
105 | # A value error is raised if the data is not labeled
106 | labeled = False
107 | if isinstance(data, np.ndarray) is True:
108 | data = torch.from_numpy(data)
109 | x_data = data.detach().clone().float()
110 | y_data = None
111 | bounded = False
112 | # Labeled data can come in multiple data formats, including, but
113 | # not limited to Torchvision datasets, lists of tuples, and
114 | # tuple of tuples. We attempt to address these edge cases
115 | # through the exception of an AttributeError
116 | if labeled:
117 | try:
118 | if isinstance(data.data, np.ndarray) is True:
119 | x_data, y_data = (
120 | torch.from_numpy(data.data).detach().clone().float(),
121 | torch.from_numpy(data.targets).detach().clone().float(),
122 | )
123 | else:
124 | x_data, y_data = (
125 | data.data.detach().clone().float(),
126 | data.targets.detach().clone().float(),
127 | )
128 | bounded = False
129 | except AttributeError:
130 | # Setting 'bounded' increases efficiency as data that
131 | # will be ignored due to the query limit will not be
132 | # included in the initial x and y data tensors
133 | bounded = True
134 |
135 | data_limit = int(len(data))
136 | if query_limit is None:
137 |                 query_limit = data_limit
138 | limit = query_limit if data_limit > query_limit else data_limit
139 |
140 | data = data[:limit]
141 |
142 | x_data = torch.Tensor([x for x, y in data]).float()
143 | y_data = torch.Tensor([y for x, y in data]).float()
144 |
145 | if bounded is False:
146 | data_limit = int(x_data.size()[0])
147 | if query_limit is None:
148 | query_limit = data_limit
149 |
150 | limit = query_limit if data_limit > query_limit else data_limit
151 |
152 | # torch.narrow is more efficient than indexing and splicing
153 | x_data = x_data.narrow(0, 0, int(limit))
154 | if y_data is not None:
155 | y_data = y_data.narrow(0, 0, int(limit))
156 | processed_data = (x_data, y_data)
157 | return processed_data
158 |
159 |
160 | @register_synth
161 | def copycat(
162 | data,
163 | query,
164 | query_limit,
165 | art_model,
166 | victim_input_shape,
167 | substitute_input_shape,
168 | victim_output_targets,
169 | reshape=True,
170 | ):
171 | """Creates a synthetic dataset by labeling seed data
172 |
173 | Arxiv Paper: https://ieeexplore.ieee.org/document/8489592"""
174 | (x_data, y_data) = data
175 |     y_data = query(x_data)  # the victim's predictions replace any seed labels
176 | if reshape is True:
177 | x_data = reshape_input(x_data, substitute_input_shape)
178 | return x_data, y_data
179 |
180 |
181 | @register_synth
182 | def hopskipjump(
183 | data,
184 | query,
185 | query_limit,
186 | art_model,
187 | victim_input_shape,
188 | substitute_input_shape,
189 | victim_output_targets,
190 | ):
191 | """Runs the HopSkipJump evasion attack
192 |
193 | Arxiv Paper: https://arxiv.org/abs/1904.02144"""
194 |
195 | internal_limit = int(query_limit * 0.5)
196 | X, y = copycat(
197 | data,
198 | query,
199 | internal_limit,
200 | art_model,
201 | victim_input_shape,
202 | substitute_input_shape,
203 | victim_output_targets,
204 | reshape=False,
205 | )
206 |
207 | # import pdb; pdb.set_trace()
208 | X_np = X.detach().clone().numpy()
209 | # config = set_evasion_model(query, victim_input_shape, victim_input_targets)
210 | evasion_limit = int(query_limit * 0.5)
211 |
212 | # The initial evaluation number must be lower than the maximum
213 | lower_bound = 0.01 * evasion_limit
214 | init_eval = int(lower_bound if lower_bound > 1 else 1)
215 |
216 | # Run attack and process results
217 | attack = HopSkipJump(
218 | art_model,
219 | False,
220 | norm="inf",
221 | max_iter=evasion_limit,
222 | max_eval=evasion_limit,
223 | init_eval=init_eval,
224 | )
225 |     # Generate adversarial examples once and convert the result to a Torch tensor;
226 |     # calling attack.generate twice would needlessly double the query cost
227 |     result = attack.generate(X_np)
228 |     result = torch.from_numpy(result).detach().clone().float()
229 | y = query(result)
230 | result = reshape_input(result, substitute_input_shape)
231 | return result, y
232 |
--------------------------------------------------------------------------------
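New synthesizers are plugged into synthesis.py through the register_synth decorator and looked up by name in the synths dict. A minimal sketch of a hypothetical custom synthesizer (the name noisy_copycat and the noise scale are illustrative, not part of the library) that follows the same signature as copycat above:

import torch

from privacyraven.extraction.synthesis import register_synth
from privacyraven.utils.query import reshape_input


@register_synth
def noisy_copycat(
    data,
    query,
    query_limit,
    art_model,
    victim_input_shape,
    substitute_input_shape,
    victim_output_targets,
):
    """Labels seed data perturbed by small Gaussian noise with the victim's predictions"""
    x_data, _ = data
    x_data = x_data + 0.05 * torch.randn_like(x_data)
    y_data = query(x_data)
    x_data = reshape_input(x_data, substitute_input_shape)
    return x_data, y_data

Passing synthesizer="noisy_copycat" to ModelExtractionAttack would then select this function, since synthesize() resolves the string through the synths registry.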
/src/privacyraven/inversion/attacks.py:
--------------------------------------------------------------------------------
1 | from privacyraven.models.four_layer import FourLayerClassifier
2 | from privacyraven.models.victim import train_four_layer_mnist_victim
3 |
4 | def TrainInversionModels(
5 |     gpus=1,
6 | ):
7 |
8 |     # The following is a proof of concept of Figure 4 from the paper
9 |     # "Neural Network Inversion in Adversarial Setting via Background Knowledge Alignment"
10 |     # We first train a classifier on a dataset to output a prediction vector.
11 |     # The MNIST victim trainer fixes the input size (784) and output size (10).
12 |     forward_model = train_four_layer_mnist_victim(
13 |         gpus=gpus,
14 |     )
15 |
16 |     return forward_model
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/src/privacyraven/inversion/core.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import torch.nn.functional as nnf
4 | from tqdm import tqdm
5 | from privacyraven.models.four_layer import FourLayerClassifier
6 | from privacyraven.models.victim import *
7 | from privacyraven.utils.data import get_emnist_data
8 | from privacyraven.utils.query import query_model, get_target
9 | from privacyraven.extraction.core import ModelExtractionAttack
10 | from privacyraven.extraction.synthesis import process_data
11 | from privacyraven.utils.model_creation import NewDataset, set_hparams
12 | import matplotlib.pyplot as plt
13 |
14 | # Create a query function for a target PyTorch Lightning model
15 | def get_prediction(model, input_data, emnist_dimensions=(1, 28, 28, 1)):
16 | # PrivacyRaven provides built-in query functions
17 | prediction, target = query_model(model, input_data, emnist_dimensions)
18 | return prediction
19 |
20 | def save_inversion_results(
21 | image,
22 | reconstructed,
23 | plot=False,
24 | label=None,
25 | save_dir="results",
26 | filename="recovered"
27 | ):
28 | if not os.path.isdir(save_dir):
29 | os.mkdir(save_dir)
30 |
31 | # Generates subplots and plots the results
32 | plt.subplot(1, 2, 1)
33 | plt.imshow(image[0], cmap="gray")
34 | plt.title("Auxiliary set ({})".format(label))
35 | plt.subplot(1, 2, 2)
36 | plt.imshow(reconstructed[0][0].reshape(32, 32), cmap="gray")
37 | plt.title("Reconstructed")
38 | plt.savefig(f"{save_dir}/{filename}.png")
39 |
40 | if plot:
41 | plt.show()
42 |
43 | # Trains the forward and inversion models
44 | def joint_train_inversion_model(
45 | dataset_train=None,
46 | dataset_test=None,
47 | data_dimensions = (1, 28, 28, 1),
48 | max_epochs=None,
49 | gpus=1,
50 | t=1,
51 | c=50
52 | ):
53 |
54 | # The following is a proof of concept of Figure 4 from the paper
55 | # "Neural Network Inversion in Adversarial Setting via Background Knowledge Alignment"
56 |
57 | temp_model = train_four_layer_mnist_victim(
58 | gpus=gpus
59 | )
60 |
61 | def query_mnist(input_data):
62 | # PrivacyRaven provides built-in query functions
63 | return get_target(temp_model, input_data, (1, 28, 28, 1))
64 |
65 |
66 | forward_model = ModelExtractionAttack(
67 | query_mnist,
68 | 1000, # Less than the number of MNIST data points: 60000
69 |         (1, 28, 28, 1),  # Shape of an EMNIST data point (victim input shape)
70 |         10,  # Number of victim output targets
71 |         (3, 1, 28, 28),  # Substitute model input shape
72 |         "copycat",
73 | FourLayerClassifier,
74 | 784, # 28 * 28 or the size of a single image
75 | dataset_train,
76 | dataset_test,
77 | gpus=gpus
78 | ).substitute_model
79 |
80 |     # Due to PrivacyRaven's black-box threat model, we first run a model extraction attack on the
81 |     # target classifier to train a substitute model to which the user has white-box access.
82 |     # Ideally, if the extraction is successful, this substitute model approximates the target
83 |     # classifier to a reasonable degree of fidelity and accuracy.
84 |     # We then train the inversion model by using the substitute model to label the auxiliary
85 |     # dataset, with the objective of minimizing the MSE loss between the reconstructed and
86 |     # auxiliary data points.
87 |
88 | inversion_model = train_mnist_inversion(
89 | gpus=gpus,
90 | forward_model=forward_model,
91 | inversion_params={"nz": 10, "ngf": 128, "affine_shift": c, "truncate": t},
92 | max_epochs=max_epochs,
93 | batch_size=100
94 | )
95 |
96 | return forward_model, inversion_model
97 |
98 |
99 | def test_inversion_model(
100 | forward_model,
101 | inversion_model,
102 | image,
103 | filename="recovered",
104 | save=True,
105 | label=None,
106 | debug=True
107 | ):
108 | prediction = get_prediction(forward_model, image.float())
109 |
110 | if debug:
111 | print("Prediction vector: ", prediction)
112 |
113 |     # Pad the image to the reconstruction size and run it through the inversion model
114 | image = nnf.pad(input=image, pad=(2, 2, 2, 2), value=image[0][0][0])
115 | reconstructed = inversion_model(prediction[0]).to("cpu")
116 |
117 | if save:
118 | save_inversion_results(image, reconstructed, label=label, filename=filename)
119 |
120 | return nnf.mse_loss(image, reconstructed)
121 |
122 | if __name__ == "__main__":
123 | emnist_train, emnist_test = get_emnist_data()
124 |
125 | forward_model, inversion_model = joint_train_inversion_model(
126 | dataset_train=emnist_train,
127 | dataset_test=emnist_test,
128 | gpus=1,
129 | max_epochs=300
130 | )
131 |
132 | num_test = 50
133 | idx_array = random.sample(range(len(emnist_test)), num_test)
134 |
135 | for idx in idx_array:
136 | image, label = emnist_test[idx]
137 |
138 | loss = test_inversion_model(
139 | forward_model,
140 | inversion_model,
141 | image,
142 | label=str(label),
143 | filename=f"recovered_{idx}",
144 | debug=False
145 | )
--------------------------------------------------------------------------------
/src/privacyraven/membership_inf/core.py:
--------------------------------------------------------------------------------
1 | import attr
2 | import pytorch_lightning as pl
3 | from sklearn.neural_network import MLPClassifier
4 | import torch
5 | from torch.cuda import device_count
6 | import copy
7 | import sklearn.metrics as metrics
8 | #from sklearn.metrics import roc_auc_score
9 | from privacyraven.extraction.core import ModelExtractionAttack
10 | #from privacyraven.membership_inf.robustness import find_robustness
11 | from privacyraven.membership_inf.threshold import calculate_threshold_value
12 | from privacyraven.utils.query import establish_query, get_target, query_model
13 | from privacyraven.models.pytorch import ImagenetTransferLearning
14 | import torchmetrics
15 |
16 | @attr.s
17 | class TransferMembershipInferenceAttack(object):
18 | """Launches a transfer-based membership inference attack"""
19 | gpu_availability = torch.cuda.device_count()
20 | data_point = attr.ib()
21 | query = attr.ib()
22 | query_limit = attr.ib(default=100)
23 | victim_input_shape = attr.ib(default=None)
24 | victim_output_targets = attr.ib(default=None)
25 | substitute_input_shape = attr.ib(default=None)
26 | synthesizer = attr.ib(default="copycat")
27 | substitute_model_arch = attr.ib(default=ImagenetTransferLearning)
28 | substitute_input_size = attr.ib(default=1000)
29 | seed_data_train = attr.ib(default=None)
30 | seed_data_test = attr.ib(default=None)
31 | threshold = attr.ib(default=None)
32 | # test_data = attr.ib(default=None)
33 |
34 | transform = attr.ib(default=None)
35 | batch_size = attr.ib(default=100)
36 | num_workers = attr.ib(default=4)
37 | gpus = attr.ib(default=gpu_availability)
38 | max_epochs = attr.ib(default=10)
39 | learning_rate = attr.ib(default=1e-3)
40 | art_model = attr.ib(default=None)
41 | callback = attr.ib(default=None)
42 | trainer_args = attr.ib(default=None)
43 |
44 | extraction_attack = attr.ib(init=False)
45 | substitute_model = attr.ib(init=False)
46 | query_substitute = attr.ib(init=False)
47 |
48 | def __attrs_post_init__(self):
49 | self.query = establish_query(self.query, self.victim_input_shape)
50 |
51 | # We use the dict of the attack to unpack all the extraction arguments
52 | # This will need to be changed as ModelExtractionAttack is changed
53 |
54 | config = attr.asdict(self)
55 | extract_args = copy.deepcopy(config)
56 | # print(extract_args)
57 | extract_args.pop("data_point")
58 | extract_args.pop("threshold")
59 | extract_args = extract_args.values()
60 |
61 | self.extraction_attack = ModelExtractionAttack(*extract_args)
62 |         self.substitute_model = self.extraction_attack.substitute_model
63 |
64 |         self.query_substitute = lambda x: query_model(self.substitute_model, x, self.substitute_input_shape)
65 |         pred, target = self.query_substitute(self.data_point)
66 |
67 | # target = target.unsqueeze(0)
68 | # output = torch.nn.functional.cross_entropy(pred, target)
69 |
70 | # t_pred, t_target = query_substitute(self.seed_data_train)
71 |
72 | # We need diff formats for threshold: #, function, string (?)
73 |
74 | # threshold = torch.nn.functional.cross_entropy()
75 | # print("Cross Entropy Loss is: " + output)
76 | # print("AUROC is: " + auroc)
77 |
78 | # We need multiple: binary classifier & threshold
79 | # This maps to attackNN-based and metric-based attacks
80 |         if self.threshold is None:
81 |             binary_classifier = True
82 |         else:
83 |             binary_classifier = False
84 |             tr = calculate_threshold_value(self.threshold)
85 |
86 | # Threshold value must be number, string in list of functions, OR
87 | # function
88 |
89 |
--------------------------------------------------------------------------------
/src/privacyraven/membership_inf/metric.py:
--------------------------------------------------------------------------------
1 | """
2 | import numbers
3 | from privacyraven.extraction.synthesis import process_data
4 | #metric_functions = ["aucroc"]
5 |
6 | metrics = dict()
7 |
8 | def register_metric(func):
9 | metrics[func.__name__] = func
10 | return func
11 |
12 | def calculate_metric_value(metric, data_point, query_substitute,
13 | substitute_model, extraction_attack, loss=None,
14 | threshold=None):
15 | # Threshold must be:
16 | # - a number
17 | # - a string in a list of function strings
18 | # - a callable
19 |
20 |     is_number = isinstance(metric, numbers.Number)
21 |     is_proper_string = metric in metrics
22 |     is_callable = callable(metric)
23 |
24 |     if not (is_number or is_proper_string or is_callable):
25 |         raise ValueError("Metric must be a number, a string naming a "
26 |                          "registered metric function, or a callable that "
27 |                          "can calculate the value of the metric")
28 |
29 |
30 | @register_metric
31 | def prediction_correctness(data_point, query_substitute,
32 | substitute_model, extraction_attack, loss=None,
33 | threshold=None):
34 | # There must be a correct answer to the data point attached
35 |     (x_data, y_data) = process_data(data_point, None)
36 | prediction = query_substitute(x_data)
37 | print(prediction)
38 | print(y_data)
39 |     if prediction == y_data:
40 |         return "This data point is likely a member of the training dataset."
41 |     else:
42 |         return ("This data point is not likely to be a member of the "
43 |                 "training dataset.")
44 |
45 | @register_metric
46 | def prediction_loss(data_point, query_substitute,
47 | substitute_model, extraction_attack, loss=None,
48 | threshold=None):
49 | if loss(data_point) > threshold:
50 |         return ("This data point is not likely to be a member of the "
51 |                 "training dataset.")
52 | else:
53 | return "This data point is likely a member of the training dataset."
54 |
55 |
56 | @register_metric
57 | def prediction_confidence(data_point, query_substitute,
58 | substitute_model, extraction_attack, loss=None,
59 | threshold=None):
60 | """
61 |
--------------------------------------------------------------------------------
/src/privacyraven/membership_inf/network.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/trailofbits/PrivacyRaven/8d5cee834be85ef29502028d3d8788780d393f17/src/privacyraven/membership_inf/network.py
--------------------------------------------------------------------------------
/src/privacyraven/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/trailofbits/PrivacyRaven/8d5cee834be85ef29502028d3d8788780d393f17/src/privacyraven/models/__init__.py
--------------------------------------------------------------------------------
/src/privacyraven/models/four_layer.py:
--------------------------------------------------------------------------------
1 | import pytorch_lightning as pl
2 | import torch
3 | from torch import nn
4 | from torch.nn import functional as F
5 | import torchmetrics
6 |
7 | class FourLayerClassifier(pl.LightningModule):
8 | """This module describes a neural network with four fully connected layers
9 |     whose hidden layers contain 420 units each, plus dropout (p=0.4). Cross entropy
10 | loss is utilized, the Adam optimizer is used, and accuracy is reported."""
11 |
12 | def __init__(self, hparams):
13 | """Defines overall computations"""
14 | super().__init__()
15 |
16 | self.hparams = hparams
17 | self.save_hyperparameters()
18 |
19 |         # A dictionary informs the model of the input size, number of
20 | # target classes, and learning rate.
21 | self.fc1 = nn.Linear(self.hparams["input_size"], 420)
22 | self.fc2 = nn.Linear(420, 420)
23 | self.fc3 = nn.Linear(420, 420)
24 | self.fc4 = nn.Linear(420, self.hparams["targets"])
25 | self.dropout = nn.Dropout(0.4)
26 |
27 | # Instantiate accuracy metrics for each phase
28 | self.train_acc = torchmetrics.Accuracy()
29 | self.valid_acc = torchmetrics.Accuracy()
30 | self.test_acc = torchmetrics.Accuracy()
31 | # self.train_acc = pl.metrics.Accuracy()
32 | # self.valid_acc = pl.metrics.Accuracy()
33 | # self.test_acc = pl.metrics.Accuracy()
34 |
35 | def forward(self, x):
36 | """Executes the forward pass and inference phase"""
37 | x = x.view(x.size(0), -1)
38 | x = F.relu(self.fc1(x))
39 | x = self.dropout(x)
40 | x = F.relu(self.fc2(x))
41 | x = F.relu(self.fc3(x))
42 |         x = self.dropout(x)
43 |         x = self.fc4(x)
44 |         # Softmax converts the final-layer logits into an output probability vector;
45 |         # specifying dim avoids the implicit-dimension warning
46 |         return F.softmax(x, dim=1)
47 |
48 | def training_step(self, batch, batch_idx):
49 | """Runs the training loop"""
50 | x, y = batch
51 | y_hat = self(x)
52 | loss = F.cross_entropy(y_hat, y)
53 | self.log("train_loss", loss)
54 | self.train_acc(torch.nn.functional.softmax(y_hat, dim=1), y)
55 | self.log("train_accuracy", self.train_acc) #, on_step=True, on_epoch=False)
56 | return loss
57 |
58 | def validation_step(self, batch, batch_idx):
59 | """Runs the validation loop"""
60 | x, y = batch
61 | y_hat = self(x)
62 | loss = F.cross_entropy(y_hat, y)
63 | self.log("valid_loss", loss)
64 | self.valid_acc(torch.nn.functional.softmax(y_hat, dim=1), y)
65 | # self.valid_acc(y_hat, y)
66 | self.log("valid_accuracy", self.valid_acc) #, on_step=True, on_epoch=True)
67 |
68 | def test_step(self, batch, batch_idx):
69 | """Tests the network"""
70 | x, y = batch
71 | y_hat = self(x)
72 | loss = F.cross_entropy(y_hat, y)
73 | self.log("test_loss", loss)
74 | self.test_acc(torch.nn.functional.softmax(y_hat, dim=1), y)
75 | # self.test_acc(y_hat, y)
76 | self.log("test_accuracy", self.test_acc) #, on_step=True, on_epoch=False)
77 |
78 | def configure_optimizers(self):
79 | """Executes optimization for training and validation"""
80 | return torch.optim.Adam(self.parameters(), self.hparams["learning_rate"])
81 |
82 |
83 |
--------------------------------------------------------------------------------
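A minimal sketch (not part of the repository) of instantiating the classifier directly; the hparams keys mirror those produced by set_hparams in utils/model_creation.py:

import torch

from privacyraven.models.four_layer import FourLayerClassifier

# Only the keys the model actually reads are required here
hparams = {
    "input_size": 784,      # 28 * 28 MNIST pixels
    "targets": 10,          # number of output classes
    "learning_rate": 1e-3,
}

model = FourLayerClassifier(hparams)
probs = model(torch.randn(5, 1, 28, 28))  # forward pass on a dummy batch
print(probs.shape)                        # torch.Size([5, 10])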
/src/privacyraven/models/inversion_model.py:
--------------------------------------------------------------------------------
1 | import pytorch_lightning as pl
2 | import torch
3 | from torch import nn
4 | from torch.nn import functional as nnf
5 | from torch import topk, add, log as vlog, tensor, sort
6 | from tqdm import tqdm
7 | from torch.cuda import device_count
8 |
9 | class InversionModel(pl.LightningModule):
10 | def __init__(self, hparams, inversion_params, classifier):
11 | super().__init__()
12 |
13 | self.classifier = classifier
14 | self.hparams = hparams
15 | #self.save_hyperparameters()
16 | self.nz = inversion_params["nz"]
17 | self.ngf = inversion_params["ngf"]
18 | self.c = inversion_params["affine_shift"]
19 | self.t = inversion_params["truncate"]
20 | self.mse_loss = 0
21 |
22 | # Forces the classifier into evaluation mode
23 | self.classifier.eval()
24 |
25 | # Forces the inversion model into training mode
26 | self.train()
27 |
28 | self.decoder = nn.Sequential(
29 | nn.ConvTranspose2d(
30 | self.nz,
31 | self.ngf * 4,
32 | stride=(1, 1),
33 | kernel_size=(4, 4)
34 | ),
35 | nn.BatchNorm2d(self.ngf * 4),
36 | nn.Tanh(),
37 |
38 | nn.ConvTranspose2d(
39 | self.ngf * 4,
40 | self.ngf * 2,
41 | stride=(2, 2),
42 | kernel_size=(4, 4),
43 | padding=(1, 1)
44 | ),
45 |
46 | nn.BatchNorm2d(self.ngf * 2),
47 | nn.Tanh(),
48 |
49 | nn.ConvTranspose2d(
50 | self.ngf * 2,
51 | self.ngf,
52 | stride=(2, 2),
53 | kernel_size=(4, 4),
54 | padding=(1, 1)
55 | ),
56 | nn.BatchNorm2d(self.ngf),
57 | nn.Tanh(),
58 |
59 | nn.ConvTranspose2d(
60 | self.ngf,
61 | 1,
62 | stride=(2, 2),
63 | padding=(1, 1),
64 | kernel_size=(4, 4)
65 | ),
66 |
67 | nn.Sigmoid()
68 |
69 | )
70 |
71 | def training_step(self, batch, batch_idx):
72 | images, _ = batch
73 |
74 | for data in images:
75 | augmented = torch.empty(1, 1, 28, 28, device=self.device)
76 | augmented[0] = data
77 |
78 | Fwx = self.classifier(augmented)
79 | reconstructed = self(Fwx[0])
80 | augmented = nnf.pad(input=augmented, pad=(2, 2, 2, 2), value=data[0][0][0])
81 | loss = nnf.mse_loss(reconstructed, augmented)
82 | self.log("train_loss: ", loss)
83 |
84 | return loss
85 |
86 | def test_step(self, batch, batch_idx):
87 | images, _ = batch
88 |
89 | for data in images:
90 | augmented = torch.empty(1, 1, 28, 28, device=self.device)
91 | augmented[0] = data
92 |
93 | Fwx = self.classifier(augmented)
94 | reconstructed = self(Fwx[0])
95 | augmented = nnf.pad(input=augmented, pad=(2, 2, 2, 2), value=data[0][0][0])
96 | loss = nnf.mse_loss(reconstructed, augmented)
97 | self.log("test_loss: ", loss)
98 |
99 | return loss
100 |
101 | def forward(self, Fwx):
102 | z = torch.zeros(len(Fwx), device=self.device)
103 | topk, indices = torch.topk(Fwx, self.t)
104 | topk = torch.clamp(topk, min=-1e3) + self.c
105 | topk_min = topk.min()
106 | # We create a new vector of all zeros and place the top k entries in their original order
107 | Fwx = z.scatter_(0, indices, topk) + nnf.relu(-topk_min)
108 | Fwx = torch.reshape(Fwx, (10, 1))
109 | Fwx = Fwx.view(-1, self.nz, 1, 1)
110 | Fwx = self.decoder(Fwx)
111 |
112 | Fwx = Fwx.view(-1, 1, 32, 32)
113 |
114 | return Fwx
115 |
116 |
117 | def configure_optimizers(self):
118 | """Executes optimization for training and validation"""
119 | return torch.optim.Adam(self.parameters(), 1e-4)
120 |
121 |
122 |
123 |
124 |
--------------------------------------------------------------------------------
/src/privacyraven/models/pytorch.py:
--------------------------------------------------------------------------------
1 | """
2 | These models will be deprecated soon. Use at your own risk.
3 | """
4 | import os
5 |
6 | import numpy as np
7 | import pytorch_lightning as pl
8 | import torch
9 | from torch import nn
10 | from torch.nn import functional as F
11 | from torchvision import datasets, models, transforms
12 | from tqdm import tqdm
13 |
14 |
15 | class ThreeLayerClassifier(pl.LightningModule):
16 | def __init__(self, hparams):
17 | """Defines a three layer fully connected neural network"""
18 | super(ThreeLayerClassifier, self).__init__()
19 | self.hparams = hparams
20 | self.layer_1 = torch.nn.Linear(self.hparams["input_size"], 128)
21 | self.layer_2 = torch.nn.Linear(128, 256)
22 | self.layer_3 = torch.nn.Linear(256, self.hparams["targets"])
23 |
24 | def forward(self, x):
25 | """Establishes the neural network's forward pass
26 |
27 | Parameters:
28 | x: A Torch tensor of the input data
29 |
30 | Returns:
31 | output probability vector for classes"""
32 | batch_size, channels, width, height = x.size()
33 |
34 | # Input Layer: (batch_size, 1, 28, 28) -> (batch_size, 1*28*28)
35 | x = x.view(batch_size, -1)
36 |
37 | # Layer 1: (batch_size, 1*28*28) -> (batch_size, 128)
38 | x = self.layer_1(x)
39 | x = torch.relu(x)
40 |
41 | # Layer 2: (batch_size, 128) -> (batch_size, 256)
42 | x = self.layer_2(x)
43 | x = torch.relu(x)
44 |
45 | # Layer 3: (batch_size, 256) -> (batch_size, 10)
46 | x = self.layer_3(x)
47 | x = torch.log_softmax(x, dim=1)
48 |
49 | return x
50 |
51 | def cross_entropy_loss(self, logits, labels):
52 | """Calculates loss- the difference between model predictions and true labels
53 |
54 | Parameters:
55 | logits: A Torch tensor of model output predictions
56 | labels: A Torch tensor of true values for predictions
57 |
58 | Returns:
59 | Cross entropy loss"""
60 | return F.cross_entropy(logits, labels)
61 |
62 | def training_step(self, train_batch, batch_idx):
63 | """Pushes training data batch through model and calculates loss in loop
64 |
65 | Parameters:
66 | train_batch: A Torch tensor of a batch of training data from training dataloader
67 | batch_idx: An integer of the index of batch in contention
68 |
69 | Returns:
70 | Formatted string with cross entropy loss and training logs"""
71 | x, y = train_batch
72 | logits = self.forward(x)
73 | loss = self.cross_entropy_loss(logits, y)
74 | logs = {"train_loss": loss}
75 | return {"loss": loss, "log": logs}
76 |
77 | def validation_step(self, val_batch, batch_idx):
78 | """Pushes validation data batch through model and calculates loss in loop
79 |
80 | Parameters:
81 | val_batch: A Torch tensor batch of validation data from validation dataloader
82 | batch_idx: An integer of the index of batch in contention
83 |
84 | Returns:
85 | Formatted string with resultant cross entropy loss"""
86 | x, y = val_batch
87 | logits = self.forward(x)
88 | loss = self.cross_entropy_loss(logits, y)
89 | targets_hat = torch.argmax(logits, dim=1)
90 | n_correct_pred = torch.sum(y == targets_hat).item()
91 | return {"val_loss": loss, "n_correct_pred": n_correct_pred, "n_pred": len(x)}
92 |
93 | def validation_epoch_end(self, outputs):
94 | """Returns validation step results at the end of the epoch
95 | Parameters:
96 | outputs: An array with the result of validation step for each batch
97 | Returns:
98 | Formatted string with resultant metrics"""
99 | avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
100 | tensorboard_logs = {"val_loss": avg_loss}
101 | return {"avg_val_loss": avg_loss, "log": tensorboard_logs}
102 |
103 | def configure_optimizers(self):
104 | """Sets up the optimization scheme"""
105 | optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
106 | return optimizer
107 |
108 | def test_step(self, batch, batch_idx):
109 | """Pushes test data into the model and returns relevant metrics
110 | Parameters:
111 | batch: A Torch tensor of a batch of test data
112 | batch_idx: An integer of the index of batch in contention
113 | Returns:
114 | Formatted string with relevant metrics"""
115 | x, y = batch
116 | y_hat = self(x)
117 | targets_hat = torch.argmax(y_hat, dim=1)
118 | n_correct_pred = torch.sum(y == targets_hat).item()
119 | return {
120 | "test_loss": F.cross_entropy(y_hat, y),
121 | "n_correct_pred": n_correct_pred,
122 | "n_pred": len(x),
123 | }
124 |
125 | def test_epoch_end(self, outputs):
126 | """Returns test step results at the end of the epoch
127 | Parameters:
128 | outputs: An array with the result of test step for each batch
129 | Returns:
130 | Formatted string with resultant metrics"""
131 | avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
132 | tensorboard_logs = {"test_loss": avg_loss}
133 | return {"avg_test_loss": avg_loss, "log": tensorboard_logs}
134 |
135 |
136 | class ImagenetTransferLearning(pl.LightningModule):
137 | def __init__(self, hparams):
138 | """Create a classifier with a pretrained MobileNet backbone"""
139 | super(ImagenetTransferLearning, self).__init__()
140 | self.hparams = hparams
141 | self.feature_extractor = models.mobilenet_v2(pretrained=True)
142 | self.feature_extractor.eval()
143 |
144 | # Establish classifier
145 | # self.layer_1 = torch.nn.Linear(hparams["input_size"], 128)
146 | self.layer_1 = torch.nn.Linear(1000, 128)
147 | self.layer_2 = torch.nn.Linear(128, 256)
148 | self.layer_3 = torch.nn.Linear(256, hparams["targets"])
149 |
150 | def forward(self, x):
151 | """Establishes the neural network's forward pass
152 |
153 | Parameters:
154 | x: A Torch tensor of the input image
155 |
156 | Returns:
157 | Output probability vector for classes
158 | """
159 | x = self.feature_extractor(x)
160 | batch_size, hidden = x.size()
161 |
162 | x = self.layer_1(x)
163 | x = torch.relu(x)
164 | x = self.layer_2(x)
165 | x = torch.relu(x)
166 | x = self.layer_3(x)
167 |
168 | x = torch.log_softmax(x, dim=1)
169 | return x
170 |
171 | def nll_loss(self, logits, labels):
172 | """Calculates loss
173 |
174 | Parameters:
175 | logits: A Torch tensor of the model output predictions
176 | labels: A Torch tensor of the true values for predictions
177 |
178 | Returns:
179 | Loss"""
180 | return F.nll_loss(logits, labels)
181 |
182 | def training_step(self, train_batch, batch_idx):
183 | """Pushes training data batch through model and calculates loss in loop
184 |
185 | Parameters:
186 | train_batch: A Torch tensor with the batch of training data
187 | batch_idx: An integer of the index of batch in contention
188 |
189 | Returns:
190 | Formatted string with cross entropy loss and training logs"""
191 | x, y = train_batch
192 | logits = self.forward(x)
193 | loss = self.nll_loss(logits, y)
194 | logs = {"train_loss": loss}
195 | return {"loss": loss, "log": logs}
196 |
197 | def validation_step(self, val_batch, batch_idx):
198 | """Pushes validation data batch through model and calculates loss in loop
199 |
200 | Parameters:
201 | val_batch: A Torch tensor of a batch of validation data
202 | batch_idx: An integer of the index of batch in contention
203 |
204 | Returns:
205 | Formatted string with resultant cross entropy loss"""
206 | x, y = val_batch
207 | logits = self.forward(x)
208 | loss = self.nll_loss(logits, y)
209 | targets_hat = torch.argmax(logits, dim=1)
210 | n_correct_pred = torch.sum(y == targets_hat).item()
211 | return {"val_loss": loss, "n_correct_pred": n_correct_pred, "n_pred": len(x)}
212 |
213 | def validation_epoch_end(self, outputs):
214 | """Returns validation step results at the end of the epoch
215 |
216 | Parameters:
217 | outputs: An array of the result of validation step for each batch
218 |
219 | Returns:
220 | Formatted string with resultant metrics
221 | """
222 | avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
223 | tensorboard_logs = {"val_loss": avg_loss}
224 | return {"avg_val_loss": avg_loss, "log": tensorboard_logs}
225 |
226 | def configure_optimizers(self):
227 | """Sets up the optimization scheme"""
228 | optimizer = torch.optim.Adam(
229 | self.parameters(), lr=self.hparams["learning_rate"]
230 | )
231 | return optimizer
232 |
233 | def test_step(self, batch, batch_idx):
234 | """Pushes test data into the model and returns relevant metrics
235 |
236 | Parameters:
237 | batch: A Torch tensor of a batch of test data from test dataloader
238 | batch_idx: An integer of the index of batch in contention
239 |
240 | Returns:
241 | Formatted string with relevant metrics"""
242 | x, y = batch
243 | y_hat = self(x)
244 | targets_hat = torch.argmax(y_hat, dim=1)
245 | n_correct_pred = torch.sum(y == targets_hat).item()
246 | return {
247 | "test_loss": F.nll_loss(y_hat, y),
248 | "n_correct_pred": n_correct_pred,
249 | "n_pred": len(x),
250 | }
251 |
252 | def test_epoch_end(self, outputs):
253 | """Returns test step results at the end of the epoch
254 |
255 | Parameters:
256 | outputs: An array with the results of test step for each batch
257 |
258 | Returns:
259 | Formatted string with resultant metrics"""
260 | avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
261 | tensorboard_logs = {"test_loss": avg_loss}
262 | return {"avg_test_loss": avg_loss, "log": tensorboard_logs}
263 |
--------------------------------------------------------------------------------
/src/privacyraven/models/victim.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytorch_lightning as pl
4 | import torch
5 | from torch import nn
6 | from torch.nn import functional as F
7 | from torch.utils.data import DataLoader, Dataset, random_split
8 | from torchvision import datasets, models, transforms
9 | from torchvision.datasets import MNIST
10 |
11 | from privacyraven.models.four_layer import FourLayerClassifier
12 | from privacyraven.models.inversion_model import InversionModel
13 | from privacyraven.models.pytorch import ThreeLayerClassifier
14 | from privacyraven.utils.data import get_mnist_loaders
15 | from privacyraven.utils.data import get_prob_loaders
16 | from privacyraven.utils.model_creation import (
17 | convert_to_inference,
18 | set_hparams,
19 | train_and_test,
20 | train_and_test_inversion,
21 | )
22 |
23 | # Trains MNIST inversion model
24 | def train_mnist_inversion(
25 | transform=None,
26 | batch_size=100,
27 | forward_model=None,
28 | num_workers=4,
29 | rand_split_val=None,
30 | gpus=None,
31 | max_epochs=8,
32 | inversion_params={"nz": 10, "ngf": 3, "affine_shift": 7, "truncate": 3},
33 | learning_rate=1e-3,
34 | ):
35 |     """Trains an inversion model that reconstructs MNIST inputs from a classifier's prediction vectors
36 |
37 | Parameters:
38 | transform: A Torchvision.transforms transformation to be applied to MNIST data
39 | batch_size: An integer of the size of batches to be trained and tested upon
40 | num_workers: An integer number of workers assigned to computations
41 | rand_split_val: An array describing how the val and train data are split
42 | gpus: An integer num of gpus available to train upon
43 | max_epochs: An integer of the maximum # of epochs to run
44 | learning_rate: A float that is the learning rate for the optimizer
45 |
46 | Returns:
47 | Trained model ready for inference"""
48 |
49 | input_size = 10 # Prediction vector
50 | targets = 784 # Reconstructed image
51 |
52 | # Uses all available GPUs for computation by default
53 | if gpus is None:
54 | gpus = torch.cuda.device_count()
55 |
56 | if transform is None:
57 | transform = transforms.Compose(
58 | [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
59 | )
60 | if rand_split_val is None:
61 | rand_split_val = [55000, 5000]
62 |
63 | # Establish hyperparameters and DataLoaders
64 | hparams = set_hparams(
65 | transform,
66 | batch_size,
67 | num_workers,
68 | gpus,
69 | max_epochs,
70 | learning_rate,
71 | input_size,
72 | targets,
73 | )
74 |
75 | hparams["rand_split_val"] = rand_split_val
76 | train_dataloader, val_dataloader, test_dataloader = get_mnist_loaders(hparams)
77 | # Train, test, and convert the model to inference
78 | inversion_model = train_and_test_inversion(
79 | forward_model, InversionModel, train_dataloader, val_dataloader, test_dataloader, hparams, inversion_params
80 | )
81 | inversion_model = convert_to_inference(inversion_model)
82 | return inversion_model
83 |
84 |
85 |
86 | def train_four_layer_mnist_victim(
87 | transform=None,
88 | batch_size=100,
89 | num_workers=4,
90 | rand_split_val=None,
91 | gpus=None,
92 | max_epochs=8,
93 | learning_rate=1e-3,
94 | ):
95 | """Trains a 4-layer fully connected neural network on MNIST data
96 |
97 | Parameters:
98 | transform: A Torchvision.transforms transformation to be applied to MNIST data
99 | batch_size: An integer of the size of batches to be trained and tested upon
100 | num_workers: An integer number of workers assigned to computations
101 | rand_split_val: An array describing how the val and train data are split
102 | gpus: An integer num of gpus available to train upon
103 | max_epochs: An integer of the maximum # of epochs to run
104 | learning_rate: A float that is the learning rate for the optimizer
105 |
106 | Returns:
107 | Trained model ready for inference"""
108 |
109 | input_size = 784 # 28*28 or the size of a single image
110 | targets = 10 # the number of digits any image can possibly represent
111 |
112 | # Uses all available GPUs for computation by default
113 | if gpus is None:
114 | gpus = torch.cuda.device_count()
115 |
116 | if transform is None:
117 | transform = transforms.Compose(
118 | [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
119 | )
120 | if rand_split_val is None:
121 | rand_split_val = [55000, 5000]
122 |
123 | # Establish hyperparameters and DataLoaders
124 | hparams = set_hparams(
125 | transform,
126 | batch_size,
127 | num_workers,
128 | gpus,
129 | max_epochs,
130 | learning_rate,
131 | input_size,
132 | targets,
133 | )
134 |
135 | train_dataloader, val_dataloader, test_dataloader = get_mnist_loaders(hparams)
136 | # Train, test, and convert the model to inference
137 | mnist_model = train_and_test(
138 | FourLayerClassifier, train_dataloader, val_dataloader, test_dataloader, hparams
139 | )
140 | mnist_model = convert_to_inference(mnist_model, gpus=gpus)
141 | return mnist_model
142 |
143 |
144 | def train_mnist_victim(
145 | transform=None,
146 | batch_size=100,
147 | num_workers=4,
148 | rand_split_val=None,
149 | gpus=None,
150 | max_epochs=8,
151 | learning_rate=1e-3,
152 | ):
153 | """Trains a 3-layer fully connected neural network on MNIST data
154 |
155 | This function will be depreciated with the ThreeLayerClassifier.
156 |
157 | Parameters:
158 | transform: A Torchvision.transforms transformation to be applied to MNIST data
159 | batch_size: An integer of the size of batches to be trained and tested upon
160 | num_workers: An integer number of workers assigned to computations
161 | rand_split_val: An array describing how the val and train data are split
162 | gpus: An integer num of gpus available to train upon
163 | max_epochs: An integer of the maximum # of epochs to run
164 | learning_rate: A float that is the learning rate for the optimizer
165 |
166 | Returns:
167 | Trained model ready for inference"""
168 |
169 | print(
170 |         "WARNING: The ThreeLayerClassifier will be deprecated. Use the FourLayerClassifier instead."
171 | )
172 |
173 | # Define hyperparameters implied by the use of MNIST
174 | input_size = 784
175 | targets = 10
176 |
177 | # Uses all available GPUs for computation by default
178 | if gpus is None:
179 | gpus = torch.cuda.device_count()
180 |
181 | if transform is None:
182 | transform = transforms.Compose(
183 | [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
184 | )
185 | if rand_split_val is None:
186 | rand_split_val = [55000, 5000]
187 |
188 | # Establish hyperparameters and DataLoaders
189 | hparams = set_hparams(
190 | transform,
191 | batch_size,
192 | num_workers,
193 | gpus,
194 | max_epochs,
195 | learning_rate,
196 | input_size,
197 | targets,
198 | )
199 |
200 | train_dataloader, val_dataloader, test_dataloader = get_mnist_loaders(hparams)
201 |
202 | # Train, test, and convert the model to inference
203 | mnist_model = train_and_test(
204 | ThreeLayerClassifier, train_dataloader, val_dataloader, test_dataloader, hparams
205 | )
206 | mnist_model = convert_to_inference(mnist_model)
207 | return mnist_model
--------------------------------------------------------------------------------
/src/privacyraven/run.py:
--------------------------------------------------------------------------------
1 | from privacyraven.extraction.core import ModelExtractionAttack
2 | from privacyraven.extraction.synthesis import synths
3 |
4 |
5 | def run_all_extraction(
6 | query,
7 | query_limit=100,
8 | victim_input_shape=None,
9 | victim_output_targets=None, # (targets)
10 | substitute_input_shape=None,
11 | substitute_model=None,
12 | substitute_input_size=1000,
13 | seed_data_train=None,
14 | seed_data_test=None,
15 | transform=None,
16 | batch_size=100,
17 | num_workers=4,
18 | gpus=1,
19 | max_epochs=10,
20 | learning_rate=1e-3,
21 | ):
22 | """Run all extraction attacks.
23 |
24 | This needs to be updated with the class signature."""
25 |
26 | for s in synths:
27 | ModelExtractionAttack(
28 | query,
29 | query_limit,
30 | victim_input_shape,
31 | victim_output_targets,
32 | substitute_input_shape,
33 | s,
34 | substitute_model,
35 | substitute_input_size,
36 | seed_data_train,
37 | seed_data_test,
38 | transform,
39 | batch_size,
40 | num_workers,
41 | gpus,
42 | max_epochs,
43 | learning_rate,
44 | )
45 |
--------------------------------------------------------------------------------
/src/privacyraven/utils/data.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import torch
6 | from torch.utils.data import DataLoader, Dataset, random_split
7 | from torchvision import datasets, models, transforms
8 | from torchvision.datasets import EMNIST, MNIST
9 | from tqdm import tqdm
10 |
11 |
12 | def get_emnist_data(transform=None, RGB=True):
13 | """Returns EMNIST train and test datasets.
14 |
15 | This function is assumed to be primarily used as seed data.
16 | DataLoaders and data splits are in synthesis.py
17 |
18 | Parameters:
19 | transform: Relevant Torchvision transforms to apply to EMNIST
20 | RGB: A boolean value that decides if the images are RGB
21 |
22 | Returns:
23 | Two Torchvision datasets with the EMNIST train and test sets"""
24 |
25 | if transform is None and (RGB is True):
26 | transform = transforms.Compose(
27 | [
28 | transforms.Lambda(lambda image: image.convert("RGB")),
29 | transforms.ToTensor(),
30 | transforms.Normalize((0.1307,), (0.3081,)),
31 | ]
32 | )
33 | elif transform is None and (RGB is False):
34 |         transform = transforms.Compose(
35 | [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
36 | )
37 |
38 | emnist_train = EMNIST(
39 | os.getcwd(), split="digits", train=True, download=True, transform=transform
40 | )
41 | emnist_test = EMNIST(
42 | os.getcwd(), split="digits", train=False, download=True, transform=transform
43 | )
44 | return emnist_train, emnist_test
45 |
46 | def get_prob_loaders(hparams, datapoints):
47 |
48 | prob_train, prob_val, prob_test = random_split(datapoints, hparams["rand_split_val"])
49 |
50 | train_dataloader = DataLoader(
51 | prob_train, batch_size=hparams["batch_size"], num_workers=hparams["num_workers"]
52 | )
53 | val_dataloader = DataLoader(
54 | prob_val, batch_size=hparams["batch_size"], num_workers=hparams["num_workers"]
55 | )
56 | test_dataloader = DataLoader(
57 | prob_test, batch_size=hparams["batch_size"], num_workers=hparams["num_workers"]
58 | )
59 |
60 | return train_dataloader, val_dataloader, test_dataloader
61 |
62 | def get_mnist_loaders(hparams):
63 | """Returns MNIST DataLoaders from hyperparams in a dict"""
64 | mnist_train, mnist_val, mnist_test = get_mnist_data(hparams)
65 | train_dataloader = DataLoader(
66 | mnist_train,
67 | batch_size=hparams["batch_size"],
68 | num_workers=hparams["num_workers"],
69 | )
70 |
71 | val_dataloader = DataLoader(
72 | mnist_val, batch_size=hparams["batch_size"], num_workers=hparams["num_workers"]
73 | )
74 |
75 | test_dataloader = DataLoader(
76 | mnist_test, batch_size=hparams["batch_size"], num_workers=hparams["num_workers"]
77 | )
78 | return train_dataloader, val_dataloader, test_dataloader
79 |
80 |
81 | def get_mnist_data(hparams=None):
82 | """Returns MNIST train and test sets from hyperparams in a dict"""
83 | if hparams is None:
84 | transform = transforms.Compose(
85 | [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
86 | )
87 |
88 | hparams = {"rand_split_val": [55000, 5000],
89 | "transform": transform
90 | }
91 |
92 | mnist_train = MNIST(
93 | os.getcwd(), train=True, download=True, transform=hparams["transform"]
94 | )
95 | mnist_test = MNIST(
96 | os.getcwd(), train=False, download=True, transform=hparams["transform"]
97 | )
98 |
99 | mnist_train, mnist_val = random_split(mnist_train, hparams["rand_split_val"])
100 |
101 | return mnist_train, mnist_val, mnist_test
102 |
--------------------------------------------------------------------------------
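A minimal sketch (not part of the repository) of how these helpers are typically combined: EMNIST serves as the attacker's seed data and the MNIST loaders feed a victim model. Note that both calls download the datasets into the current working directory; the hyperparameter dict only needs the keys the loader helpers actually read.

from torchvision import transforms

from privacyraven.utils.data import get_emnist_data, get_mnist_loaders

# EMNIST digits serve as the attacker's seed (public) data
emnist_train, emnist_test = get_emnist_data()

hparams = {
    "transform": transforms.ToTensor(),
    "batch_size": 100,
    "num_workers": 4,
    "rand_split_val": [55000, 5000],
}
train_loader, val_loader, test_loader = get_mnist_loaders(hparams)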
/src/privacyraven/utils/model_creation.py:
--------------------------------------------------------------------------------
1 | from contextlib import suppress
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import pytorch_lightning as pl
6 | import torch
7 | from art.estimators.classification import BlackBoxClassifier
8 | from torch.utils.data import Dataset
9 |
10 |
11 | def set_evasion_model(query, victim_input_shape, victim_input_targets):
12 | """Defines the threat model for an evasion attack"""
13 | config = BlackBoxClassifier(
14 | predict=query,
15 | input_shape=victim_input_shape,
16 | nb_classes=victim_input_targets,
17 | clip_values=None, # (0, 255),
18 | preprocessing_defences=None,
19 | postprocessing_defences=None,
20 | preprocessing=(0, 1), # None,
21 | )
22 | return config
23 |
24 |
25 | class NewDataset(Dataset):
26 | """Creates a Dataset class for PyTorch"""
27 |
28 | def __init__(self, images, targets, transform=None):
29 | self.images = images
30 | self.targets = targets
31 | self.transform = transform
32 |
33 | def __len__(self):
34 | return len(self.images)
35 |
36 | def __getitem__(self, index):
37 | image = self.images[index]
38 | target = self.targets[index]
39 |
40 | if self.transform is not None:
41 | image = self.transform(image.numpy())
42 | return image, target
43 |
44 |
45 | def set_hparams(
46 | transform=None,
47 | batch_size=100,
48 | num_workers=4,
49 | gpus=None,
50 | max_epochs=8,
51 | learning_rate=1e-3,
52 | input_size=None,
53 | targets=None,
54 | ):
55 | """Creates a dictionary of hyperparameters"""
56 | # This should be optimized
57 | rand_split_val = [55000, 5000]
58 |
59 | if gpus is None:
60 | gpus = torch.cuda.device_count()
61 |
62 | if (input_size is None) or (targets is None):
63 |         raise ValueError("Input size and number of targets need to be defined")
64 | hparams = {}
65 | hparams["transform"] = transform
66 | hparams["batch_size"] = int(batch_size)
67 | hparams["num_workers"] = int(num_workers)
68 | hparams["rand_split_val"] = rand_split_val
69 | hparams["gpus"] = int(gpus)
70 | hparams["max_epochs"] = int(max_epochs)
71 | hparams["learning_rate"] = learning_rate
72 | hparams["input_size"] = input_size
73 | hparams["targets"] = targets
74 | return hparams
75 |
76 | def train_and_test_inversion(
77 | classifier,
78 | inversion_model,
79 | train_dataloader,
80 | val_dataloader,
81 | test_dataloader,
82 | hparams,
83 | inversion_params,
84 | callback=None,
85 | trainer_args=None,
86 | ):
87 | """Trains, validates, and tests a PyTorch Lightning model"""
88 | model = inversion_model(hparams, inversion_params, classifier)
89 | # model = classifier(hparams, inversion_params, classifier)
90 | # We need to configure how the Trainer interprets callbacks and extra arguments.
91 |     # This should be refactored to be more dynamic and prioritize the most common cases.
92 | if callback is not None and trainer_args is not None:
93 | trainer = pl.Trainer(
94 | gpus=hparams["gpus"],
95 | max_epochs=hparams["max_epochs"],
96 | callbacks=[callback],
97 | **trainer_args
98 | )
99 | elif callback is not None and trainer_args is None:
100 | trainer = pl.Trainer(
101 | gpus=hparams["gpus"], max_epochs=hparams["max_epochs"], callbacks=[callback]
102 | )
103 | elif callback is None and trainer_args is None:
104 | trainer = pl.Trainer(gpus=hparams["gpus"], max_epochs=hparams["max_epochs"])
105 | elif callback is None and trainer_args is not None:
106 | trainer = pl.Trainer(
107 | gpus=hparams["gpus"], max_epochs=hparams["max_epochs"], **trainer_args
108 | )
109 |
110 | # Runs training, validation, and testing
111 | trainer.fit(model, train_dataloader, val_dataloader)
112 | trainer.test(model, test_dataloaders=test_dataloader)
113 | return model
114 |
115 | def train_and_test(
116 | classifier,
117 | train_dataloader,
118 | val_dataloader,
119 | test_dataloader,
120 | hparams,
121 | callback=None,
122 | trainer_args=None,
123 | ):
124 | """Trains, validates, and tests a PyTorch Lightning model"""
125 | model = classifier(hparams)
126 | # We need to configure how the Trainer interprets callbacks and extra arguments.
127 |     # This should be refactored to be more dynamic and prioritize the most common cases.
128 | if callback is not None and trainer_args is not None:
129 | trainer = pl.Trainer(
130 | gpus=hparams["gpus"],
131 | max_epochs=hparams["max_epochs"],
132 | callbacks=[callback],
133 | **trainer_args
134 | )
135 | elif callback is not None and trainer_args is None:
136 | trainer = pl.Trainer(
137 | gpus=hparams["gpus"], max_epochs=hparams["max_epochs"], callbacks=[callback]
138 | )
139 | elif callback is None and trainer_args is None:
140 | trainer = pl.Trainer(gpus=hparams["gpus"], max_epochs=hparams["max_epochs"])
141 | elif callback is None and trainer_args is not None:
142 | trainer = pl.Trainer(
143 | gpus=hparams["gpus"], max_epochs=hparams["max_epochs"], **trainer_args
144 | )
145 |
146 | # Runs training, validation, and testing
147 | trainer.fit(model, train_dataloader, val_dataloader)
148 | trainer.test(model, test_dataloaders=test_dataloader)
149 | return model
150 |
151 |
152 | def convert_to_inference(model, gpus=0):
153 | """Allows a model to be used in an inference setting"""
154 | model.freeze()
155 | model.eval()
156 | if gpus:
157 |         # Suppress the error raised if CUDA is unavailable at inference time
158 |         with suppress(Exception):
159 |             model.cuda()
160 | return model
161 |
162 |
163 | def show_test_image(dataset, idx, cmap="gray"):
164 | """Shows a single datapoint from a test dataset as an image
165 |
166 | Parameters:
167 | dataset: A Torch dataset or tuple of the dataset with the image
168 | idx: An integer of the index of the image position
169 | cmap: An optional string defining the color map for image
170 |
171 | Returns:
172 | data sampled and displayed"""
173 | x, y = dataset[idx]
174 | plt.imshow(x.numpy()[0], cmap=cmap)
175 | return x
176 |
--------------------------------------------------------------------------------
/src/privacyraven/utils/query.py:
--------------------------------------------------------------------------------
1 | from contextlib import suppress
2 |
3 | import numpy as np
4 | import pytorch_lightning as pl
5 | import torch
6 |
7 |
8 | def reshape_input(input_data, input_size, single=True, warning=False):
9 | """Reshape input data before querying model
10 |
11 | This function will conduct a low-level resize if the size of
12 |     the input data is not compatible with the model input size.
13 |
14 | Parameters:
15 | input_data: A Torch tensor or Numpy array of the data
16 | input_size: A tuple of integers describing the new size
17 | warning: A Boolean that turns warnings on or off
18 |
19 | Returns:
20 | Data of new shape"""
21 | with suppress(Exception):
22 | input_data = torch.from_numpy(input_data)
23 |
24 | if input_size is None:
25 | if warning is True:
26 | print("No size was given and no reshaping can occur")
27 | return input_data
28 |
29 | # Reshape the data regardless of batch size
30 | start = len(input_data)
31 |
32 | alternate = list(input_size)
33 | alternate[0] = start
34 | alternate = tuple(alternate)
35 |
36 | try:
37 | if single:
38 | input_data = input_data.reshape(alternate)
39 | else:
40 | input_data = input_data.reshape(input_size)
41 | except Exception:
42 | if warning is True:
43 | print("Warning: Data loss is possible during resizing.")
44 | if single:
45 | input_data = input_data.resize_(alternate)
46 | else:
47 | input_data = input_data.resize_(input_size)
48 | return input_data
49 |
50 |
51 | def establish_query(query_func, input_size):
52 | """Equips a query function with the capacity to reshape data"""
53 | return lambda input_data: query_func(reshape_input(input_data, input_size))
54 |
55 |
56 | def query_model(model, input_data, input_size=None):
57 | """Returns the predictions of a Pytorch model
58 |
59 | Parameters:
60 | model: A pl.LightningModule or Torch module to be queried
61 | input_data: A Torch tensor entering the model
62 | input_size: A tuple of ints describes the shape of x
63 |
64 | Returns:
65 |         prediction_as_torch: A Torch tensor of the prediction probabilities
66 | target: A Torch tensor displaying the predicted label"""
67 | # Transform to Torch tensor
68 | if isinstance(input_data, np.ndarray) is True:
69 | input_data = torch.from_numpy(input_data)
70 |
71 | # If the model uses a GPU, the data may need to be shifted
72 | with suppress(Exception):
73 | input_data = input_data.cuda()
74 |
75 | # Transform and validate input data
76 | input_data = input_data.float()
77 | if input_size is not None:
78 | input_data = reshape_input(input_data, input_size)
79 |
80 | # Generate predictions and targets
81 | prediction = model(input_data)
82 | if prediction.size()[0] == 1:
83 | # Sometimes, the model may not output [1, num_of_targets], resulting in
84 | # a possible loss of data during the prediction to target conversion
85 | target = torch.argmax(prediction)
86 | else:
87 | target = torch.tensor(
88 | [torch.argmax(row) for row in torch.unbind(prediction)]
89 | )
90 | return prediction, target
91 |
92 |
93 | def get_target(model, input_data, input_size=None):
94 | """Returns the predicted target of a Pytorch model
95 |
96 | Parameters:
97 | model: A pl.LightningModule or Torch module to be queried
98 | input_data: A Torch tensor entering the model
99 | input_size: A tuple of ints describes the shape of x
100 |
101 | Returns:
102 | target: An Torch tensor displaying the predicted target"""
103 | prediction, target = query_model(model, input_data, input_size)
104 | return target
105 |
--------------------------------------------------------------------------------
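A short, hedged sketch of how the helpers above fit together, assuming a victim model trained with train_four_layer_mnist_victim (the helper used throughout the tests) and a random MNIST-shaped input:

import torch

from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.query import get_target, query_model

model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())

# Because input_size is supplied, reshape_input is applied before the forward pass
x = torch.rand(1, 28, 28, 1)
probabilities, label = query_model(model, x, (1, 28, 28, 1))
label_only = get_target(model, x, (1, 28, 28, 1))  # returns only the predicted label

--------------------------------------------------------------------------------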
/src/privacyraven/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "pre-0.1.0"
2 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/trailofbits/PrivacyRaven/8d5cee834be85ef29502028d3d8788780d393f17/tests/__init__.py
--------------------------------------------------------------------------------
/tests/generate.py:
--------------------------------------------------------------------------------
1 | """
2 | This is an example script of how to generate
3 | boilerplate for hypothesis tests
4 | """
5 |
6 | from hypothesis.extra import ghostwriter
7 |
8 | import privacyraven.extraction.metrics as metrics
9 |
10 | # The import above pulls in the module whose tests will be ghostwritten
11 |
12 | file_name = "test_this_code.py"
13 |
14 | # Use a context manager so the output file is closed after writing
15 | with open(file_name, "w") as f:
16 |     f.write(ghostwriter.magic(metrics))
17 |
--------------------------------------------------------------------------------
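The same boilerplate can be generated for any importable module; a hedged sketch (the target module and output file name below are only examples, assuming the synthesis module imports cleanly in your environment):

from hypothesis.extra import ghostwriter

import privacyraven.extraction.synthesis as synthesis

with open("test_synthesis_boilerplate.py", "w") as f:
    f.write(ghostwriter.magic(synthesis))

--------------------------------------------------------------------------------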
/tests/test_extraction_core.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 |
4 | import privacyraven as pr
5 | from privacyraven.extraction.core import ModelExtractionAttack
6 | from privacyraven.models.four_layer import FourLayerClassifier
7 | from privacyraven.models.victim import train_four_layer_mnist_victim
8 | from privacyraven.utils.data import get_emnist_data
9 | from privacyraven.utils.query import get_target
10 |
11 |
12 | def test_extraction():
13 | """End-to-end test of a model extraction attack"""
14 |
15 | # Create a query function for a target PyTorch Lightning model
16 | model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
17 |
18 | def query_mnist(input_data):
19 | # PrivacyRaven provides built-in query functions
20 | return get_target(model, input_data, (1, 28, 28, 1))
21 |
22 | # Obtain seed (or public) data to be used in extraction
23 | emnist_train, emnist_test = get_emnist_data()
24 |
25 | # Run a model extraction attack
26 | attack = ModelExtractionAttack(
27 | query=query_mnist,
28 | query_limit=100,
29 | victim_input_shape=(1, 28, 28, 1), # EMNIST data point shape
30 | victim_output_targets=10,
31 | substitute_input_shape=(3, 1, 28, 28),
32 | synthesizer="copycat",
33 |         substitute_model_arch=FourLayerClassifier,
34 |         substitute_input_size=784,  # 28 * 28, the flattened image size
35 | seed_data_train=emnist_train,
36 | seed_data_test=emnist_test,
37 | gpus=0,
38 | )
39 |
--------------------------------------------------------------------------------
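For readers stepping through this test interactively, a small, hedged sketch of a single victim query (it assumes EMNIST items come back as tensors, as show_test_image in utils/data.py expects):

import torch

from privacyraven.models.victim import train_four_layer_mnist_victim
from privacyraven.utils.data import get_emnist_data
from privacyraven.utils.query import get_target

model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
emnist_train, emnist_test = get_emnist_data()

x, y = emnist_train[0]                            # one EMNIST image and its label
predicted = get_target(model, x, (1, 28, 28, 1))  # label predicted by the victim
print(int(predicted), int(y))

--------------------------------------------------------------------------------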
/tests/test_extraction_metrics.py:
--------------------------------------------------------------------------------
1 | # This test code was modified from code written by the `hypothesis.extra.ghostwriter` module
2 | # and is provided under the Creative Commons Zero public domain dedication.
3 |
4 | import argparse
5 |
6 | import numpy as np
7 | import pytest
8 | import torch
9 | from hypothesis import assume, given
10 | from hypothesis import strategies as st
11 | from hypothesis.extra.numpy import arrays
12 |
13 | import privacyraven.extraction.metrics
14 | import privacyraven.extraction.synthesis
15 | import privacyraven.utils.query
16 | from privacyraven.models.victim import train_four_layer_mnist_victim
17 | from privacyraven.utils import model_creation
18 | from privacyraven.utils.data import get_emnist_data
19 | from privacyraven.utils.query import get_target
20 |
21 | # Establish strategies
22 |
23 | device = torch.device("cpu")
24 |
25 | model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
26 |
27 |
28 | def query_mnist(input_data):
29 | return get_target(model, input_data, (1, 28, 28, 1))
30 |
31 |
32 | def valid_query():
33 | return st.just(query_mnist)
34 |
35 |
36 | def valid_data():
37 |     return arrays(np.float64, (10, 28, 28, 1), elements=st.floats())
38 |
39 |
40 | @given(
41 | test_data=valid_data(),
42 | substitute_model=st.just(model),
43 | query_victim=valid_query(),
44 | victim_input_shape=st.just((1, 28, 28, 1)),
45 | substitute_input_shape=st.just((1, 28, 28, 1)),
46 | )
47 | def test_label_agreement_returns_agreed(
48 | test_data,
49 | substitute_model,
50 | query_victim,
51 | victim_input_shape,
52 | substitute_input_shape,
53 | ):
54 | x = privacyraven.extraction.metrics.label_agreement(
55 | test_data=test_data,
56 | substitute_model=substitute_model,
57 | query_victim=query_victim,
58 | victim_input_shape=victim_input_shape,
59 | substitute_input_shape=substitute_input_shape,
60 | )
61 |     # Technically, x should be 10, but an imperfect substitute could
62 |     # legitimately agree on slightly fewer labels; exact agreement is not
63 |     # the invariant we want to test here
64 |
65 | assert x > 8
66 |
--------------------------------------------------------------------------------
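Unbounded st.floats() also generates NaN and infinity, which can dominate the sampled "images". If that becomes a problem, one hedged alternative to valid_data is a bounded, finite element strategy (the name and bounds below are illustrative):

import numpy as np
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays


def finite_image_data():
    # Pixel-like values in [0, 1], with NaN and infinity excluded
    return arrays(
        np.float64,
        (10, 28, 28, 1),
        elements=st.floats(0, 1, allow_nan=False, allow_infinity=False),
    )

--------------------------------------------------------------------------------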
/tests/test_extraction_synthesis.py:
--------------------------------------------------------------------------------
1 | # This test code was modified from code written by the `hypothesis.extra.ghostwriter` module
2 | # and is provided under the Creative Commons Zero public domain dedication.
3 |
4 | import numpy as np
5 | import pytest
6 | import torch
7 | from art.estimators.classification import BlackBoxClassifier
8 | from hypothesis import assume, given, settings
9 | from hypothesis import strategies as st
10 | from hypothesis.extra.numpy import arrays
11 |
12 | import privacyraven.extraction.synthesis
13 | import privacyraven.utils.query
14 | from privacyraven.models.victim import train_four_layer_mnist_victim
15 | from privacyraven.utils import model_creation, query
16 | from privacyraven.utils.data import get_emnist_data
17 | from privacyraven.utils.query import get_target
18 |
19 | """
20 | The synthesis tests rely on sampling data from a model.
21 | We train that model and build its query function once at module level,
22 | rather than inside each test, to minimize the cycles spent
23 | on training.
24 | """
25 |
26 | device = torch.device("cpu")
27 |
28 | model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
29 |
30 | def query_mnist(input_data):
31 |     return privacyraven.utils.query.get_target(model, input_data, (1, 28, 28, 1))
32 |
33 |
34 | art_model = BlackBoxClassifier(
35 |     predict=query_mnist,  # the query function itself, not the `query` module
36 |     input_shape=(1, 28, 28, 1),
37 |     nb_classes=10,
38 |     clip_values=None,  # (0, 255),
39 |     preprocessing_defences=None,
40 |     postprocessing_defences=None,
41 |     preprocessing=(0, 1),  # None,
42 | )
43 |
44 |
45 | def valid_query():
46 |     return st.just(query_mnist)
47 |
48 |
49 | def valid_data():
50 |     return arrays(np.float64, (10, 28, 28, 1), elements=st.floats())
51 |
52 |
53 | @settings(deadline=None)
54 | @given(
55 | data=valid_data(),
56 | query=st.just(query_mnist),
57 | query_limit=st.integers(10, 25),
58 | art_model=st.just(art_model),
59 | victim_input_shape=st.just((1, 28, 28, 1)),
60 | substitute_input_shape=st.just((3, 1, 28, 28)),
61 | victim_output_targets=st.just(10),
62 | )
63 | def test_copycat_preserves_shapes(
64 | data,
65 | query,
66 | query_limit,
67 | art_model,
68 | victim_input_shape,
69 | substitute_input_shape,
70 | victim_output_targets,
71 | ):
72 | # data = torch.from_numpy(data).detach().clone().float()
73 | data = privacyraven.extraction.synthesis.process_data(data, query_limit)
74 | x_data, y_data = privacyraven.extraction.synthesis.copycat(
75 | data=data,
76 | query=query,
77 | query_limit=query_limit,
78 | art_model=art_model,
79 | victim_input_shape=victim_input_shape,
80 | substitute_input_shape=substitute_input_shape,
81 | victim_output_targets=victim_output_targets,
82 | )
83 | x_1 = x_data.size()
84 | y_1 = y_data.size()
85 | assert x_1 == torch.Size([10, 1, 28, 28])
86 | assert y_1 == torch.Size([10])
87 |
88 |
89 | @given(data=valid_data(), query_limit=st.integers(10, 25))
90 | def test_process_data_preserves_shape_and_type(data, query_limit):
91 | processed_data = privacyraven.extraction.synthesis.process_data(
92 | data=data, query_limit=query_limit
93 | )
94 | (x, y) = processed_data
95 | assert x.size() == torch.Size([10, 28, 28, 1])
96 |     assert x.type() == "torch.FloatTensor"
97 |
98 |
99 | """
100 | The hopskipjump fuzz test below is error-prone and remains disabled until it can be fixed:
101 |
102 | @given(
103 | data=valid_data(),
104 | query=st.just(query_mnist),
105 | query_limit=st.integers(10, 25),
106 | victim_input_shape=st.just((1, 28, 28, 1)),
107 | substitute_input_shape=st.just((1, 3, 28, 28)),
108 | victim_input_targets=st.just(10),
109 | )
110 | def test_fuzz_hopskipjump(
111 | data,
112 | query,
113 | query_limit,
114 | victim_input_shape,
115 | substitute_input_shape,
116 | victim_input_targets,
117 | ):
118 | data = torch.from_numpy(data).detach().clone().float()
119 | data = privacyraven.extraction.synthesis.process_data(data, query_limit)
120 | x_data, y_data = privacyraven.extraction.synthesis.hopskipjump(
121 | data=data,
122 | query=query,
123 | query_limit=query_limit,
124 | victim_input_shape=victim_input_shape,
125 | substitute_input_shape=substitute_input_shape,
126 | victim_input_targets=victim_input_targets,
127 | )
128 | print(x_data.size())
129 | print(y_data.size())
130 | """
131 |
--------------------------------------------------------------------------------
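A concrete, non-property sketch of process_data for interactive use, mirroring the shapes asserted in the properties above (the query_limit of 10 is arbitrary within the tested range):

import numpy as np

import privacyraven.extraction.synthesis as synthesis

data = np.random.rand(10, 28, 28, 1)
x, y = synthesis.process_data(data, query_limit=10)
print(x.size())  # expected: torch.Size([10, 28, 28, 1]), as in the property above

--------------------------------------------------------------------------------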
/tests/test_utils_data.py:
--------------------------------------------------------------------------------
1 | # This was modified from code written by the `hypothesis.extra.ghostwriter` module
2 | # and is provided under the Creative Commons Zero public domain dedication.
3 | import torch.utils.data.dataloader
4 | import torch.utils.data.dataset
5 | import torchvision.datasets.mnist
6 | from hypothesis import given
7 | from hypothesis import strategies as st
8 |
9 | import privacyraven.utils.data
10 |
11 |
12 | def valid_hparams():
13 | hparams = {
14 | "transform": None,
15 | "batch_size": 100,
16 | "num_workers": 4,
17 | "rand_split_val": [55000, 5000],
18 | "gpus": 1,
19 | "max_epochs": 10,
20 | "learning_rate": 0.001,
21 | "input_size": 1000,
22 | "targets": 10,
23 | }
24 |     return st.just(hparams)
25 |
26 |
27 | @given(transform=st.just(None), RGB=st.booleans())
28 | def test_get_emnist_data_returns_data(transform, RGB):
29 | emnist_train, emnist_test = privacyraven.utils.data.get_emnist_data(
30 | transform=transform, RGB=RGB
31 | )
32 | x, y = emnist_train.data, emnist_train.targets
33 | assert x.size() == torch.Size([240000, 28, 28])
34 |
35 |
36 | @given(hparams=valid_hparams())
37 | def test_get_mnist_loaders(hparams):
38 | x, y, z = privacyraven.utils.data.get_mnist_loaders(hparams=hparams)
39 |
--------------------------------------------------------------------------------
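If the hyperparameters should vary instead of staying pinned to a single dict, a hedged alternative to valid_hparams is st.fixed_dictionaries (the varied batch sizes below are illustrative):

from hypothesis import strategies as st


def varied_hparams():
    return st.fixed_dictionaries(
        {
            "transform": st.just(None),
            "batch_size": st.sampled_from([32, 64, 100]),
            "num_workers": st.just(4),
            "rand_split_val": st.just([55000, 5000]),
            "gpus": st.just(1),
            "max_epochs": st.just(10),
            "learning_rate": st.just(0.001),
            "input_size": st.just(1000),
            "targets": st.just(10),
        }
    )

--------------------------------------------------------------------------------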
/tests/test_utils_query.py:
--------------------------------------------------------------------------------
1 | # This test code was written by the `hypothesis.extra.ghostwriter` module
2 | # and is provided under the Creative Commons Zero public domain dedication.
3 | import numpy as np
4 | import pytest
5 | import torch
6 | from hypothesis import assume, given, settings
7 | from hypothesis import strategies as st
8 | from hypothesis.extra.numpy import arrays
9 |
10 | import privacyraven.extraction.synthesis
11 | import privacyraven.utils.query
12 | from privacyraven.models.victim import train_four_layer_mnist_victim
13 | from privacyraven.utils import model_creation
14 |
15 | # Establish strategies
16 |
17 | device = torch.device("cpu")
18 |
19 | model = train_four_layer_mnist_victim(gpus=torch.cuda.device_count())
20 |
21 |
22 | def query_mnist(input_data):
23 | return privacyraven.utils.query.get_target(model, input_data, (1, 28, 28, 1))
24 |
25 |
26 | def valid_query():
27 | return st.just(query_mnist)
28 |
29 |
30 | def valid_data():
31 |     return arrays(np.float64, (10, 28, 28, 1), elements=st.floats())
32 |
33 |
34 | @given(query_func=valid_query(), input_size=st.just((1, 28, 28, 1)))
35 | def test_fuzz_establish_query(query_func, input_size):
36 | x = privacyraven.utils.query.establish_query(
37 | query_func=query_func, input_size=input_size
38 | )
39 |
40 | assert callable(x) is True
41 |
42 |
43 | @settings(deadline=None)
44 | @given(
45 | model=st.just(model), input_data=valid_data(), input_size=st.just((1, 28, 28, 1))
46 | )
47 | def test_fuzz_get_target(model, input_data, input_size):
48 | input_data = torch.from_numpy(input_data)
49 | target = privacyraven.utils.query.get_target(
50 | model=model, input_data=input_data, input_size=input_size
51 | )
52 | assert torch.argmax(target) >= 0
53 | assert torch.argmax(target) < 10
54 |
55 |
56 | @settings(deadline=None)
57 | @given(
58 | input_data=valid_data(),
59 | input_size=st.just((1, 28, 28, 1)),
60 | single=st.just(False),
61 | warning=st.just(False),
62 | )
63 | def test_fuzz_reshape_input(input_data, input_size, single, warning):
64 | x = privacyraven.utils.query.reshape_input(
65 | input_data=input_data, input_size=input_size, single=single, warning=warning
66 | )
67 |     assert x.size() == torch.Size([1, 28, 28, 1])
68 |
--------------------------------------------------------------------------------
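To see the effect of the single flag in reshape_input outside of Hypothesis, a small sketch with concrete shapes (the zero-filled data is arbitrary):

import numpy as np

import privacyraven.utils.query as query

batch = np.zeros((10, 28, 28, 1), dtype=np.float64)

# single=True preserves the leading (batch) dimension, so the result stays (10, 28, 28, 1)
kept = query.reshape_input(batch, (1, 28, 28, 1), single=True)

# single=False forces the exact input_size, truncating the batch down to (1, 28, 28, 1)
forced = query.reshape_input(batch, (1, 28, 28, 1), single=False)

--------------------------------------------------------------------------------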