├── .github └── workflows │ ├── deploy-docs.yml │ ├── python-publish.yml │ └── regression.yml ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── docs ├── API │ ├── 0_Networks.rst │ ├── 1_Nodes.rst │ ├── 2_Training.rst │ ├── 3_Conversion.rst │ └── 4_Verification.rst ├── Guide │ └── usage.rst ├── Makefile ├── Setup │ └── install.rst ├── conf.py ├── index.rst └── template.py ├── download_benchmarks.sh ├── environment.yml ├── examples ├── notebooks │ ├── 00 - Introduction.ipynb │ ├── legacy │ │ ├── ReLU Over-approx.ipynb │ │ ├── Sigmoid Over-approx.ipynb │ │ ├── bidimensional_example_FC_ReLU.ipynb │ │ ├── bidimensional_example_FC_ReLU_Complete.ipynb │ │ ├── bidimensional_example_FC_ReLU_Overapprox.ipynb │ │ └── bidimensional_example_with_sigmoid.ipynb │ └── visualization.py ├── pruning_example │ ├── data │ │ └── placeholder.txt │ └── pruning_example.py ├── submissions │ └── 2023_SoftComputing │ │ ├── logs │ │ └── placeholder.txt │ │ └── soco_experiments_launcher.py └── test │ ├── README.md │ ├── instances.csv │ ├── test_nano.onnx │ ├── test_nano.vnnlib │ ├── test_small.onnx │ ├── test_small.vnnlib │ ├── test_tiny.onnx │ └── test_tiny.vnnlib ├── never2_batch.py ├── never2_launcher.py ├── pynever ├── __init__.py ├── config │ ├── __init__.py │ └── configuration.ini ├── datasets.py ├── exceptions.py ├── legacy │ └── tensors.py ├── networks.py ├── nodes.py ├── scripts │ ├── __init__.py │ └── cli.py ├── strategies │ ├── __init__.py │ ├── abstraction │ │ ├── __init__.py │ │ ├── bounds_propagation │ │ │ ├── __init__.py │ │ │ ├── bounds.py │ │ │ ├── layers │ │ │ │ ├── __init__.py │ │ │ │ ├── affine.py │ │ │ │ ├── convolution.py │ │ │ │ ├── maxpool.py │ │ │ │ └── relu.py │ │ │ ├── manager.py │ │ │ ├── test │ │ │ │ ├── __init__.py │ │ │ │ ├── debugging_launcher.py │ │ │ │ ├── generate test.ipynb │ │ │ │ ├── generate_property.ipynb │ │ │ │ └── test │ │ │ │ │ └── intermediate.vnnlib │ │ │ └── util.py │ │ ├── linearfunctions.py │ │ ├── networks.py │ │ ├── nodes.py 
│ │ └── star.py │ ├── conversion │ │ ├── __init__.py │ │ ├── converters │ │ │ ├── __init__.py │ │ │ ├── onnx.py │ │ │ ├── pytorch.py │ │ │ └── pytorch_layers.py │ │ └── representation.py │ ├── pruning.py │ ├── smt_reading.py │ ├── training.py │ └── verification │ │ ├── __init__.py │ │ ├── algorithms.py │ │ ├── parameters.py │ │ ├── properties.py │ │ ├── ssbp │ │ ├── __init__.py │ │ ├── constants.py │ │ ├── intersection.py │ │ ├── propagation.py │ │ ├── refinement.py │ │ └── split.py │ │ └── statistics.py └── utilities.py ├── pyproject.toml ├── test ├── logs │ └── placeholder.txt ├── regression │ ├── __init__.py │ └── run_regression_test.py └── unittests │ ├── 2d_prop.vnnlib │ ├── 2d_propagation.py │ ├── __init__.py │ ├── topological.py │ └── training.py └── vnncomp_scripts ├── config.yaml ├── install_tool.sh ├── prepare_instance.sh ├── requirements.txt └── run_instance.sh /.github/workflows/deploy-docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Sphinx Docs to GitHub Pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - dev # or your default branch 7 | 8 | jobs: 9 | build-and-deploy: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v3 15 | 16 | - name: Set up Python 17 | uses: actions/setup-python@v4 18 | with: 19 | python-version: '3.11' 20 | 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install . 25 | pip install sphinx sphinx-autodoc-typehints sphinx-rtd-theme 26 | 27 | - name: Build Sphinx documentation 28 | run: | 29 | cd docs 30 | sphinx-build -M html . 
_build/ 31 | 32 | - name: Clone GitHub Pages repo 33 | run: | 34 | git config --global user.name "github-actions" 35 | git config --global user.email "github-actions@github.com" 36 | git clone https://x-access-token:${{ secrets.GH_PAGES_DEPLOY }}@github.com/nevertools/nevertools.github.io.git gh-pages 37 | env: 38 | GH_PAGES_DEPLOY_TOKEN: ${{ secrets.GH_PAGES_DEPLOY }} 39 | 40 | - name: Copy docs to gh-pages/pynever 41 | run: | 42 | rm -rf gh-pages/pynever 43 | mkdir -p gh-pages/pynever 44 | cp -r docs/_build/html/* gh-pages/pynever/ 45 | 46 | - name: Commit and push changes 47 | run: | 48 | cd gh-pages 49 | git add pynever 50 | git commit -m "Update API docs from pynever" || echo "No changes to commit" 51 | git push origin master 52 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.11.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/regression.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs the regression tests on push to validate the changes made 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Push-enabled regression test 10 | 11 | on: 12 | push: 13 | branches: 14 | - main 15 | 16 | jobs: 17 | build: 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v3 23 | 24 | - name: Set up Python 25 | uses: actions/setup-python@v3 26 | with: 27 | python-version: '3.11.x' 28 | 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install . 33 | 34 | - name: Run tests 35 | run: | 36 | python test/regression/run_regression_test.py 37 | 38 | if [ $? 
-eq 0 ]; then 39 | echo "Push accepted" 40 | else 41 | echo "Push rejected" 42 | exit 1 43 | fi 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | docs/build 2 | docs/_build/ 3 | docs/_autosummary/ 4 | docs/source/_generated/ 5 | 6 | docs/.doctrees/ 7 | docs/.buildinfo 8 | docs/*.log 9 | 10 | __pycache__/ 11 | *.py[cod] 12 | *$py.class 13 | *.ipynb_checkpoints/ 14 | 15 | .idea/ 16 | **/logs/* 17 | !**/logs/placeholder.txt 18 | temp* 19 | !**/data/placeholder.txt 20 | 21 | .DS_Store 22 | Thumbs.db -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to pyNeVer 2 | Bug fixes, feature additions, tests, documentation and more can be contributed 3 | via [issues](https://github.com/NeVerTools/pyNeVer/issues) and/or [merge_requests](https://github.com/NeVerTools/pyNeVer/merge_requests). All contributions are welcome. 4 | 5 | ## Contributors 6 | - Stefano Demarchi 7 | - Dario Guidotti 8 | - Andrea Gimelli 9 | - Elena Botoeva 10 | 11 | ## Alumni 12 | - Karim Pedemonte 13 | - Pedro Henrique Simão Achete 14 | 15 | ## Bug fixes, feature additions, etc. 16 | Please send a merge request to the main branch. Tests or documentation without bug fixes or feature additions are welcome too. Feel free to ask questions [via issues](https://github.com/NeVerTools/pyNeVer/issues/new), 17 | 18 | - Create a branch from _main_. 19 | - Develop bug fixes, features, tests, etc. 20 | - Test your code on Python 3.11.x. A regression test is available by running from the main directory `python test/regression/run_regression_test.py` 21 | - Create a merge request to merge the changes from your branch to the pyNeVer main branch. 22 | 23 | ### Guidelines 24 | - Separate code commits from reformatting commits. 
25 | - Provide tests for any newly added code when possible. 26 | - Document your code following the [documentation guidelines](docs/template.py) 27 | 28 | ## Reporting Issues 29 | When reporting issues, please include code that reproduces the issue and whenever possible, an image that demonstrates the issue. Please upload images to GitHub, not to third-party file hosting sites. If necessary, add the image to a zip or tar archive. 30 | 31 | The best reproductions are self-contained scripts with minimal dependencies. If you are using a framework, try to replicate the issue just using pyNeVer. 32 | 33 | ### Provide details 34 | - What did you do? 35 | - What did you expect to happen? 36 | - What actually happened? 37 | - What versions of pyNeVer and Python are you using? 38 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | “Commons Clause” License Condition v1.0 2 | 3 | The Software is provided to you by the Licensor under the License, as defined 4 | below, subject to the following condition. 5 | Without limiting other conditions in the License, the grant of rights under the 6 | License will not include, and the License does not grant to you, the right to 7 | Sell the Software. 8 | For purposes of the foregoing, “Sell” means practicing any or all of the rights 9 | granted to you under the License to provide to third parties, for a fee or 10 | other consideration (including without limitation fees for hosting or 11 | consulting/ support services related to the Software), a product or service 12 | whose value derives, entirely or substantially, from the functionality of the 13 | Software. Any license notice or attribution required by the License must also 14 | include this Commons Clause License Condition notice. 
15 | 16 | Software: pyNeVer 17 | 18 | License: GNU General Public License v3.0 19 | 20 | Licensor: Dario Guidotti, Stefano Demarchi, Armando Tacchella, Luca Pulina 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pyNeVer 2 | 3 | [![PyPI - Version](https://img.shields.io/pypi/v/pynever.svg)](https://pypi.org/project/pynever) 4 | [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pynever.svg)](https://pypi.org/project/pynever) 5 | 6 | ----- 7 | 8 | Neural networks Verifier (__NeVer 2__) is a tool for the design, training and verification of neural networks. 9 | It supports feed-forward and residual neural networks with ReLU activation functions. 10 | __pyNeVer__ is the corresponding python package providing all the main capabilities of __NeVer 2__ 11 | and can be easily installed using pip. 12 | 13 | Installation and setup 14 | ---------------------- 15 | 16 | __pyNeVer__ depends on several packages, which are all available via pip and should be installed automatically. 17 | The packages required for the correct execution are the following: 18 | 19 | * _numpy_ 20 | * _ortools_ 21 | * _onnx_ 22 | * _torch_ 23 | * _torchvision_ 24 | * _pysmt_ 25 | * _multipledispatch_ 26 | 27 | To install __pyNeVer__ as an API, run the command: 28 | ```bash 29 | pip install pynever 30 | ``` 31 | 32 | To run __pyNeVer__ as a standalone tool you should clone this repository and create a conda environment 33 | ```bash 34 | git clone https://github.com/nevertools/pyNeVer 35 | cd pyNeVer 36 | 37 | conda env create -f environment.yml 38 | conda activate pynever 39 | ``` 40 | 41 | Command-line interface 42 | ---------------------- 43 | To verify [VNN-LIB](https://www.vnnlib.org) specifications on ONNX models we provide two scripts: one for single instances and another one for multiple instances.
44 | To verify a single instance run 45 | ```bash 46 | python never2_launcher.py [-o OUTPUT] [-t TIMEOUT] model.onnx property.vnnlib {sslp|ssbp} 47 | ``` 48 | 49 | For multiple instances collected in a CSV file run 50 | ```bash 51 | python never2_batch.py [-o OUTPUT] [-t TIMEOUT] instances.csv {sslp|ssbp} 52 | ``` 53 | * The -o option should be used to specify the output CSV file to save results, otherwise it will be generated in the same directory 54 | * The -t option specifies the timeout for each run 55 | * sslp and ssbp are the two algorithms employed by _NeVer2_: 56 | * SSLP (Star-set with Linear Programming) is our first algorithm based on star sets presented in [this paper](https://link.springer.com/article/10.1007/s00500-024-09907-5). 57 | * SSBP (Star-set with Bounds Propagation) enhances SSLP with an abstraction-refinement search and symbolic interval propagation. This is the algorithm used in VNNCOMP 2024. 58 | 59 | Supported layers 60 | ---------------------- 61 | 62 | At present the __pyNeVer__ package supports abstraction and verification of fully connected and convolutional 63 | neural networks with ReLU activation functions. 64 | 65 | Training and conversion support all the layers supported by [VNN-LIB](https://easychair.org/publications/paper/Qgdn). 66 | 67 | The [conversion](pynever/strategies/conversion) package provides the capabilities for the conversion of PyTorch and ONNX 68 | networks: therefore this kind of networks can be loaded using the respective frameworks and then converted to the 69 | internal representation used by __pyNeVer__. 70 | 71 | The properties for the verification and abstraction of the networks must be defined either in python code following 72 | the specification which can be found in the documentation, or via an SMT-LIB file compliant to the 73 | [VNN-LIB](https://www.vnnlib.org) standard. 
74 | 75 | Contributors 76 | ---------------------- 77 | 78 | The main contributors of pyNeVer are __Dario Guidotti__ and __Stefano Demarchi__, under the supervision of Professors 79 | __Armando Tacchella__ and __Luca Pulina__. 80 | A significant contribution for the participation in VNN-COMP 2024 was 81 | the help of __Elena Botoeva__. 82 | 83 | _Other contributors_: 84 | 85 | * __Andrea Gimelli__ - Bound propagation integration 86 | * __Pedro Henrique Simão Achete__ - Command-line interface and convolutional linearization 87 | * __Karim Pedemonte__ - Design and refactoring 88 | 89 | To contribute to this project, start by looking at the [CONTRIBUTING](CONTRIBUTING.md) file! 90 | 91 | Publications 92 | ---------------------- 93 | 94 | If you use __NeVer2__ or __pyNeVer__ in your work, **please kindly cite our papers**. Here you can find 95 | the list of BibTeX entries. 96 | 97 | ``` 98 | @article{demarchi2024never2, 99 | author = {Demarchi, S. and Guidotti, D. and Pulina, L. and Tacchella, A.}, 100 | journal = {Soft Computing}, 101 | number = {19}, 102 | pages = {11647-11665}, 103 | title = {{NeVer2}: learning and verification of neural networks}, 104 | volume = {28}, 105 | year = {2024} 106 | } 107 | 108 | @inproceedings{demarchi2024improving, 109 | author = {Demarchi, S. and Gimelli, A. and Tacchella, A.}, 110 | booktitle = {{ECMS} International Conference on Modelling and Simulation}, 111 | title = {Improving Abstract Propagation for Verification of Neural Networks}, 112 | year = {2024} 113 | } 114 | 115 | @inproceedings{demarchi2022formal, 116 | author = {Demarchi, S. and Guidotti, D. and Pitto, A. and Tacchella, A.}, 117 | booktitle = {{ECMS} International Conference on Modelling and Simulation}, 118 | title = {Formal Verification of Neural Networks: {A} Case Study About Adaptive Cruise Control}, 119 | year = {2022} 120 | } 121 | 122 | @inproceedings{guidotti2021pynever, 123 | author={Guidotti, D. and Pulina, L. 
and Tacchella, A.}, 124 | booktitle={International Symposium on Automated Technology for Verification and Analysis}, 125 | title={pynever: A framework for learning and verification of neural networks}, 126 | year={2021}, 127 | } 128 | ``` -------------------------------------------------------------------------------- /docs/API/0_Networks.rst: -------------------------------------------------------------------------------- 1 | Networks 2 | ========= 3 | 4 | .. automodule:: pynever.networks 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/API/1_Nodes.rst: -------------------------------------------------------------------------------- 1 | Nodes 2 | ========= 3 | 4 | .. automodule:: pynever.nodes 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/API/2_Training.rst: -------------------------------------------------------------------------------- 1 | Training 2 | ========= 3 | 4 | .. automodule:: pynever.strategies.training 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/API/3_Conversion.rst: -------------------------------------------------------------------------------- 1 | .. _conversion-ref: 2 | 3 | Conversion 4 | ========== 5 | 6 | **pyNeVer** provides conversion capabilities to import a NN model in the ONNX and pyTorch file formats, 7 | as well as to export to these formats trained NNs. 8 | 9 | Representation 10 | -------------- 11 | 12 | .. automodule:: pynever.strategies.conversion.representation 13 | :members: 14 | 15 | Converters 16 | ---------- 17 | 18 | .. automodule:: pynever.strategies.conversion.converters.onnx 19 | :members: 20 | 21 | .. 
automodule:: pynever.strategies.conversion.converters.pytorch 22 | :members: 23 | -------------------------------------------------------------------------------- /docs/API/4_Verification.rst: -------------------------------------------------------------------------------- 1 | .. _verification-ref: 2 | 3 | Verification 4 | ============ 5 | 6 | To solve a verification problem **pyNeVer** can define the specifications to verify and instantiate 7 | verification algorithms to try and prove that these specifications hold. 8 | 9 | Properties 10 | ---------- 11 | 12 | .. automodule:: pynever.strategies.verification.properties 13 | :members: 14 | 15 | Algorithms 16 | ---------- 17 | 18 | .. automodule:: pynever.strategies.verification.algorithms 19 | :members: 20 | 21 | Parameters 22 | ---------- 23 | 24 | .. automodule:: pynever.strategies.verification.parameters 25 | :members: 26 | -------------------------------------------------------------------------------- /docs/Guide/usage.rst: -------------------------------------------------------------------------------- 1 | Verification 2 | ============ 3 | 4 | Command-line interface 5 | ---------------------- 6 | | To verify `VNN-LIB `_ specifications on ONNX models we provide two scripts: 7 | | one for single instances and another one for multiple instances. 8 | 9 | To verify a single instance run 10 | 11 | .. code-block:: bash 12 | 13 | python never2_launcher.py [-o OUTPUT] [-t TIMEOUT] model.onnx property.vnnlib {sslp|ssbp} 14 | 15 | For multiple instances collected in a CSV file run 16 | 17 | .. 
code-block:: bash 18 | 19 | python never2_batch.py [-o OUTPUT] [-t TIMEOUT] instances.csv {sslp|ssbp} 20 | 21 | * The ``-o`` option should be used to specify the output CSV file to save results, otherwise it will be generated in the same directory 22 | * The ``-t`` option specifies the timeout for each run 23 | * ``sslp`` and ``ssbp`` are the two algorithms employed by **NeVer2**: 24 | 25 | * SSLP (Star-set with Linear Programming) is our first algorithm based on star sets presented in `this paper `_. 26 | * SSBP (Star-set with Bounds Propagation) enhances SSLP with an abstraction-refinement search and symbolic interval propagation. This is the algorithm used in VNNCOMP 2024. 27 | 28 | Supported layers 29 | ---------------------- 30 | 31 | At present the **pyNeVer** package supports abstraction and verification of fully connected and convolutional 32 | neural networks with ReLU activation functions. 33 | 34 | Training and conversion support all the layers supported by the `VNN-LIB standard `_. 35 | 36 | The :ref:`conversion ` package provides the capabilities for the conversion of PyTorch and ONNX 37 | networks: therefore this kind of networks can be loaded using the respective frameworks and then converted to the 38 | internal representation used by **pyNeVer**. 39 | 40 | The properties for the verification and abstraction of the networks must be defined either in python code following 41 | the specification which can be found in the documentation, or via an SMT-LIB file compliant to the 42 | `VNN-LIB `_ standard. -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 
9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | clean: 18 | rm -rf $(BUILDDIR)/* 19 | rm -rf Examples 20 | 21 | # Catch-all target: route all unknown targets to Sphinx using the new 22 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 23 | %: Makefile 24 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/Setup/install.rst: -------------------------------------------------------------------------------- 1 | Installation and Setup 2 | ====================== 3 | 4 | **pyNeVer** depends on several packages, which are all available via ``pip`` and should be installed automatically. 5 | The packages required for the correct execution are the following: 6 | 7 | * *numpy* 8 | * *ortools* 9 | * *onnx* 10 | * *torch* 11 | * *torchvision* 12 | * *pysmt* 13 | * *multipledispatch* 14 | 15 | To install **pyNeVer** as an API, run the command: 16 | 17 | .. code-block:: bash 18 | 19 | pip install pynever 20 | 21 | 22 | To run **pyNeVer** as a standalone tool you should clone this repository and create a conda environment 23 | 24 | .. code-block:: bash 25 | 26 | git clone https://github.com/nevertools/pyNeVer 27 | cd pyNeVer 28 | 29 | conda env create -f environment.yml 30 | conda activate pynever 31 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. 
For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | 16 | sys.path.insert(0, os.path.abspath('../')) 17 | 18 | # -- Project information ----------------------------------------------------- 19 | from pynever import Configuration 20 | 21 | project = 'pyNeVer' 22 | copyright = f'{Configuration.YEAR.value}, {Configuration.AUTHORS.value}' 23 | author = Configuration.AUTHORS.value 24 | 25 | # Documentation versioning 26 | release = Configuration.RELEASE.value 27 | 28 | # -- General configuration --------------------------------------------------- 29 | 30 | # Add any Sphinx extension module names here, as strings. They can be 31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 | # ones. 33 | extensions = [ 34 | 'sphinx.ext.autodoc', 35 | 'sphinx.ext.napoleon', 36 | 'sphinx_autodoc_typehints', 37 | 'sphinx.ext.linkcode', 38 | ] 39 | 40 | autodoc_member_order = 'bysource' 41 | 42 | # Add any paths that contain templates here, relative to this directory. 43 | # templates_path = ['_templates'] 44 | 45 | # List of patterns, relative to source directory, that match files and 46 | # directories to ignore when looking for source files. 47 | # This pattern also affects html_static_path and html_extra_path. 48 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README*'] 49 | 50 | # -- Options for HTML output ------------------------------------------------- 51 | 52 | # The theme to use for HTML and HTML Help pages. See the documentation for 53 | # a list of builtin themes. 
54 | html_theme = 'sphinx_rtd_theme' 55 | 56 | # Style and colors for code blocks 57 | pygments_style = 'sphinx' 58 | # highlight_language = 'python3' 59 | 60 | # Add any paths that contain custom static files (such as style sheets) here, 61 | # relative to this directory. They are copied after the builtin static files, 62 | # so a file named "default.css" will overwrite the builtin "default.css". 63 | # html_static_path = ['_static'] 64 | 65 | autodoc_default_options = { 66 | 'show-inheritance': True 67 | } 68 | 69 | 70 | def linkcode_resolve(domain, info): 71 | """Link documentation to Github""" 72 | if domain != 'py': 73 | return None 74 | if not info['module']: 75 | return None 76 | filename = info['module'].replace('.', '/') 77 | ext = "py" 78 | return "https://github.com/NeverTools/pynever/blob/main/%s.%s" % (filename, ext) 79 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | pyNeVer 2 | ========= 3 | 4 | Neural networks Verifier NeVer2 is a tool for the design, training and verification of neural networks. 5 | It supports feed-forward and residual neural networks with ReLU activation functions. 6 | pyNeVer is the corresponding python package providing all the main capabilities of NeVer2 7 | and can be easily installed using pip. 8 | 9 | This documentation explains how to setup pyNeVer, shows examples using pyNeVer, provides API documentation, 10 | and explains how to contribute to pyNeVer. 11 | 12 | NB: this documentation is in progress! 13 | 14 | .. _Setup: 15 | .. toctree:: 16 | :maxdepth: 1 17 | :glob: 18 | :caption: Getting started 19 | 20 | Setup/* 21 | 22 | .. _Usage: 23 | .. toctree:: 24 | :maxdepth: 1 25 | :glob: 26 | :caption: Usage 27 | 28 | Guide/* 29 | 30 | .. _API: 31 | ..
toctree:: 32 | :maxdepth: 1 33 | :glob: 34 | :caption: API Documentation 35 | 36 | API/* -------------------------------------------------------------------------------- /docs/template.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a short description of the module. You can, for instance, explain in few lines 3 | that you have some classes that do stuff. The name of the class should be referenced 4 | as ``AClass`` to typeset it correctly in the docs. 5 | """ 6 | 7 | class AClass: 8 | """ 9 | This is the docstring of ``AClass``. You should give a short 10 | description of it. Now let's describe the attributes after a blank line: 11 | 12 | Attributes 13 | ---------- 14 | foo: int 15 | The attribute foo of type int. Use the semicolon after the name without a space, and separate the type with a space. 16 | The description of the attribute is on a newline with 1 tab (4 spaces) indentation. 17 | """ 18 | 19 | def __init__(self, foo: int): 20 | # The __init__ function should not have a docstring, nor the class methods should be described in the 21 | # class docstring. Notice there is no blank line after the last attribute but there is one before the 22 | # def __init__ 23 | self.foo = foo 24 | 25 | def say_hello(self, name: str) -> str: 26 | """ 27 | This is the docstring of the method. Let's describe the signature after a blank line: 28 | 29 | Parameters 30 | ---------- 31 | name: str 32 | Same as for class attributes. No blank line for multiple parameters but one blank line before 33 | the description of what is returned. 34 | 35 | Returns 36 | ------- 37 | str 38 | Only the type. 39 | """ 40 | return f"Hello, {name}, my attribute is {self.foo}!" # Here you can leave a blank line before or not 41 | 42 | 43 | class AnotherClass: 44 | """ 45 | Docs of this class. Here we have an attribute of type ``AClass`` 46 | and the docstring is a little different. 
47 | 48 | Attributes 49 | ---------- 50 | elem: AClass 51 | Here the type is described directly after its name, but in this description you 52 | should use :class:`~root.package.module.AClass` to have a symbolic link. 53 | The same goes for function parameters. This should be used for classes belonging to 54 | this project and not for other classes from both Python or third-party APIs. 55 | """ 56 | 57 | def __init__(self): 58 | self.elem = AClass(2) 59 | -------------------------------------------------------------------------------- /download_benchmarks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Download benchmarks from the VNN-LIB repository 4 | if [ -z "$1" ] 5 | then 6 | echo "No cloning directory specified, reverting to default..." 7 | DIRECTORY="../RegressionBenchmarks" 8 | else 9 | DIRECTORY=$1 10 | fi 11 | 12 | echo "Downloading benchmarks in $DIRECTORY..." 13 | 14 | git clone https://github.com/nevertools/RegressionBenchmarks "$DIRECTORY" -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: pynever 2 | channels: 3 | - defaults 4 | - pytorch 5 | dependencies: 6 | - python=3.11 7 | - pip 8 | - pip: 9 | - numpy 10 | - onnx 11 | - torch 12 | - torchvision 13 | - ortools 14 | - pysmt 15 | - multipledispatch -------------------------------------------------------------------------------- /examples/notebooks/00 - Introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "metadata": {}, 5 | "cell_type": "markdown", 6 | "source": [ 7 | "# Introduction to _pyNeVer_\n", 8 | "\n", 9 | "This notebook illustrates the main features of pyNeVer for the design, training and verification of a neural network" 10 | ], 11 | "id": "7386e4bcb1b4c027" 12 | }, 13 | { 14 | "metadata": { 15 | "jupyter": { 16
| "is_executing": true 17 | } 18 | }, 19 | "cell_type": "code", 20 | "source": "from pynever.networks import SequentialNetwork", 21 | "id": "b63800010ce3b95a", 22 | "outputs": [], 23 | "execution_count": null 24 | }, 25 | { 26 | "metadata": {}, 27 | "cell_type": "markdown", 28 | "source": "The module __networks__ contains the classes SequentialNetwork and AcyclicNetwork to represent feed-forward and residual neural networks, respectively", 29 | "id": "180d25f1124192a2" 30 | }, 31 | { 32 | "metadata": {}, 33 | "cell_type": "code", 34 | "outputs": [], 35 | "execution_count": null, 36 | "source": [ 37 | "# Create an empty FFNN with identifier 'my_net' and input identifier 'X'\n", 38 | "my_net = SequentialNetwork('my_net', 'X')" 39 | ], 40 | "id": "2667008980c520b5" 41 | } 42 | ], 43 | "metadata": { 44 | "kernelspec": { 45 | "display_name": "Python 3", 46 | "language": "python", 47 | "name": "python3" 48 | }, 49 | "language_info": { 50 | "codemirror_mode": { 51 | "name": "ipython", 52 | "version": 2 53 | }, 54 | "file_extension": ".py", 55 | "mimetype": "text/x-python", 56 | "name": "python", 57 | "nbconvert_exporter": "python", 58 | "pygments_lexer": "ipython2", 59 | "version": "2.7.6" 60 | } 61 | }, 62 | "nbformat": 4, 63 | "nbformat_minor": 5 64 | } 65 | -------------------------------------------------------------------------------- /examples/notebooks/visualization.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import pypoman 4 | import scipy.spatial 5 | 6 | from pynever.strategies.abstraction.star import Star, StarSet 7 | 8 | 9 | def plot_star(p_star: Star, show=False, title=None, color=np.array([0.5, 0.5, 1])): 10 | # color = np.random.rand(3) 11 | alpha_vertexes = pypoman.compute_polytope_vertices(p_star.predicate_matrix, p_star.predicate_bias) 12 | x_vertexes = [] 13 | for v in alpha_vertexes: 14 | v_ver = np.zeros((v.shape[0], 1)) 15 | for k in 
range(v.shape[0]): 16 | v_ver[k, 0] = v[k] 17 | new_x_v = p_star.center + np.matmul(p_star.basis_matrix, v_ver) 18 | x_vertexes.append(new_x_v) 19 | 20 | x1_plot = [] 21 | x2_plot = [] 22 | for v in x_vertexes: 23 | x1_plot.append(v[0, 0]) 24 | x2_plot.append(v[1, 0]) 25 | 26 | for k in range(len(x1_plot)): 27 | for j in range(len(x1_plot)): 28 | plt.plot([x1_plot[k], x1_plot[j]], [x2_plot[k], x2_plot[j]], color=color) 29 | 30 | plt.scatter(x1_plot, x2_plot, color=color) 31 | if show: 32 | if title is not None: 33 | plt.title(title) 34 | plt.grid() 35 | 36 | 37 | def plot_starset(p_starset: StarSet, title, color=np.array([0.5, 0.5, 1])): 38 | for p_star in p_starset.stars: 39 | plot_star(p_star, color=color) 40 | 41 | if title is not None: 42 | plt.title(title) 43 | plt.grid() 44 | 45 | 46 | def get_star_extreme_points(p_star: Star): 47 | x_vertexes = [] 48 | alpha_vertexes = pypoman.compute_polytope_vertices(p_star.predicate_matrix, p_star.predicate_bias) 49 | for v in alpha_vertexes: 50 | v_ver = np.zeros((v.shape[0], 1)) 51 | for k in range(v.shape[0]): 52 | v_ver[k, 0] = v[k] 53 | new_x_v = p_star.center + np.matmul(p_star.basis_matrix, v_ver) 54 | new_x_v = np.array([new_x_v[0, 0], new_x_v[1, 0]]) 55 | x_vertexes.append(new_x_v) 56 | 57 | x_vertexes = np.array(x_vertexes) 58 | 59 | return x_vertexes 60 | 61 | 62 | def get_starset_extreme_points(p_starset: StarSet): 63 | x_vertexes = [] 64 | for p_star in p_starset.stars: 65 | alpha_vertexes = pypoman.compute_polytope_vertices(p_star.predicate_matrix, p_star.predicate_bias) 66 | for v in alpha_vertexes: 67 | v_ver = np.zeros((v.shape[0], 1)) 68 | for k in range(v.shape[0]): 69 | v_ver[k, 0] = v[k] 70 | new_x_v = p_star.center + np.matmul(p_star.basis_matrix, v_ver) 71 | new_x_v = np.array([new_x_v[0, 0], new_x_v[1, 0]]) 72 | x_vertexes.append(new_x_v) 73 | 74 | x_vertexes = np.array(x_vertexes) 75 | return x_vertexes 76 | 77 | 78 | def plot_convex_hull(points: np.ndarray, title=None, color=None, alpha=1, 
label=None): 79 | hull = scipy.spatial.ConvexHull(points) 80 | vertexes = points[hull.vertices, :] 81 | vertexes = np.vstack((vertexes, vertexes[0, :])) 82 | if color is None: 83 | plt.fill(vertexes[:, 0], vertexes[:, 1], alpha=0.5, label=label) 84 | else: 85 | plt.fill(vertexes[:, 0], vertexes[:, 1], color=color, alpha=alpha, label=label) 86 | 87 | if title is not None: 88 | plt.title(title) 89 | plt.grid() 90 | 91 | 92 | def print_star_data(p_star: Star): 93 | print("PREDICATE CONSTRAINTS:") 94 | for row in range(p_star.predicate_matrix.shape[0]): 95 | constraint = "" 96 | for col in range(p_star.predicate_matrix.shape[1]): 97 | if p_star.predicate_matrix[row, col] < 0: 98 | sign = "-" 99 | else: 100 | sign = "+" 101 | constraint = constraint + f"{sign} {abs(p_star.predicate_matrix[row, col])} * x_{col} " 102 | 103 | constraint = constraint + f"<= {p_star.predicate_bias[row, 0]}" 104 | print(constraint) 105 | 106 | print("VARIABLES EQUATIONS:") 107 | for row in range(p_star.basis_matrix.shape[0]): 108 | equation = f"z_{row} = " 109 | for col in range(p_star.basis_matrix.shape[1]): 110 | if p_star.basis_matrix[row, col] < 0: 111 | sign = "-" 112 | else: 113 | sign = "+" 114 | equation = equation + f"{sign} {abs(p_star.basis_matrix[row, col])} * x_{col} " 115 | 116 | if p_star.center[row, 0] < 0: 117 | c_sign = "-" 118 | else: 119 | c_sign = "+" 120 | equation = equation + f"{c_sign} {abs(p_star.center[row, 0])}" 121 | print(equation) 122 | -------------------------------------------------------------------------------- /examples/pruning_example/data/placeholder.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/examples/pruning_example/data/placeholder.txt -------------------------------------------------------------------------------- /examples/pruning_example/pruning_example.py: 
-------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | 4 | import onnx 5 | import torch 6 | import torch.nn as nn 7 | import torch.optim as opt 8 | import torchvision.transforms as tr 9 | 10 | import pynever.datasets as dt 11 | import pynever.networks as networks 12 | import pynever.nodes as nodes 13 | import pynever.strategies.training as training 14 | import pynever.utilities as util 15 | from pynever.strategies import pruning 16 | from pynever.strategies.conversion.converters.onnx import ONNXConverter 17 | 18 | # Logger Setup 19 | logger = logging.getLogger("pynever") 20 | logger.setLevel(logging.INFO) 21 | ch = logging.StreamHandler() 22 | ch.setLevel(logging.INFO) 23 | formatter = logging.Formatter('%(message)s') 24 | ch.setFormatter(formatter) 25 | logger.addHandler(ch) 26 | 27 | # Building of the network of interest 28 | small_net = networks.SequentialNetwork("SmallNetwork", "X") 29 | small_net.append_node(nodes.FullyConnectedNode('Linear_1', (784,), 64)) 30 | small_net.append_node(nodes.BatchNormNode('BatchNorm_2', (64,))) 31 | small_net.append_node(nodes.ReLUNode('ReLU_3', (64,))) 32 | small_net.append_node(nodes.FullyConnectedNode('Linear_4', (64,), 32)) 33 | small_net.append_node(nodes.BatchNormNode('BatchNorm_5', (32,))) 34 | small_net.append_node(nodes.ReLUNode('ReLU_6', (32,))) 35 | small_net.append_node(nodes.FullyConnectedNode('Linear_7', (32,), 10)) 36 | 37 | onnx_net = ONNXConverter().from_neural_network(small_net) 38 | onnx.save(onnx_net.onnx_network, "FMNIST_Example.onnx") 39 | 40 | # Loading of the dataset of interest 41 | transform = tr.Compose([tr.ToTensor(), tr.Normalize(1, 0.5), tr.Lambda(lambda x: torch.flatten(x))]) 42 | train_dataset = dt.TorchFMNIST("data/", True, transform) 43 | test_dataset = dt.TorchFMNIST("data/", False, transform) 44 | 45 | # Initialization of the training and pruning parameters 46 | cuda = False # If possible the experiment should be run with cuda, 
otherwise it will take quite some time. 47 | epochs = 2 48 | train_batch_size = 128 49 | validation_batch_size = 64 50 | test_batch_size = 64 51 | learning_rate = 0.1 52 | batch_norm_decay = 0.001 53 | weight_sparsity_rate = 0.7 # Prune 70% of the weights 54 | neuron_sparsity_rate = 0.5 # Prune 50% of the neurons 55 | validation_percentage = 0.3 56 | scheduler_patience = 5 57 | l1_decay = 0.0001 58 | weight_decay = 0.0001 59 | checkpoint_root = "checkpoints/" 60 | 61 | # Creation of the trainers needed for baseline training and fine tuned pruning. 62 | opt_params_pr = {"lr": learning_rate} 63 | scheduler_params = {"patience": scheduler_patience} 64 | opt_params = {"lr": learning_rate, "weight_decay": weight_decay} 65 | 66 | trainer_wp = training.PytorchTraining(opt.Adam, opt_params_pr, nn.CrossEntropyLoss(), epochs, 67 | validation_percentage, train_batch_size, validation_batch_size, 68 | scheduler_con=opt.lr_scheduler.ReduceLROnPlateau, sch_params=scheduler_params, 69 | precision_metric=training.PytorchMetrics.inaccuracy, 70 | network_transform=pruning.WPTransform(l1_decay, True, cuda), 71 | device='cuda', checkpoints_root=checkpoint_root) 72 | 73 | trainer_ns = training.PytorchTraining(opt.Adam, opt_params_pr, nn.CrossEntropyLoss(), epochs, 74 | validation_percentage, train_batch_size, validation_batch_size, 75 | scheduler_con=opt.lr_scheduler.ReduceLROnPlateau, sch_params=scheduler_params, 76 | precision_metric=training.PytorchMetrics.inaccuracy, 77 | network_transform=pruning.NSTransform(batch_norm_decay, True, cuda), 78 | device='cuda', checkpoints_root=checkpoint_root) 79 | 80 | trainer_baseline = training.PytorchTraining(opt.Adam, opt_params, nn.CrossEntropyLoss(), epochs, 81 | validation_percentage, train_batch_size, validation_batch_size, 82 | scheduler_con=opt.lr_scheduler.ReduceLROnPlateau, 83 | sch_params=scheduler_params, 84 | precision_metric=training.PytorchMetrics.inaccuracy, device='cuda', 85 | checkpoints_root=checkpoint_root) 86 | 87 | # Training 
and pruning of the networks of interest 88 | baseline_net = copy.deepcopy(small_net) 89 | baseline_net.identifier = "Baseline" 90 | baseline_net = trainer_baseline.train(baseline_net, train_dataset) 91 | 92 | sparse_net = copy.deepcopy(small_net) 93 | sparse_net.identifier = "Sparse" 94 | trainer_ns.network_transform.fine_tuning = False 95 | sparse_net = trainer_ns.train(sparse_net, train_dataset) 96 | trainer_ns.network_transform.fine_tuning = True 97 | 98 | wp_pruner = pruning.WeightPruning(weight_sparsity_rate, trainer_wp, pre_training=True) 99 | ns_pruner = pruning.NetworkSlimming(neuron_sparsity_rate, trainer_ns, pre_training=False) 100 | 101 | wp_pruned_net = copy.deepcopy(small_net) 102 | wp_pruned_net.identifier = "WP_PRUNED" 103 | wp_pruned_net = wp_pruner.prune(wp_pruned_net, train_dataset) 104 | 105 | ns_pruned_net = copy.deepcopy(sparse_net) 106 | ns_pruned_net.identifier = "NS_PRUNED" 107 | ns_pruned_net = ns_pruner.prune(ns_pruned_net, train_dataset) 108 | 109 | tester = training.PytorchTesting(training.PytorchMetrics.inaccuracy, {}, test_batch_size, 'cuda') 110 | 111 | baseline_accuracy = tester.test(baseline_net, test_dataset) 112 | sparse_accuracy = tester.test(sparse_net, test_dataset) 113 | ns_accuracy = tester.test(ns_pruned_net, test_dataset) 114 | wp_accuracy = tester.test(wp_pruned_net, test_dataset) 115 | 116 | # Batch norm fusion for the networks of interest (needed for verification and abstraction). 
117 | if (isinstance(baseline_net, networks.SequentialNetwork) and 118 | isinstance(sparse_net, networks.SequentialNetwork) and 119 | isinstance(wp_pruned_net, networks.SequentialNetwork) and 120 | isinstance(ns_pruned_net, networks.SequentialNetwork)): 121 | com_baseline_net = util.combine_batchnorm1d_net(baseline_net) 122 | com_sparse_net = util.combine_batchnorm1d_net(sparse_net) 123 | com_wp_pruned_net = util.combine_batchnorm1d_net(wp_pruned_net) 124 | com_ns_pruned_net = util.combine_batchnorm1d_net(ns_pruned_net) 125 | 126 | com_baseline_accuracy = tester.test(com_baseline_net, test_dataset) 127 | com_sparse_accuracy = tester.test(com_sparse_net, test_dataset) 128 | com_ns_accuracy = tester.test(com_ns_pruned_net, test_dataset) 129 | com_wp_accuracy = tester.test(com_wp_pruned_net, test_dataset) 130 | 131 | logger.info("ACCURACIES (% of samples correctly classified):\n") 132 | logger.info(f"Baseline: {baseline_accuracy}, Sparse: {sparse_accuracy}, NS: {ns_accuracy}, WP: {wp_accuracy}") 133 | logger.info(f"COMBINED BATCHNORM NETWORKS") 134 | logger.info(f"Baseline: {com_baseline_accuracy}, Sparse: {com_sparse_accuracy}, NS: {com_ns_accuracy}, " 135 | f"WP: {com_wp_accuracy}") 136 | -------------------------------------------------------------------------------- /examples/submissions/2023_SoftComputing/logs/placeholder.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/examples/submissions/2023_SoftComputing/logs/placeholder.txt -------------------------------------------------------------------------------- /examples/submissions/2023_SoftComputing/soco_experiments_launcher.py: -------------------------------------------------------------------------------- 1 | # Script for launching the experiments of 'Soft Computing' journal paper 2 | # Notice that the timeout setup requires a UNIX system to run 3 | 4 | import csv 5 | import 
logging 6 | import os 7 | import signal 8 | import sys 9 | import time 10 | from contextlib import contextmanager 11 | 12 | from pynever.strategies.conversion.converters.onnx import ONNXConverter 13 | from pynever.strategies.conversion.representation import load_network_path, ONNXNetwork 14 | from pynever.strategies.verification.algorithms import SSLPVerification 15 | from pynever.strategies.verification.parameters import SSLPVerificationParameters 16 | from pynever.strategies.verification.properties import VnnLibProperty 17 | 18 | pynever_setting = [['Over-approx.', 'overapprox', [0]], 19 | ['Mixed1', 'mixed', [1]], 20 | ['Complete', 'complete', [10000]]] 21 | 22 | logger_stream = logging.getLogger("pynever.strategies.verification") 23 | logger_file = logging.getLogger("log_file") 24 | 25 | logger_stream.addHandler(logging.StreamHandler()) 26 | logger_file.addHandler(logging.FileHandler('logs/experiments.csv')) 27 | 28 | logger_stream.setLevel(logging.INFO) 29 | logger_file.setLevel(logging.INFO) 30 | 31 | 32 | class TimeoutException(Exception): 33 | """ 34 | Exception class for timeout 35 | 36 | """ 37 | 38 | pass 39 | 40 | 41 | @contextmanager 42 | def time_limit(seconds: int): 43 | def signal_handler(signum, frame): 44 | raise TimeoutException('Timeout') 45 | 46 | signal.signal(signal.SIGALRM, signal_handler) 47 | signal.alarm(seconds) 48 | 49 | try: 50 | yield 51 | finally: 52 | signal.alarm(0) 53 | 54 | 55 | def exec_instance(network_path: str, property_path: str, property_id: str, timeout_seconds: int): 56 | network_instance = load_network_path(network_path) 57 | onnx_net = None 58 | if isinstance(network_instance, ONNXNetwork): 59 | onnx_net = ONNXConverter().to_neural_network(network_instance) 60 | 61 | property_instance = VnnLibProperty(property_path) 62 | 63 | inst_name = f"[{network_instance.identifier} - {property_id}]" 64 | part_string = f'{inst_name},' 65 | 66 | for setting in pynever_setting: 67 | logger_stream.info(f"Benchmark: {inst_name}") 68 | 
logger_stream.info(f"PyNeVer setting: {setting[0]}") 69 | 70 | try: 71 | with time_limit(timeout_seconds): 72 | parameters = SSLPVerificationParameters(heuristic=setting[1], neurons_to_refine=setting[2]) 73 | strategy = SSLPVerification(parameters) 74 | time_start = time.perf_counter() 75 | safe, _ = strategy.verify(onnx_net, property_instance) 76 | time_end = time.perf_counter() 77 | part_string += f"{safe},{time_end - time_start}," 78 | 79 | except TimeoutException: 80 | part_string += f"---,---," 81 | break 82 | 83 | logger_file.info(part_string[:-1]) 84 | 85 | 86 | if __name__ == '__main__': 87 | ''' 88 | Usage: python exp_launcher.py 1 1 1 1 100 89 | for running all tests with 100 seconds timeout 90 | 91 | ''' 92 | 93 | TEST_ACAS = True if sys.argv[1] == '1' else False 94 | TEST_ACC = True if sys.argv[2] == '1' else False 95 | TEST_RL = True if sys.argv[3] == '1' else False 96 | TEST_DRONES = True if sys.argv[4] == '1' else False 97 | TIMEOUT = int(sys.argv[5]) 98 | BENCHMARKS_DIR = sys.argv[6] 99 | 100 | logger_file.info('Benchmark,Over-approx.,,Mixed1,,Complete,,') 101 | logger_file.info(',Result,Time,Result,Time,Result,Time') 102 | 103 | # ACAS XU launcher 104 | if TEST_ACAS: 105 | with open(f'{BENCHMARKS_DIR}/ACAS XU/instances.csv') as instances: 106 | folder = f'{BENCHMARKS_DIR}/ACAS XU' 107 | csv_reader = csv.reader(instances) 108 | 109 | for instance in csv_reader: 110 | exec_instance(f"{folder}/{instance[0]}", 111 | f"{folder}/{instance[1]}", 112 | instance[1], TIMEOUT) 113 | 114 | # ACC and RL launcher 115 | dirs = [] 116 | if TEST_ACC: 117 | dirs.append(f'{BENCHMARKS_DIR}/ACC') 118 | 119 | if TEST_RL: 120 | dirs.extend([f'{BENCHMARKS_DIR}/RL/Cartpole', 121 | f'{BENCHMARKS_DIR}/RL/Lunar Lander', 122 | f'{BENCHMARKS_DIR}/RL/Dubins Rejoin']) 123 | 124 | for dir_name in dirs: 125 | for property_file in os.listdir(f"{dir_name}/Properties"): 126 | p_f = os.path.join(f"{dir_name}/Properties", property_file) 127 | 128 | if os.path.isfile(p_f): 129 | for 
network_file in os.listdir(f"{dir_name}/Networks"): 130 | n_f = os.path.join(f"{dir_name}/Networks", network_file) 131 | 132 | if os.path.isfile(n_f): 133 | exec_instance(n_f, p_f, property_file, TIMEOUT) 134 | 135 | # Drones launcher 136 | if TEST_DRONES: 137 | with open(f'{BENCHMARKS_DIR}/Drones/instances.csv') as instances: 138 | folder = f'{BENCHMARKS_DIR}/Drones' 139 | csv_reader = csv.reader(instances) 140 | 141 | for instance in csv_reader: 142 | exec_instance(f"{folder}/{instance[0]}", 143 | f"{folder}/{instance[1]}", 144 | instance[1], TIMEOUT) 145 | -------------------------------------------------------------------------------- /examples/test/README.md: -------------------------------------------------------------------------------- 1 | This is a test benchmark category. The folder contains 3 .onnx and .vnnlib files used for 2 | the category. The instances.csv contains the full list of benchmark instances, one per line: 3 | 4 | 5 | ```onnx_file,vnn_lib_file,timeout_secs``` 6 | 7 | The test properties correspond to trivial networks that should ensure the correctness of the 8 | tool and provide a measure of the setup overhead with respect to the verification strategy. 9 | 10 | The verification result should be safe (i.e., unsat) for all three benchmarks.
-------------------------------------------------------------------------------- /examples/test/instances.csv: -------------------------------------------------------------------------------- 1 | test_nano.onnx,test_nano.vnnlib,60 2 | test_tiny.onnx,test_tiny.vnnlib,60 3 | test_small.onnx,test_small.vnnlib,60 4 | -------------------------------------------------------------------------------- /examples/test/test_nano.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/examples/test/test_nano.onnx -------------------------------------------------------------------------------- /examples/test/test_nano.vnnlib: -------------------------------------------------------------------------------- 1 | (declare-const X_0 Real) 2 | (declare-const Y_0 Real) 3 | 4 | (assert (>= X_0 -1.0)) 5 | (assert (<= X_0 1.0)) 6 | 7 | (assert (<= Y_0 -1.0)) 8 | -------------------------------------------------------------------------------- /examples/test/test_small.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/examples/test/test_small.onnx -------------------------------------------------------------------------------- /examples/test/test_small.vnnlib: -------------------------------------------------------------------------------- 1 | (declare-const X_0 Real) 2 | (declare-const Y_0 Real) 3 | 4 | (assert (>= X_0 -1.0)) 5 | (assert (<= X_0 1.0)) 6 | 7 | (assert (>= Y_0 100.0)) -------------------------------------------------------------------------------- /examples/test/test_tiny.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/examples/test/test_tiny.onnx 
-------------------------------------------------------------------------------- /examples/test/test_tiny.vnnlib: -------------------------------------------------------------------------------- 1 | (declare-const X_0 Real) 2 | (declare-const Y_0 Real) 3 | 4 | (assert (>= X_0 -1.0)) 5 | (assert (<= X_0 1.0)) 6 | 7 | (assert (>= Y_0 100.0)) 8 | -------------------------------------------------------------------------------- /never2_batch.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module never2_batch.py 3 | 4 | Provides an entry point for the execution of NeVer2 on a batch of instances 5 | 6 | Authors: Stefano Demarchi, Pedro Achete 7 | 8 | """ 9 | 10 | import os 11 | from argparse import ArgumentParser 12 | 13 | import never2_launcher 14 | from pynever.scripts import cli 15 | 16 | parser = ArgumentParser(prog='NeVer2', 17 | description='Neural Network verifier', 18 | epilog='Università degli Studi di Genova') 19 | 20 | # Instances 21 | parser.add_argument('csv', help='Collection of instances to verify') 22 | 23 | parser = never2_launcher.add_options(parser) 24 | 25 | if __name__ == '__main__': 26 | args = vars(parser.parse_args()) 27 | 28 | # Clear default log file 29 | try: 30 | os.remove('output.csv') 31 | except OSError: 32 | pass 33 | 34 | # Check log file specification 35 | logfile = 'output.csv' 36 | if 'out' in args.keys(): 37 | logfile = args['out'] 38 | 39 | # Execute 40 | if args['algorithm'] == 'ssbp': 41 | try: 42 | cli.ssbp_verify_batch(args['csv'], '.\\', logfile, args['timeout'], args['params']) 43 | except NotImplementedError: 44 | exit(1) 45 | else: 46 | try: 47 | cli.sslp_verify_batch(False, args['csv'], args['strategy'], logfile) 48 | except NotImplementedError: 49 | exit(1) 50 | 51 | exit(0) 52 | -------------------------------------------------------------------------------- /never2_launcher.py: -------------------------------------------------------------------------------- 1 | """ 2 | 
Module never2_launcher.py 3 | 4 | Provides an entry point for the execution of NeVer2 on a single instance 5 | 6 | Authors: Stefano Demarchi, Pedro Achete 7 | 8 | """ 9 | 10 | import os 11 | from argparse import ArgumentParser 12 | 13 | from pynever.scripts import cli 14 | 15 | 16 | def add_options(p: ArgumentParser): 17 | """ 18 | Common options for the execution of NeVer2 19 | 20 | """ 21 | 22 | # Options 23 | p.add_argument('-o', '--out', type=str, 24 | default='output.csv', help='output file for execution log') 25 | p.add_argument('-t', '--timeout', type=int, default=300, 26 | help='execution timeout in seconds') 27 | 28 | # Algorithm 29 | algorithm = p.add_subparsers(dest='algorithm', description='Verification algorithm to use') 30 | 31 | # SSBP 32 | ssbp = algorithm.add_parser('ssbp', description='Starset with bounds propagation') 33 | ssbp.add_argument('-p', '--params', nargs='?', default='', metavar='FILE', 34 | help='JSON file with parameters') 35 | 36 | # SSLP 37 | sslp = algorithm.add_parser('sslp', description='Starset with linear programs') 38 | sslp.add_argument('-s', '--strategy', choices=['overapprox', 'mixed', 'complete'], metavar='STRATEGY', 39 | default='complete', help='Verification strategy to use, complete by default') 40 | 41 | return p 42 | 43 | 44 | if __name__ == '__main__': 45 | parser = ArgumentParser(prog='NeVer2', 46 | description='Neural Network verifier', 47 | epilog='Università degli Studi di Genova') 48 | 49 | # Instance 50 | parser.add_argument('model', help='network model in ONNX format') 51 | parser.add_argument('property', help='property specification in VNN-LIB format') 52 | 53 | parser = add_options(parser) 54 | 55 | args = vars(parser.parse_args()) 56 | 57 | # Clear default log file 58 | try: 59 | os.remove('output.csv') 60 | except OSError: 61 | pass 62 | 63 | # Check log file specification 64 | logfile = 'output.csv' 65 | if 'out' in args.keys(): 66 | logfile = args['out'] 67 | 68 | # Execute 69 | if args['algorithm'] == 
'ssbp': 70 | try: 71 | cli.ssbp_verify_single(args['model'], args['property'], './', logfile, args['timeout'], args['params']) 72 | except NotImplementedError: 73 | exit(1) 74 | else: 75 | try: 76 | cli.sslp_verify_single(False, args['model'], args['property'], args['strategy'], logfile) 77 | except NotImplementedError: 78 | exit(1) 79 | 80 | exit(0) 81 | -------------------------------------------------------------------------------- /pynever/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module initializes the configuration variables for the repository 3 | """ 4 | import enum 5 | import os 6 | 7 | with open(os.path.join(os.path.dirname(__file__), 'config/configuration.ini'), 'r') as f: 8 | config_data = { 9 | l[0]: l[2] 10 | for l in [line.strip('\n').split() for line in f] 11 | if l != [] and '#' not in l 12 | } 13 | 14 | Configuration = enum.Enum('Configuration', config_data) 15 | -------------------------------------------------------------------------------- /pynever/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/config/__init__.py -------------------------------------------------------------------------------- /pynever/config/configuration.ini: -------------------------------------------------------------------------------- 1 | # This configuration file defines the name and the value of internal variables 2 | # on which pyNeVer is based on. 3 | # This file is intended for developers. 4 | 5 | ARITHMETIC = torch 6 | YEAR = 2025 7 | AUTHORS = 'NeverTools' 8 | RELEASE = '1.2.2' -------------------------------------------------------------------------------- /pynever/datasets.py: -------------------------------------------------------------------------------- 1 | """This module contains the classes used to represent and use datasets. 
2 | """ 3 | import abc 4 | from collections.abc import Callable 5 | 6 | import numpy as np 7 | import torch 8 | import torch.utils.data as tdata 9 | import torchvision as tv 10 | 11 | 12 | class Dataset(abc.ABC): 13 | """ 14 | An abstract class used to represent a Dataset. The concrete descendant must 15 | implement the methods __getitem__ and __len__. 16 | """ 17 | 18 | @abc.abstractmethod 19 | def __getitem__(self, index: int): 20 | raise NotImplementedError 21 | 22 | @abc.abstractmethod 23 | def __len__(self): 24 | raise NotImplementedError 25 | 26 | 27 | class TorchMNIST(Dataset, tv.datasets.MNIST): 28 | """ 29 | A concrete class used to represent the MNIST Dataset. It leverages the torch dataset MNIST. 30 | 31 | Attributes 32 | ---------- 33 | data_path: str 34 | Path to the folder in which the dataset will be saved. 35 | train: bool 36 | If True then the training set is loaded otherwise the test set is loaded. 37 | transform: Callable, Optional 38 | Transformation to apply to the data. We assume this is an object like the transforms presented in torchvision. 39 | The parameters of the callable (other than the object subject to the transformation) should be attributes of 40 | the object. 41 | target_transform: Callable, Optional 42 | Transformation to apply to the targets. We assume this is an object like the transforms presented in 43 | torchvision. The parameters of the callable (other than the object subject to the transformation) should be 44 | attributes of the object. 45 | download: bool 46 | True if the dataset must be downloaded, False otherwise. 
47 | """ 48 | 49 | def __init__(self, data_path: str, train: bool, transform: Callable | None = None, 50 | target_transform: Callable | None = None, download: bool = True): 51 | Dataset.__init__(self) 52 | tv.datasets.MNIST.__init__(self, data_path, train, transform, target_transform, download) 53 | 54 | def __getitem__(self, index: int): 55 | return tv.datasets.MNIST.__getitem__(self, index) 56 | 57 | def __len__(self): 58 | return tv.datasets.MNIST.__len__(self) 59 | 60 | 61 | class TorchFMNIST(Dataset, tv.datasets.FashionMNIST): 62 | """ 63 | A concrete class used to represent the FMNIST Dataset. It leverages the torch dataset FMNIST. 64 | 65 | Attributes 66 | ---------- 67 | data_path: str 68 | Path to the folder in which the dataset will be saved. 69 | train: bool 70 | If True then the training set is loaded otherwise the test set is loaded. 71 | transform: Callable, Optional 72 | Transformation to apply to the data. We assume this is an object like the transforms presented in torchvision. 73 | The parameters of the callable (other than the object subject to the transformation) should be attributes of 74 | the object. 75 | target_transform: Callable, Optional 76 | Transformation to apply to the targets. We assume this is an object like the transforms presented in 77 | torchvision. The parameters of the callable (other than the object subject to the transformation) should be 78 | attributes of the object. 79 | download: bool 80 | True if the dataset must be downloaded, False otherwise. 
81 | """ 82 | 83 | def __init__(self, data_path: str, train: bool, transform: Callable | None = None, 84 | target_transform: Callable | None = None, download: bool = True): 85 | Dataset.__init__(self) 86 | tv.datasets.FashionMNIST.__init__(self, data_path, train, transform, target_transform, download) 87 | 88 | def __getitem__(self, index: int): 89 | return tv.datasets.FashionMNIST.__getitem__(self, index) 90 | 91 | def __len__(self): 92 | return tv.datasets.FashionMNIST.__len__(self) 93 | 94 | 95 | class GenericFileDataset(Dataset, tdata.Dataset): 96 | """ 97 | A concrete class used to represent a generic dataset memorized as a txt file. It loads the values using numpy 98 | loadtxt function. It assumes each line of the file is a separated datapoint. 99 | For each line we assume that the first n values are the input and the following are the target. The index of the 100 | first element of the target is identified by the target_index attribute. 101 | 102 | Attributes 103 | ---------- 104 | filepath: str 105 | Path to the file containing the dataset. 106 | N.B.: the names of the dataset are supposed to be jame_pos_*.txt where * can be tested or train. 107 | target_index: int 108 | Index of the first element of the outputs. 109 | dtype: type, Optional 110 | Data type of the values of the data-points. Refer to numpy.loadtxt for more details. 111 | delimiter: str, Optional 112 | Delimiter between the different values of the data-points. Refer to numpy.loadtxt for more details. 113 | transform: Callable, Optional 114 | Transformation to apply to the data. We assume this is an object like the transforms presented in torchvision. 115 | The parameters of the callable (other than the object subject to the transformation) should be attributes of 116 | the object. 117 | target_transform: Callable, Optional 118 | Transformation to apply to the targets. We assume this is an object like the transforms presented in 119 | torchvision. 
The parameters of the callable (other than the object subject to the transformation) should be 120 | attributes of the object. 121 | """ 122 | 123 | def __init__(self, filepath: str, target_index: int, dtype: type = float, delimiter: str = ",", 124 | transform: Callable | None = None, target_transform: Callable | None = None): 125 | 126 | self.filepath = filepath 127 | self.target_index = target_index 128 | self.dtype = dtype 129 | self.delimiter = delimiter 130 | self.transform = transform 131 | self.target_transform = target_transform 132 | 133 | dataset = torch.Tensor(np.loadtxt(filepath, dtype=self.dtype, delimiter=self.delimiter)) 134 | self.__data = dataset[:, 0:self.target_index] 135 | self.__targets = dataset[:, self.target_index:].to(torch.long) 136 | 137 | def __getitem__(self, index: int) -> tuple: 138 | 139 | data, target = self.__data[index], self.__targets[index] 140 | if self.transform is not None: 141 | data = self.transform(data) 142 | if self.target_transform is not None: 143 | target = self.target_transform(target) 144 | 145 | return data, target 146 | 147 | def __len__(self): 148 | return len(self.__data) 149 | -------------------------------------------------------------------------------- /pynever/exceptions.py: -------------------------------------------------------------------------------- 1 | """This module contains customized exceptions used in pynever. 
2 | """ 3 | from multipledispatch import dispatch 4 | 5 | 6 | class InvalidDimensionError(Exception): 7 | def __init__(self, message): 8 | super().__init__(message) 9 | 10 | 11 | class OutOfRangeError(Exception): 12 | @dispatch(str) 13 | def __init__(self, message): 14 | super().__init__(message) 15 | 16 | @dispatch((int, float), (int, float), (int, float)) 17 | def __init__(self, parameter, min_value, max_value): 18 | super().__init__( 19 | f'Parameter {parameter.__name__} with value {parameter} is out of range: [{min_value}, {max_value}]') 20 | 21 | 22 | class EmptyNetworkError(Exception): 23 | def __init__(self, message='The network is empty'): 24 | super(EmptyNetworkError, self).__init__(message) 25 | 26 | 27 | class InvalidNodeError(Exception): 28 | def __init__(self, message): 29 | super().__init__(message) 30 | 31 | 32 | class NotInNetworkError(Exception): 33 | 34 | @dispatch(str) 35 | def __init__(self, message): 36 | super().__init__(message) 37 | 38 | @dispatch() 39 | def __init__(self, node): 40 | super().__init__(f'{node.identifier} is not a node of the Network') 41 | 42 | 43 | class NonOptimalLPError(Exception): 44 | def __init__(self, message='The LP problem was not Optimal'): 45 | super().__init__(message) 46 | 47 | 48 | class FixedConflictWithBounds(Exception): 49 | pass 50 | -------------------------------------------------------------------------------- /pynever/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/scripts/__init__.py -------------------------------------------------------------------------------- /pynever/strategies/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/strategies/__init__.py 
-------------------------------------------------------------------------------- /pynever/strategies/abstraction/__init__.py: --------------------------------------------------------------------------------
import logging

# Dedicated loggers for timing statistics of the abstraction-based analysis
LOGGER_EMPTY = logging.getLogger("pynever.strategies.abstraction.empty_times")
LOGGER_LP = logging.getLogger("pynever.strategies.abstraction.lp_times")
LOGGER_LB = logging.getLogger("pynever.strategies.abstraction.lb_times")
LOGGER_UB = logging.getLogger("pynever.strategies.abstraction.ub_times")
LOGGER_LAYER = logging.getLogger('pynever.strategies.abstraction.layers')

# Numeric tolerance used by the abstraction procedures.
# NOTE(review): 10e-15 equals 1e-14 — confirm 1e-15 was not intended.
ABSTRACTION_PRECISION_GUARD = 10e-15
-------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/__init__.py: --------------------------------------------------------------------------------
import logging
from enum import Enum

# Logger shared by the bounds propagation modules
BOUNDS_LOGGER = logging.getLogger("pynever.strategies.bounds_propagation")


class ReLUStatus(Enum):
    """This enumerator registers the status of a ReLU neuron

    ACTIVE means that the input is positive, i.e., ReLU acts as identity
    INACTIVE means that the input is negative, i.e., ReLU outputs zero
    UNSTABLE means that the input is both positive and negative

    """
    ACTIVE = 0
    INACTIVE = 1
    UNSTABLE = 2
-------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/bounds.py: --------------------------------------------------------------------------------
"""
Module bounds.py

This file contains the different representations of symbolic and
numeric bounds for the verification of neural networks
"""
import abc
import copy
from abc import abstractmethod

import torch

from pynever.exceptions import InvalidDimensionError
from
pynever.strategies.abstraction.linearfunctions import LinearFunctions


class AbstractBounds(abc.ABC):
    """
    Abstract class that defines the abstraction of lower and upper bounds for a neural network layer

    Attributes
    ----------
    lower: Any
        The lower bounds
    upper: Any
        The upper bounds
    size: int
        The number of dimensions of the lower and upper bounds
    """

    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper
        # Cached size, computed via the subclass-specific get_size()
        self.size = self.get_size()

    def __repr__(self):
        # One (lower, upper) pair per dimension
        return ', '.join(["({}, {})".format(self.lower[i], self.upper[i]) for i in range(self.size)])

    @abstractmethod
    def get_lower(self):
        """Return the lower bounds representation."""
        raise NotImplementedError

    @abstractmethod
    def get_upper(self):
        """Return the upper bounds representation."""
        raise NotImplementedError

    @abstractmethod
    def get_size(self) -> int:
        """Return the number of dimensions of the bounds."""
        raise NotImplementedError


class HyperRectangleBounds(AbstractBounds):
    """
    Class that defines the hyper-rectangle bounds for a neural network layer, i.e.,
    bounding the variables with individual lower and upper bounds.

    Methods
    -------
    get_dimension_bounds(int)
        Procedure to get the bounds for a specific dimension
    """

    def __init__(self, lower: torch.Tensor, upper: torch.Tensor):
        super(HyperRectangleBounds, self).__init__(lower, upper)

    def __repr__(self):
        # Per-dimension numeric bounds, 5 decimal places
        return ', '.join(["(lb[{}]: {:.5f}, ub[{}]: {:.5f})".format(i, self.lower[i], i, self.upper[i])
                          for i in range(self.size)])

    def get_lower(self) -> torch.Tensor:
        return self.lower

    def get_upper(self) -> torch.Tensor:
        return self.upper

    def get_size(self) -> int:
        return len(self.lower)

    def clone(self):
        # Deep copy so the clone can be modified independently of this object
        return HyperRectangleBounds(copy.deepcopy(self.lower), copy.deepcopy(self.upper))

    def get_dimension_bounds(self, dim: int) -> tuple[float, float]:
        """Procedure to get the bounds for a specific dimension

        Raises
        ------
        InvalidDimensionError
            If dim is outside [0, size)
        """
        if 0 <= dim < self.size:
            return self.lower[dim].item(), self.upper[dim].item()
        else:
            raise InvalidDimensionError("Dimension {} is out of range for size {}".format(dim, self.size))


class SymbolicLinearBounds(AbstractBounds):
    """
    Class that defines the symbolic linear bounds for a neural network layer, i.e.,
    the linear equations for the lower and upper bounds.

    Methods
    -------
    get_upper_bounds(HyperRectangleBounds) -> torch.Tensor
        Procedure to compute the numeric upper bounds
    get_lower_bounds(HyperRectangleBounds) -> torch.Tensor
        Procedure to compute the numeric lower bounds
    get_all_bounds(HyperRectangleBounds) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
        Procedure to compute all bounds
    to_hyper_rectangle_bounds(HyperRectangleBounds) -> HyperRectangleBounds
        Procedure to compute the hyper-rectangle bounds
    """

    def __init__(self, lower: LinearFunctions, upper: LinearFunctions):
        super(SymbolicLinearBounds, self).__init__(lower, upper)

    def get_lower(self) -> LinearFunctions:
        return self.lower

    def get_upper(self) -> LinearFunctions:
        return self.upper

    def get_size(self) -> int:
        return self.lower.get_size()

    def get_upper_bounds(self, input_bounds: HyperRectangleBounds) -> torch.Tensor:
        """Procedure to compute the numeric upper bounds

        Parameters
        ----------
        input_bounds: HyperRectangleBounds
            The initial bounds
        """
        return self.upper.compute_max_values(input_bounds)

    def get_lower_bounds(self, input_bounds: HyperRectangleBounds) -> torch.Tensor:
        """Procedure to compute the numeric lower bounds

        Parameters
        ----------
        input_bounds: HyperRectangleBounds
            The initial bounds
        """
        return self.lower.compute_min_values(input_bounds)

    def get_all_bounds(self, input_bounds: HyperRectangleBounds) -> tuple[
        torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Procedure to compute all bounds

        Returns, in order: min and max of the lower equation,
        then min and max of the upper equation.

        Parameters
        ----------
        input_bounds: HyperRectangleBounds
            The initial bounds
        """
        return self.lower.compute_min_values(input_bounds), \
            self.lower.compute_max_values(input_bounds), \
            self.upper.compute_min_values(input_bounds), \
            self.upper.compute_max_values(input_bounds)

    def to_hyper_rectangle_bounds(self, input_bounds: HyperRectangleBounds) -> HyperRectangleBounds:
        """Procedure to compute the hyper-rectangle bounds

        Concretizes the symbolic equations on the given input box:
        lower from the minimum of the lower equation, upper from the
        maximum of the upper equation.

        Parameters
        ----------
        input_bounds: HyperRectangleBounds
            The initial bounds
        """
        return HyperRectangleBounds(self.lower.compute_min_values(input_bounds),
                                    self.upper.compute_max_values(input_bounds))
-------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/layers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/strategies/abstraction/bounds_propagation/layers/__init__.py -------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/layers/affine.py: --------------------------------------------------------------------------------
import torch

from pynever.networks import NeuralNetwork
from pynever.nodes import FullyConnectedNode
from pynever.strategies.abstraction.bounds_propagation import util
from pynever.strategies.abstraction.bounds_propagation.bounds import SymbolicLinearBounds, HyperRectangleBounds
from pynever.strategies.abstraction.linearfunctions import LinearFunctions


def get_layer_equation(layer: FullyConnectedNode) -> LinearFunctions:
    """Return the linear equation of the layer

    Parameters
    ----------
    layer: FullyConnectedNode
        The linear layer

    Returns
    ----------
    LinearFunctions
        The equation of the layer
    """
    return LinearFunctions(layer.weight, layer.bias)


def substitute_one_step_back(equation: LinearFunctions, prev: SymbolicLinearBounds,
                             lower: bool = False) -> LinearFunctions:
"""Perform one substitution step 29 | Given an equation mapping R^n -> R^m in the form of a matrix and an offset, and 30 | previous equations mapping R^k to R^n, 31 | computes a new equation (in the form of a matrix and an offset) that 32 | maps R^k to R^m. 33 | 34 | """ 35 | prev_lower = prev.get_lower() 36 | prev_upper = prev.get_upper() 37 | 38 | matrix_pos = torch.clamp(equation.get_matrix(), min=0) 39 | matrix_neg = torch.clamp(equation.get_matrix(), max=0) 40 | 41 | if lower: 42 | cur_matrix = torch.matmul(matrix_pos, prev_lower.get_matrix()) + \ 43 | torch.matmul(matrix_neg, prev_upper.get_matrix()) 44 | cur_offset = torch.matmul(matrix_pos, prev_lower.get_offset()) + \ 45 | torch.matmul(matrix_neg, prev_upper.get_offset()) + equation.get_offset() 46 | else: 47 | cur_matrix = torch.matmul(matrix_pos, prev_upper.get_matrix()) + \ 48 | torch.matmul(matrix_neg, prev_lower.get_matrix()) 49 | cur_offset = torch.matmul(matrix_pos, prev_upper.get_offset()) + \ 50 | torch.matmul(matrix_neg, prev_lower.get_offset()) + equation.get_offset() 51 | 52 | return LinearFunctions(cur_matrix, cur_offset) 53 | 54 | 55 | def get_backwards_layer_equation(layer: FullyConnectedNode, 56 | network: NeuralNetwork, 57 | equations_in: dict[str, SymbolicLinearBounds], 58 | input_bounds: HyperRectangleBounds, 59 | lower: bool = False) -> tuple[LinearFunctions, torch.Tensor]: 60 | """Compute the lower or upper bound equation of the layer by backwards substitution 61 | of the previous layers from the variables of the input layer 62 | 63 | Parameters 64 | ---------- 65 | layer: FullyConnectedNode 66 | The linear layer 67 | network: NeuralNetwork 68 | The neural network 69 | equations_in: dict[str, SymbolicLinearBounds] 70 | The layer-to-layer symbolic bounds 71 | input_bounds: HyperRectangleBounds 72 | The input bounds 73 | lower: bool 74 | Flag to compute the lower bound or the upper bound 75 | 76 | Returns 77 | ---------- 78 | tuple[LinearFunctions, torch.Tensor] 79 | The bound equation 
from the input and the bound values
    """
    # Start from this layer's own symbolic equation (lower or upper side)
    if lower:
        cur_matrix = equations_in[layer.identifier].get_lower().get_matrix()
        cur_offset = equations_in[layer.identifier].get_lower().get_offset()
    else:
        cur_matrix = equations_in[layer.identifier].get_upper().get_matrix()
        cur_offset = equations_in[layer.identifier].get_upper().get_offset()

    cur_equation = LinearFunctions(cur_matrix, cur_offset)
    prev_layer_id = network.get_previous_id(layer.identifier)

    # Substitute backwards, one layer at a time, until the input is reached
    while prev_layer_id is not None:
        cur_equation = substitute_one_step_back(cur_equation, equations_in[prev_layer_id], lower)
        # NOTE(review): for the first layer of the network get_parents(...)
        # may return an empty list, in which case [0] raises IndexError here
        # instead of producing the None that terminates the loop — confirm
        # against NeuralNetwork's API.
        prev_layer_id = network.get_parents(network.nodes[prev_layer_id])[0].identifier

    # Concretize the resulting equation on the input hyper-rectangle
    bound = cur_equation.compute_min_values(input_bounds) if lower else cur_equation.compute_max_values(input_bounds)
    return cur_equation, bound


def compute_dense_output_bounds(layer: FullyConnectedNode, inputs: SymbolicLinearBounds) -> SymbolicLinearBounds:
    """Compute the forwards symbolic output bounds for the layer

    Parameters
    ----------
    layer: FullyConnectedNode
        The linear layer
    inputs: SymbolicLinearBounds
        The input symbolic bounds

    Returns
    ----------
    SymbolicLinearBounds
        The symbolic output bounds for the layer
    """
    # Sign-split of the weights: positive entries keep the bound direction,
    # negative entries swap lower and upper
    weights_plus = torch.clamp(layer.weight, min=0)
    weights_minus = torch.clamp(layer.weight, max=0)

    lm = inputs.get_lower().get_matrix()
    um = inputs.get_upper().get_matrix()
    lo = inputs.get_lower().get_offset()
    uo = inputs.get_upper().get_offset()

    lower_matrix = util.compute_lower(weights_minus, weights_plus, lm, um)
    lower_offset = util.compute_lower(weights_minus, weights_plus, lo, uo) + layer.bias
    upper_matrix = util.compute_upper(weights_minus, weights_plus, lm, um)
    upper_offset = util.compute_upper(weights_minus, weights_plus, lo, uo) + layer.bias

    return
SymbolicLinearBounds(LinearFunctions(lower_matrix, lower_offset),
                                LinearFunctions(upper_matrix, upper_offset))
-------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/layers/convolution.py: --------------------------------------------------------------------------------
import numpy as np
import torch

from pynever.nodes import ConvNode
from pynever.strategies.abstraction.bounds_propagation.bounds import SymbolicLinearBounds
from pynever.strategies.abstraction.linearfunctions import LinearFunctions


class LinearizeConv:
    """Symbolic bounds propagation through a 2D convolutional layer.

    Every output pixel is expressed as a linear combination of the input
    symbolic equations, split by weight sign as in interval arithmetic.
    """

    def compute_output_equations(self, conv_node: ConvNode, inputs: SymbolicLinearBounds):
        """Compute the symbolic output bounds of conv_node from the input bounds."""
        # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        device = torch.device("cpu")

        DATA_TYPE = torch.float32

        sym_lower_bounds = inputs.get_lower()
        sym_upper_bounds = inputs.get_upper()

        # Conversion torch: this conversion is only temporary.
        # NOTE(review): both branches are no-ops (every attribute is assigned
        # to itself) — the intended numpy <-> torch conversion never happens.
        if isinstance(sym_lower_bounds.matrix, np.ndarray):
            sym_lower_bounds.matrix = sym_lower_bounds.matrix
            sym_upper_bounds.matrix = sym_upper_bounds.matrix
            sym_lower_bounds.offset = sym_lower_bounds.offset
            sym_upper_bounds.offset = sym_upper_bounds.offset
        elif isinstance(sym_lower_bounds.matrix, torch.Tensor):
            sym_lower_bounds.matrix = sym_lower_bounds.matrix
            sym_upper_bounds.matrix = sym_upper_bounds.matrix
            sym_lower_bounds.offset = sym_lower_bounds.offset
            sym_upper_bounds.offset = sym_upper_bounds.offset

        weights = conv_node.weight

        if conv_node.has_bias:
            bias_weights = conv_node.bias

        # Extract kernel dimensions
        if isinstance(conv_node.kernel_size, tuple) and len(conv_node.kernel_size) == 2:
            kernel_height, kernel_width = conv_node.kernel_size
            kernel_size = conv_node.kernel_size
        elif isinstance(conv_node.kernel_size, int):
            kernel_height = kernel_width = conv_node.kernel_size
            kernel_size = (conv_node.kernel_size, conv_node.kernel_size)
        else:
            raise ValueError("Kernel size must be an int or a tuple of two integers.")

        # Determine padding values (pad_tuple follows F.pad's
        # (left, right, top, bottom) convention)
        if isinstance(conv_node.padding, int):
            pad_tuple = (conv_node.padding, conv_node.padding, conv_node.padding, conv_node.padding)
        elif isinstance(conv_node.padding, tuple) and len(conv_node.padding) == 2:
            pad_tuple = (conv_node.padding[1], conv_node.padding[1], conv_node.padding[0], conv_node.padding[0])
        elif isinstance(conv_node.padding, tuple) and len(conv_node.padding) == 4:
            if conv_node.padding[0] != conv_node.padding[1] or conv_node.padding[2] != conv_node.padding[3]:
                raise ValueError(
                    "Only symmetrical padding is supported. Top must equal bottom and left must equal right.")
            pad_tuple = conv_node.padding
        elif conv_node.padding == 0 or conv_node.padding is None:
            # NOTE(review): padding == 0 is already caught by the isinstance(int)
            # branch above, so this effectively handles only padding is None
            pad_tuple = (0, 0, 0, 0)
        else:
            raise ValueError("Padding must be an int or a tuple of appropriate dimensions.")

        # Extract input shape information: channels, height, width
        input_channels = conv_node.in_channels

        # Flatten filter weights for sparse matrix operations
        num_filters = weights.shape[0]
        filter_weights = weights.to(DATA_TYPE).to(device)

        if filter_weights.ndim == 3:
            filter_weights = filter_weights.reshape(num_filters, -1)
        elif filter_weights.ndim == 4:
            filter_weights = filter_weights.reshape(num_filters, input_channels, -1)

        if conv_node.has_bias:
            filter_biases = bias_weights.to(DATA_TYPE).to(device)

        input_shape = conv_node.in_dims[0]
        input_flattened_size = input_shape[1] * input_shape[2]

        # Calculate output dimensions of the convolution.
        # NOTE(review): pad_tuple is in (left, right, top, bottom) order but is
        # unpacked here as (top, bottom, left, right) — correct only while the
        # height and width paddings coincide; confirm for asymmetric h/w padding.
        pad_top, pad_bottom, pad_left, pad_right = pad_tuple
        output_height = int(((input_shape[1] - kernel_height + pad_top + pad_bottom) / conv_node.stride[0]) + 1)
        output_width = int(((input_shape[2] - kernel_width + pad_left + pad_right) / conv_node.stride[1]) + 1)
        output_flattened_size = output_height * output_width

        assert output_height == conv_node.out_dim[1] and output_width == conv_node.out_dim[
            2], "The predicted output dim is different from the real one"

        # Create an index matrix for image patches
        index_matrix = torch.arange(0, input_flattened_size * input_channels, dtype=DATA_TYPE, device=device).reshape(
            input_channels, input_shape[1],
            input_shape[2])

        list_index_matrixes = []
        # Apply padding to input tensors. When there is padding the indexing system must be modified
        if conv_node.padding is not None:
            for index in range(input_channels):
                # The index matrix is padded with -1 so that padded positions
                # can be recognized and filtered out below
                idx_matrix = torch.nn.functional.pad(index_matrix[index], pad=pad_tuple, mode='constant', value=-1)
                list_index_matrixes.append(idx_matrix)
        else:
            list_index_matrixes.append(index_matrix)

        # Unfold the input indices to get patch indices
        list_patches_indices = list()
        for index in range(len(list_index_matrixes)):
            patch_indices = torch.nn.functional.unfold(list_index_matrixes[index].unsqueeze(0), kernel_size=kernel_size,
                                                       stride=conv_node.stride).transpose(0,
                                                                                          1).to(torch.int32)
            list_patches_indices.append(patch_indices)

        num_patches = list_patches_indices[0].shape[0]

        assert output_flattened_size == num_patches, "Mismatch between the calculated output_size of the image and the real one"

        # Ensure the number of patches matches the expected output size
        assert num_patches == output_flattened_size, f"Mismatch in patch count: {num_patches} != {output_flattened_size}."

        # This conversion is temporary
        filter_temp_matrix_pos_results = []
        filter_temp_matrix_neg_results = []
        filter_temp_offset_pos_results = []
        filter_temp_offset_neg_results = []

        # This FOR loop handles the generations of the filters
        for filter_idx in range(num_filters):
            patch_temp_matrix_pos_results = []
            patch_temp_matrix_neg_results = []
            patch_temp_offset_pos_results = []
            patch_temp_offset_neg_results = []

            # This FOR loop cycle generates the symbolic formulas for the elements of a single filter
            for patch_idx in range(num_patches):

                # Drop the -1 entries introduced by padding and remember the
                # boolean mask so the filter weights can be filtered identically
                padding_re_indexing_list = list()
                patch_list = list()
                for patch_indices in list_patches_indices:
                    patch = patch_indices[patch_idx]
                    filter_pad = patch != -1
                    patch = patch[filter_pad]
                    padding_re_indexing_list.append(filter_pad)
                    patch_list.append(patch)

                # NOTE(review): this variable is unused from here on
                padding_re_indexing = patch_list[0] != -1

                temp_pos_results = []
                temp_neg_results = []

                temp_offset_pos_results = []
                temp_offset_neg_results = []

                for in_ch_idx in range(input_channels):

                    if filter_weights.ndim == 2:
                        filter = filter_weights[filter_idx].reshape(-1)
                    elif filter_weights.ndim == 3:
                        filter = filter_weights[filter_idx, in_ch_idx].reshape(-1)
                    else:
                        assert False, "Case not implemented."

                    # Sign-split of the filter weights
                    pos_filter = torch.max(filter, torch.tensor(0.).to(DATA_TYPE).to(device))
                    neg_filter = torch.min(filter, torch.tensor(0.).to(DATA_TYPE).to(device))
                    pos_filter = pos_filter[padding_re_indexing_list[in_ch_idx]]
                    neg_filter = neg_filter[padding_re_indexing_list[in_ch_idx]]

                    # Phantom dimension for broadcasting
                    pos_filter = pos_filter[:, None]
                    neg_filter = neg_filter[:, None]

                    # Select the rows of the input equations touched by this patch
                    pos_matrix = sym_upper_bounds.matrix[patch_list[in_ch_idx]]
                    neg_matrix = sym_lower_bounds.matrix[patch_list[in_ch_idx]]
                    pos_offset = sym_upper_bounds.offset[patch_list[in_ch_idx]]
                    neg_offset = sym_lower_bounds.offset[patch_list[in_ch_idx]]

                    # Per-channel lower/upper contributions (interval arithmetic)
                    i_ch_lower_symb_matrix_bounds = neg_matrix * pos_filter + pos_matrix * neg_filter
                    i_ch_upper_symb_matrix_bounds = pos_matrix * pos_filter + neg_matrix * neg_filter
                    i_ch_lower_symb_bias_bounds = neg_offset * pos_filter.squeeze() + pos_offset * neg_filter.squeeze()
                    i_ch_upper_symb_bias_bounds = pos_offset * pos_filter.squeeze() + neg_offset * neg_filter.squeeze()

                    temp_neg_results.append(i_ch_lower_symb_matrix_bounds)
                    temp_pos_results.append(i_ch_upper_symb_matrix_bounds)
                    temp_offset_neg_results.append(i_ch_lower_symb_bias_bounds)
                    temp_offset_pos_results.append(i_ch_upper_symb_bias_bounds)

                # NOTE(review): the pos/neg names swap here — temp_neg_results
                # holds the LOWER-bound terms but is concatenated into
                # pos_matrix_stack (and vice versa). The swapped labels are
                # carried through to the final
                # `upper = LinearFunctions(pos_matrix, pos_offset)`, so the
                # returned SymbolicLinearBounds appears to have lower and upper
                # exchanged. Verify against a numeric reference before relying
                # on this result.
                pos_matrix_stack = torch.cat(temp_neg_results, dim=0)
                neg_matrix_stack = torch.cat(temp_pos_results, dim=0)
                pos_offset_stack = torch.cat(temp_offset_neg_results, dim=0)
                neg_offset_stack = torch.cat(temp_offset_pos_results, dim=0)

                # Sum the contributions of all patch elements into one equation
                pos_matrix_sum = torch.sum(pos_matrix_stack, dim=0)
                neg_matrix_sum = torch.sum(neg_matrix_stack, dim=0)
                pos_offset_sum = torch.sum(pos_offset_stack, dim=0)
                neg_offset_sum = torch.sum(neg_offset_stack, dim=0)

                if conv_node.has_bias:
                    bias = filter_biases[filter_idx]
                    pos_offset_sum += bias
                    neg_offset_sum += bias

                patch_temp_matrix_pos_results.append(pos_matrix_sum)
                patch_temp_matrix_neg_results.append(neg_matrix_sum)
                patch_temp_offset_pos_results.append(pos_offset_sum)
                patch_temp_offset_neg_results.append(neg_offset_sum)

            i_pos_filter_matrix = torch.stack(patch_temp_matrix_pos_results, dim=0)
            i_neg_filter_matrix = torch.stack(patch_temp_matrix_neg_results, dim=0)

            i_pos_filter_offset = torch.stack(patch_temp_offset_pos_results, dim=0)
            i_neg_filter_offset = torch.stack(patch_temp_offset_neg_results, dim=0)

            filter_temp_matrix_pos_results.append(i_pos_filter_matrix)
            filter_temp_matrix_neg_results.append(i_neg_filter_matrix)

            filter_temp_offset_pos_results.append(i_pos_filter_offset)
            filter_temp_offset_neg_results.append(i_neg_filter_offset)

        pos_matrix = torch.cat(filter_temp_matrix_pos_results, dim=0)
        neg_matrix = torch.cat(filter_temp_matrix_neg_results, dim=0)

        pos_offset = torch.cat(filter_temp_offset_pos_results, dim=0)
        neg_offset = torch.cat(filter_temp_offset_neg_results, dim=0)

        if device.type == "cuda":
            pos_matrix = pos_matrix.cpu()
            neg_matrix = neg_matrix.cpu()
            pos_offset = pos_offset.cpu()
            neg_offset = neg_offset.cpu()

        # # Converting to numpy ndarray
        # pos_matrix = pos_matrix.numpy()
        # neg_matrix = neg_matrix.numpy()
        # pos_offset = pos_offset.numpy()
        # neg_offset = neg_offset.numpy()

        upper = LinearFunctions(pos_matrix, pos_offset)
        lower = LinearFunctions(neg_matrix, neg_offset)

        return SymbolicLinearBounds(lower, upper)
-------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/layers/maxpool.py: --------------------------------------------------------------------------------
import numpy as np
import torch

from
pynever.nodes import MaxPoolNode 5 | from pynever.strategies.abstraction.bounds_propagation.bounds import SymbolicLinearBounds, HyperRectangleBounds 6 | from pynever.strategies.abstraction.linearfunctions import LinearFunctions 7 | 8 | #NON COMPLETED 9 | 10 | class MaxPoolLinearization: 11 | def __init__(self): 12 | pass 13 | 14 | def compute_output_equation(self, max_pool_node: MaxPoolNode, input_dim: tuple, 15 | sym_inputs: SymbolicLinearBounds, numeric_inputs: HyperRectangleBounds): 16 | # Ensure the input dimension is 3D: (out_channels, out_height, out_width) 17 | if len(input_dim) != 3: 18 | raise ValueError("MaxPoolLinearization is only supported for 3-dimensional inputs") 19 | 20 | # Check MaxPool constraints: no padding, dilation, or ceil_mode 21 | if max_pool_node.dilation not in [(1, 1), None]: 22 | raise ValueError("Only dilation of (1, 1) or None is supported") 23 | if max_pool_node.ceil_mode: 24 | raise ValueError("MaxPool ceil_mode is not supported") 25 | 26 | # Check that the flattened vectors in input are compatible with the input_dim 27 | if input_dim[0] * input_dim[1] * input_dim[2] != numeric_inputs.size: 28 | raise ValueError("The product of the input_dim dimensions id different from the numeric_inputs size") 29 | 30 | # Check that the flattened vectors in input are compatible with the input_dim 31 | if input_dim[0] * input_dim[1] * input_dim[2] != sym_inputs.size: 32 | raise ValueError("The product of the input_dim dimensions id different from the sym_input size") 33 | 34 | def calculate_output_size(input_size, kernel_size, stride, padding, dilation=1): 35 | """Calculate output size of the max pooling operation.""" 36 | return int(np.floor((input_size + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)) 37 | 38 | # Calculate output height and width 39 | output_channel = input_dim[0] 40 | 41 | if type(max_pool_node.padding) == int: 42 | padding_h = max_pool_node.padding 43 | padding_w = max_pool_node.padding 44 | padding = 
max_pool_node.padding 45 | 46 | elif type(max_pool_node.padding) == tuple and len(max_pool_node.padding) == 2: 47 | padding_h = max_pool_node.padding[0] 48 | padding_w = max_pool_node.padding[1] 49 | padding = (padding_h, padding_w) 50 | 51 | elif type(max_pool_node.padding) == tuple and len(max_pool_node.padding) == 4: 52 | if max_pool_node.padding[2] != max_pool_node.padding[3] or max_pool_node.padding[1] != max_pool_node.padding[0]: 53 | raise ValueError("Only symmetrical padding is supported. Left must be equal to right padding as well as top and bottom padding") 54 | padding_h = max_pool_node.padding[2] + max_pool_node.padding[3] 55 | padding_w = max_pool_node.padding[0] + max_pool_node.padding[1] 56 | padding = (padding_h, padding_w) 57 | 58 | 59 | 60 | else: 61 | raise ValueError("MaxPool padding format is not supported. It can be only an integer or a tuple of integers of 2 or 4 values.") 62 | 63 | 64 | output_height = calculate_output_size(input_dim[1], max_pool_node.kernel_size[0], 65 | max_pool_node.stride[0], padding_h) 66 | output_width = calculate_output_size(input_dim[2], max_pool_node.kernel_size[1], 67 | max_pool_node.stride[1], padding_w) 68 | 69 | # Dimension of elements of the output flattened 70 | output_flattened = output_height * output_width * output_channel 71 | c_output_flattened = output_height * output_width 72 | 73 | # Number of variables to express the equations 74 | num_variables = sym_inputs.get_lower().matrix.shape[1] 75 | 76 | # Reshaping input symb and numeric lower and upper bounds 77 | lower_matrix = sym_inputs.lower.matrix.reshape(output_channel, -1, num_variables) 78 | lower_offset = sym_inputs.lower.offset.reshape(output_channel, -1) 79 | 80 | upper_matrix = sym_inputs.upper.matrix.reshape(output_channel, -1, num_variables) 81 | upper_offset = sym_inputs.upper.offset.reshape(output_channel, -1) 82 | 83 | numeric_lower = numeric_inputs.get_lower().reshape(output_channel, -1) 84 | numeric_upper = 
numeric_inputs.get_upper().reshape(output_channel, -1) 85 | 86 | # Generating output symb and numeric lower and upper bounds 87 | output_lower_matrix = np.zeros(shape = (output_channel, c_output_flattened, num_variables), dtype=np.float32) 88 | output_lower_offset = np.zeros(shape = (output_channel, c_output_flattened), dtype=np.float32) 89 | 90 | output_upper_matrix = np.zeros(shape = (output_channel, c_output_flattened, num_variables), dtype=np.float32) 91 | output_upper_offset = np.zeros(shape = (output_channel, c_output_flattened), dtype=np.float32) 92 | 93 | output_numeric_lower_bounds = np.zeros(shape = (output_channel, c_output_flattened), dtype=np.float32) 94 | output_numeric_upper_bounds = np.zeros(shape = (output_channel, c_output_flattened), dtype=np.float32) 95 | 96 | indexes = torch.arange(input_dim[1] * input_dim[2]).view(1, input_dim[1], input_dim[2]) 97 | indexes = indexes.to(dtype=torch.float) 98 | Unfold = torch.nn.Unfold(kernel_size=max_pool_node.kernel_size, padding=padding, stride=max_pool_node.stride) 99 | patches = Unfold(indexes) 100 | patches = patches.numpy() 101 | patches = patches.T 102 | 103 | for c in range(output_channel): 104 | c_lower_matrix = lower_matrix[c, :] 105 | c_upper_matrix = upper_matrix[c, :] 106 | 107 | c_lower_offset = lower_offset[c, :] 108 | c_upper_offset = upper_offset[c, :] 109 | 110 | c_numeric_lower = numeric_lower[c, :] 111 | c_numeric_upper = numeric_upper[c, :] 112 | 113 | for index, patch in enumerate(patches): 114 | patch = patch.astype(np.int32) 115 | extracted_min = c_numeric_lower[patch] 116 | min_index = patch[np.argmax(extracted_min)] 117 | 118 | # Get maximum lower values, to handle rare cases where there are multiple max values 119 | min_value = np.max(extracted_min) 120 | min_values = np.where(extracted_min == min_value) 121 | min_indexes = patch[min_values] 122 | 123 | extracted_max = c_numeric_upper[patch] 124 | max_value = np.max(extracted_max) 125 | max_values = np.where(extracted_max == 
max_value) 126 | max_indexes = patch[max_values] 127 | intersection = np.intersect1d(min_indexes, max_indexes) 128 | 129 | 130 | if len(intersection) >= 0: 131 | output_upper_matrix[c, index] = c_upper_matrix[intersection[0], :] 132 | output_upper_offset[c, index] = c_upper_offset[intersection[0]] 133 | output_numeric_upper_bounds[c, index] = c_numeric_upper[intersection[0]] 134 | 135 | output_lower_matrix[c, index] = c_lower_matrix[intersection[0], :] 136 | output_lower_offset[c, index] = c_lower_offset[intersection[0]] 137 | output_numeric_lower_bounds[c, index] = c_numeric_lower[intersection[0]] 138 | else: 139 | output_upper_offset[c, index] = c_upper_offset[max_indexes[0], :] 140 | output_numeric_upper_bounds[c, index] = c_numeric_upper[max_indexes[0], :] 141 | output_lower_matrix[c, index] = c_lower_matrix[min_indexes[0], :] 142 | output_lower_offset[c, index] = c_lower_offset[min_indexes[0]] 143 | output_numeric_lower_bounds[c, index] = c_numeric_lower[min_indexes[0]] 144 | 145 | 146 | 147 | # Check that the flattened output is equal to the output_channel * output_height * output_width 148 | output_lower_matrix = output_lower_matrix.reshape(-1, c_output_flattened, num_variables) 149 | output_lower_offset = output_lower_offset.reshape(-1) 150 | 151 | output_upper_matrix = output_upper_matrix.reshape(-1, c_output_flattened, num_variables) 152 | output_upper_offset = output_upper_offset.reshape(-1) 153 | 154 | output_numeric_lower_bounds = output_numeric_lower_bounds.reshape(-1) 155 | output_numeric_upper_bounds = output_numeric_upper_bounds.reshape(-1) 156 | 157 | output_hyperect = HyperRectangleBounds(output_numeric_lower_bounds, output_numeric_upper_bounds) 158 | lower = LinearFunctions(output_lower_matrix, output_lower_offset) 159 | upper = LinearFunctions(output_upper_matrix, output_upper_offset) 160 | output_symb = SymbolicLinearBounds(lower, upper) 161 | 162 | return output_symb, output_hyperect 163 | 164 | 165 | 
"""
This file contains specialized methods that provide
the linearization of ReLU activation functions

"""
import torch
from torch import Tensor

from pynever import nodes
from pynever.exceptions import FixedConflictWithBounds
from pynever.strategies.abstraction import ABSTRACTION_PRECISION_GUARD
from pynever.strategies.abstraction.bounds_propagation.bounds import SymbolicLinearBounds, HyperRectangleBounds
from pynever.strategies.abstraction.linearfunctions import LinearFunctions


class LinearizeReLU:
    """
    This class provides the linearization for the ReLU function enhanced by information
    about currently active and inactive neurons

    """

    # When True, the equations of neurons fixed to be inactive are forced to zero
    # during propagation (see force_inactive_neurons). Disabled by default.
    USE_FIXED_NEURONS = False

    def __init__(self, fixed_neurons: dict, input_hyper_rect: HyperRectangleBounds):
        # Maps (layer_id, neuron_n) -> 0 (fixed inactive) or 1 (fixed active),
        # as read by extract_layer_active/inactive_from_fixed_neurons below
        self.fixed_neurons = fixed_neurons
        # Numeric bounds of the network input, used to concretize symbolic equations
        self.input_hyper_rect = input_hyper_rect

    def compute_output_linear_bounds(self, input_eq: SymbolicLinearBounds) -> SymbolicLinearBounds:
        """
        Set the equations to zero for the neurons that have been fixed to 0.
        This does not work well, at least for ACAS_XU.
        It seems to mess up the equations in a strange way.
        For instance, when there are no stable neurons, the equations are different from
        what we get with abstract propagation.
        Not sure if the problem is with abstract propagation or here.
        Could be abstract propagation as the bug I was getting was because
        the counter-example after using abstract propagation was not valid.
        However, the bug does not appear when we don't incorporate info from the fixed neurons.

        """

        # Concretize the input equations on the input hyper-rectangle, then relax ReLU
        lower_l, lower_u, upper_l, upper_u = input_eq.get_all_bounds(self.input_hyper_rect)
        lower, upper = LinearizeReLU.compute_symb_lin_bounds_equations(input_eq, lower_l, lower_u, upper_l, upper_u)

        return SymbolicLinearBounds(lower, upper)

    def compute_output_numeric_bounds(self, relu: nodes.ReLUNode, cur_numeric_bounds: HyperRectangleBounds,
                                      cur_symbolic_bounds: SymbolicLinearBounds) -> HyperRectangleBounds:
        """
        Compute the numeric post-activation bounds of the linearized ReLU function
        using the information about currently inactive neurons

        """

        layer_id = relu.identifier

        cur_layer_inactive = LinearizeReLU.extract_layer_inactive_from_fixed_neurons(self.fixed_neurons, layer_id)

        # Exact interval propagation through ReLU: clip both bounds at zero
        cur_layer_output_num_bounds = HyperRectangleBounds(
            torch.max(cur_numeric_bounds.get_lower(), torch.zeros(cur_numeric_bounds.get_size())),
            torch.max(cur_numeric_bounds.get_upper(), torch.zeros(cur_numeric_bounds.get_size())))

        # Optionally zero out the neurons that the current branch fixed to be inactive
        if LinearizeReLU.USE_FIXED_NEURONS:
            LinearizeReLU.force_inactive_neurons(cur_symbolic_bounds, cur_layer_output_num_bounds, cur_layer_inactive)

        return cur_layer_output_num_bounds

    @staticmethod
    def compute_relu_equation(preact_num_lower, preact_num_upper):
        # Build the triangle relaxation of ReLU from the numeric pre-activation bounds,
        # returning both the symbolic relaxation and the numeric post-activation bounds
        lower_relu_eq, postact_lower = LinearizeReLU.get_relu_relax_lower_bound_equation(preact_num_lower,
                                                                                        preact_num_upper)
        upper_relu_eq, postact_upper = LinearizeReLU.get_relu_relax_upper_bound_equation(preact_num_lower,
                                                                                        preact_num_upper)

        return SymbolicLinearBounds(lower_relu_eq, upper_relu_eq), HyperRectangleBounds(postact_lower, postact_upper)

    @staticmethod
    def get_relu_relax_lower_bound_equation(preact_lower_bounds, preact_upper_bounds):
        """
        The lower bound of unstable nodes is either 0, or
        the linear relaxation of the preactivation (hence, the slope).

        The latter is the case when the upper bound is greater than or equal to the absolute value of the lower bound,
        thus resulting in a triangle of smaller area than the one formed by 0.

        The former is the case when the absolute value of the lower bound is greater than the upper bound,
        thus resulting in a triangle of smaller area than the one formed by the slope.
        """
        size = len(preact_lower_bounds)

        # matrix and offset for the relaxation; identity means "pass through unchanged"
        matrix = torch.eye(size)
        offset = torch.zeros(size)

        # NOTE(review): the legacy Tensor(...) constructor may share storage with
        # preact_lower_bounds; the in-place updates below assume an independent
        # copy — confirm, or use .clone() explicitly
        postact_lower_bounds = Tensor(preact_lower_bounds)

        for i in range(size):
            if preact_lower_bounds[i] >= 0:
                # the lower bound is exactly the preactivation
                # it remains 1
                pass

            elif preact_upper_bounds[i] >= -preact_lower_bounds[i]:
                # Unstable node, lower bound is linear relaxation of the equation
                k = preact_upper_bounds[i] / (preact_upper_bounds[i] - preact_lower_bounds[i])
                matrix[i][i] = k
                postact_lower_bounds[i] *= k

            else:  # upper[i] <= 0 (inactive node)
                # or
                # -lower[i] > upper[i]
                # lower bound is 0
                matrix[i][i] = 0
                postact_lower_bounds[i] = 0

        return LinearFunctions(matrix, offset), postact_lower_bounds

    @staticmethod
    def get_relu_relax_upper_bound_equation(preact_lower_bounds, preact_upper_bounds):
        """
        Compute the resulting upper bound equation after relaxing ReLU,
        given a preactivation upper bound equation.

        input_bounds are required for computing the concrete bounds.
        """
        size = len(preact_lower_bounds)

        # matrix and offset for the relaxation; identity means "pass through unchanged"
        matrix = torch.eye(size)
        offset = torch.zeros(size)

        # NOTE(review): same possible storage-sharing concern as in
        # get_relu_relax_lower_bound_equation — confirm Tensor(...) copies
        postact_upper_bounds = Tensor(preact_upper_bounds)
        for i in range(size):
            if preact_lower_bounds[i] >= 0:
                # the upper bound is exactly the preactivation
                # it remains 1
                pass

            elif preact_upper_bounds[i] >= 0:
                # Unstable node - linear relaxation of preactivation:
                # the chord from (l, 0) to (u, u), with slope k and offset -l*k
                k = preact_upper_bounds[i] / (preact_upper_bounds[i] - preact_lower_bounds[i])
                matrix[i][i] = k
                offset[i] = - preact_lower_bounds[i] * k

            else:  # preact_upper_bounds[i] <= 0 (inactive node)
                # The upper bound is 0
                matrix[i][i] = 0
                postact_upper_bounds[i] = 0

        return LinearFunctions(matrix, offset), postact_upper_bounds

    @staticmethod
    def check_and_enforce_fixed_constraints(relu_input_eq, preact_bounds, fixed_neurons, layer_id):
        """
        We need to check if the bounds do not conflict with the currently fixed neurons.
        That could happen if we haven't detected that the current branch is infeasible.
        That could happen because we are dealing with approximated bounds.

        """

        current_layer_inactive = LinearizeReLU.extract_layer_inactive_from_fixed_neurons(fixed_neurons, layer_id)
        current_layer_active = LinearizeReLU.extract_layer_active_from_fixed_neurons(fixed_neurons, layer_id)

        # Work on copies so the caller's bounds/equations are not mutated
        new_bounds = preact_bounds.clone()

        new_eq = SymbolicLinearBounds(relu_input_eq.lower.clone(), relu_input_eq.upper.clone())

        for neuron_n in current_layer_active:
            if preact_bounds.upper[neuron_n] < 0:
                raise FixedConflictWithBounds("A neuron has been fixed to be positive, "
                                              "but the bounds are negative. The current branch is not viable.")

        for neuron_n in current_layer_inactive:
            if preact_bounds.lower[neuron_n] > 0:
                raise FixedConflictWithBounds("A neuron has been fixed to be negative, "
                                              "but the bounds are positive. The current branch is not viable.")

            if preact_bounds.upper[neuron_n] > 0:
                # Zero out the equations of the neuron fixed to be inactive
                new_eq.lower.matrix[neuron_n] = 0 * new_eq.lower.matrix[neuron_n]
                new_eq.lower.offset[neuron_n] = 0

                new_eq.upper.matrix[neuron_n] = 0 * new_eq.upper.matrix[neuron_n]
                new_eq.upper.offset[neuron_n] = 0

                # Clamp the numeric bounds just below zero so the neuron reads as inactive
                new_bounds.lower[neuron_n] = -ABSTRACTION_PRECISION_GUARD
                new_bounds.upper[neuron_n] = -ABSTRACTION_PRECISION_GUARD  # TODO is the sign correct?

        return new_eq, new_bounds

    @staticmethod
    def extract_layer_active_from_fixed_neurons(fixed_neurons: dict, layer_id: str) -> list[int]:
        """Return the neuron indices of layer_id fixed to be active (value == 1)"""
        return [neuron_n for ((lay_n, neuron_n), value) in fixed_neurons.items()
                if lay_n == layer_id and value == 1]

    @staticmethod
    def extract_layer_inactive_from_fixed_neurons(fixed_neurons: dict, layer_id: str) -> list[int]:
        """Return the neuron indices of layer_id fixed to be inactive (value == 0)"""
        return [neuron_n for ((lay_n, neuron_n), value) in fixed_neurons.items()
                if lay_n == layer_id and value == 0]

    @staticmethod
    def force_inactive_neurons(relu_eq, postact_bounds, current_layer_inactive):
        """Zero out, in place, the equations and bounds of neurons fixed to be inactive"""
        for neuron_n in current_layer_inactive:
            if postact_bounds.lower[neuron_n] > 0:
                raise Exception("A neuron is supposed to be fixed to be negative, "
                                "but the bounds are positive. A conflict must have been detected before.")

            if postact_bounds.upper[neuron_n] > 0:
                relu_eq.lower.matrix[neuron_n] = 0 * relu_eq.lower.matrix[neuron_n]
                relu_eq.lower.offset[neuron_n] = 0
                relu_eq.upper.matrix[neuron_n] = 0 * relu_eq.upper.matrix[neuron_n]
                relu_eq.upper.offset[neuron_n] = 0
                postact_bounds.lower[neuron_n] = 0
                postact_bounds.upper[neuron_n] = 0

    @staticmethod
    def compute_symb_lin_bounds_equations(inputs, lower_l, lower_u, upper_l, upper_u):
        """
        Compose the per-neuron linear relaxation coefficients (k, b) with the
        input symbolic equations to obtain the post-ReLU symbolic bounds
        """
        k_lower, b_lower = LinearizeReLU.get_array_lin_lower_bound_coefficients(lower_l, lower_u)
        k_upper, b_upper = LinearizeReLU.get_array_lin_upper_bound_coefficients(upper_l, upper_u)

        lower_matrix = LinearizeReLU.get_transformed_matrix(inputs.get_lower().get_matrix(), k_lower)
        upper_matrix = LinearizeReLU.get_transformed_matrix(inputs.get_upper().get_matrix(), k_upper)

        lower_offset = LinearizeReLU.get_transformed_offset(inputs.get_lower().get_offset(), k_lower, b_lower)
        upper_offset = LinearizeReLU.get_transformed_offset(inputs.get_upper().get_offset(), k_upper, b_upper)

        lower = LinearFunctions(lower_matrix, lower_offset)
        upper = LinearFunctions(upper_matrix, upper_offset)

        return lower, upper

    @staticmethod
    def get_transformed_matrix(matrix, k):
        """Scale each equation row i by its relaxation slope k[i]"""
        return matrix * k[:, None]

    @staticmethod
    def get_transformed_offset(offset, k, b):
        """Scale each offset by its slope k[i] and add the relaxation intercept b[i]"""
        return offset * k + b

    @staticmethod
    def get_array_lin_lower_bound_coefficients(lower, upper):
        """Per-neuron (slope, intercept) of the lower ReLU relaxation"""
        ks = torch.zeros(len(lower))
        bs = torch.zeros(len(lower))

        for i in range(len(lower)):
            k, b = LinearizeReLU.get_lin_lower_bound_coefficients(lower[i], upper[i])
            ks[i] = k
            bs[i] = b

        return ks, bs

    @staticmethod
    def get_array_lin_upper_bound_coefficients(lower, upper):
        """Per-neuron (slope, intercept) of the upper ReLU relaxation"""
        ks = torch.zeros(len(lower))
        bs = torch.zeros(len(lower))

        for i in range(len(lower)):
            k, b = LinearizeReLU.get_lin_upper_bound_coefficients(lower[i], upper[i])
            ks[i] = k
            bs[i] = b

        return ks, bs

    @staticmethod
    def get_lin_lower_bound_coefficients(lower, upper):
        """Scalar (slope, intercept) of the lower ReLU relaxation for one neuron"""
        if lower >= 0:
            # Active neuron: identity
            return 1, 0

        if upper >= - lower:
            mult = upper / (upper - lower)
            return mult, 0

        # upper <= 0:
        # or
        # -lower > upper, i.e., 0 is a tighter lower bound than the slope mult above
        return 0, 0

    @staticmethod
    def get_lin_upper_bound_coefficients(lower, upper):
        """Scalar (slope, intercept) of the upper ReLU relaxation for one neuron"""
        if lower >= 0:
            # Active neuron: identity
            return 1, 0

        if upper <= 0:
            # Inactive neuron: constant 0
            return 0, 0

        # Unstable neuron: chord from (lower, 0) to (upper, upper)
        mult = upper / (upper - lower)
        add = -mult * lower

        return mult, add
from pynever.strategies.verification.ssbp.constants import BoundsDirection
from pynever.strategies.verification.statistics import VerboseBounds, BoundsStats
from pynever.utilities import xnor


class BoundsManager:
    """
    This class manages the symbolic bounds propagation framework for NeVer2.
    It is designed to handle feed-forward neural networks as computational graphs and can be instantiated
    either with fixed lower and upper bounds or with a structured verification property.

    Attributes
    ----------
    ref_nn: NeuralNetwork
        The reference NN that defines the structure of the graph
    abs_nn: AbsNeuralNetwork
        The abstract NN that contains the abstraction of the layers
    topological_stack: list[str]
        The topological sort of the layers in the NN used for the propagation
    direction: BoundsDirection
        The direction in which the bounds are computed, either forwards or backwards
    bounds_dict: VerboseBounds
        The data structure storing all bounds information
    input_bounds: HyperRectangleBounds
        The input bounds to propagate
    statistics: BoundsStats
        Statistics about neurons stability

    Methods
    ----------
    init_symbolic_bounds()
        Procedure to set up the initial symbolic bounds
    propagate_bounds(HyperRectangleBounds | None, SymbolicLinearBounds | None, LayerNode | None)
        Recursive procedure to propagate the bounds. When invoked as a root level, all parameters are None
    update_stats(AbsLayerNode, HyperRectangleBounds)
        Procedure to update statistics
    """

    def __init__(self, network: NeuralNetwork, prop: NeverProperty = None, input_bounds: HyperRectangleBounds = None,
                 parameters: SSBPVerificationParameters = None):
        # Exactly one of prop/input_bounds must provide the input region
        if prop is None and input_bounds is None:
            raise Exception('Please initialize with either a property or input bounds')

        # Initialize the parameters
        self.ref_nn: NeuralNetwork = network

        # Wrap the concrete network in the matching abstract counterpart
        if isinstance(self.ref_nn, SequentialNetwork):
            self.abs_nn: AbsNeuralNetwork = AbsSeqNetwork(self.ref_nn, parameters)
        elif isinstance(self.ref_nn, AcyclicNetwork):
            self.abs_nn: AbsNeuralNetwork = AbsAcyclicNetwork(self.ref_nn, parameters)
        else:
            raise NotImplementedError

        # Reversed so that compute_bounds can pop() the next layer from the end
        self.topological_stack: list[str] = self.ref_nn.get_topological_order(reverse=True)
        self.direction: BoundsDirection = parameters.bounds_direction if parameters else BoundsDirection.FORWARDS

        # Initialize the bounds data structure
        self.bounds_dict = VerboseBounds()

        # Initialize the bounds (the property takes precedence when given)
        self.input_bounds: HyperRectangleBounds = prop.to_numeric_bounds() if prop else input_bounds

        # Initialize the statistics
        self.statistics = BoundsStats()

    def init_symbolic_bounds(self) -> SymbolicLinearBounds:
        """Initialize the input symbolic linear bounds"""
        # Identity equations: each input variable is bounded by itself
        input_size = self.input_bounds.get_size()
        lower_equation = LinearFunctions(torch.eye(input_size), torch.zeros(input_size))
        upper_equation = LinearFunctions(torch.eye(input_size), torch.zeros(input_size))

        return SymbolicLinearBounds(lower_equation, upper_equation)

    def compute_bounds(self, in_num_bounds: HyperRectangleBounds | list[HyperRectangleBounds] | None = None,
                       in_sym_bounds: SymbolicLinearBounds | list[SymbolicLinearBounds] | None = None,
                       start_layer: LayerNode = None) -> VerboseBounds:
        """
        Entry point

        N.B. inside the propagation we use abstract layers but with their concrete counterpart identifier
        """
        if start_layer is None:
            # Set the identifier for the abstract layer equal to the concrete
            start_layer = self.abs_nn.get_abstract(self.ref_nn.get_first_node(), abs_id=False)

        if in_sym_bounds is None:
            in_sym_bounds = self.init_symbolic_bounds()
        if in_num_bounds is None:
            in_num_bounds = self.input_bounds

        # Pop the current layer
        self.topological_stack.pop()

        # Current layer data
        cur_layer = start_layer
        cur_sym_bounds = in_sym_bounds
        cur_num_bounds = in_num_bounds

        # Invariant: the stack is empty iff the current layer has no children
        # TODO remove after debugging
        assert xnor(len(self.ref_nn.get_children(self.abs_nn.get_concrete(cur_layer))) == 0,
                    len(self.topological_stack) == 0)

        # Compute bounds for this layer
        out_sym_bounds, out_num_bounds = cur_layer.forward_bounds(cur_sym_bounds, cur_num_bounds, self.input_bounds)
        self.update_stats(cur_layer, cur_num_bounds)

        # Fill the bounds dictionary for this layer
        self.bounds_dict.identifiers.append(cur_layer.identifier)
        self.bounds_dict.numeric_pre_bounds[cur_layer.identifier] = cur_num_bounds
        self.bounds_dict.symbolic_bounds[cur_layer.identifier] = out_sym_bounds
        self.bounds_dict.numeric_post_bounds[cur_layer.identifier] = out_num_bounds
        self.bounds_dict.statistics = self.statistics

        if len(self.topological_stack) == 0:
            return self.bounds_dict

        else:
            # Recurse on the next layer in topological order
            next_layer = self.abs_nn.get_abstract(self.ref_nn.nodes[self.topological_stack[-1]], abs_id=False)
            return self.compute_bounds(out_num_bounds, out_sym_bounds, start_layer=next_layer)

    def update_stats(self, layer: LayerNode, num_bounds: HyperRectangleBounds) -> None:
        """Update the statistics for this layer

        Parameters
        ----------
        layer: LayerNode
            The current layer
        num_bounds: HyperRectangleBounds
            The numeric pre-activation bounds
        """

        # Update statistics for ReLU layers only
        if not isinstance(layer, AbsReLUNode):
            return

        layer_id = layer.identifier

        for neuron in range(num_bounds.get_size()):
            l, u = num_bounds.get_dimension_bounds(neuron)
            status = check_stable(l, u)

            # Ensure every status bucket has an entry for this layer
            for relu in [ReLUStatus.ACTIVE, ReLUStatus.INACTIVE, ReLUStatus.UNSTABLE]:
                if layer_id not in self.statistics.stability_info[relu].keys():
                    self.statistics.stability_info[relu][layer_id] = list()

            match status:
                case ReLUStatus.ACTIVE:
                    self.statistics.stability_info[ReLUStatus.ACTIVE][layer_id].append(neuron)
                    self.statistics.stability_info['stable_count'] += 1

                case ReLUStatus.INACTIVE:
                    self.statistics.stability_info[ReLUStatus.INACTIVE][layer_id].append(neuron)
                    self.statistics.stability_info['stable_count'] += 1

                case ReLUStatus.UNSTABLE:
                    self.statistics.stability_info[ReLUStatus.UNSTABLE][layer_id].append(neuron)

                    # Compute approximation area
                    area = 0.5 * (u - l) * u
                    self.statistics.approximation_info[(layer_id, neuron)] = area

                case _:
                    raise NotImplementedError

    @staticmethod
    def get_symbolic_preactivation_bounds_at(bounds: VerboseBounds, layer: ConcreteLayerNode,
                                             nn: NeuralNetwork) -> list[SymbolicLinearBounds]:
        """Retrieve the preactivation symbolic bounds for the given layer"""
        # One entry per parent of the layer, in parent order
        return [bounds.symbolic_bounds[identifier]
                for identifier in
                [parent.identifier
                 for parent in nn.get_parents(layer)
                 ]
                ]
https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/strategies/abstraction/bounds_propagation/test/__init__.py -------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/test/debugging_launcher.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module never2_launcher.py 3 | 4 | Provides an entry point for the execution of NeVer2 on a single instance 5 | 6 | Authors: Stefano Demarchi, Pedro Achete 7 | 8 | """ 9 | 10 | import os 11 | import re 12 | import time 13 | import warnings 14 | from argparse import ArgumentParser 15 | 16 | import torch 17 | 18 | from pynever.strategies.abstraction.bounds_propagation.bounds import HyperRectangleBounds 19 | from pynever.strategies.abstraction.bounds_propagation.manager import BoundsManager 20 | from pynever.strategies.abstraction.networks import networks 21 | from pynever.strategies.conversion.converters.onnx import ONNXConverter 22 | from pynever.strategies.conversion.converters.pytorch import PyTorchConverter 23 | from pynever.strategies.conversion.representation import load_network_path, ONNXNetwork 24 | from pynever.strategies.verification.properties import VnnLibProperty 25 | 26 | warnings.simplefilter("error", RuntimeWarning) 27 | 28 | # Set the environment variable 29 | os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' 30 | 31 | POST_CONDITIONS_TEMP_FILE = r'C:\Users\andr3\PycharmProjects\pyNeVer\pynever\strategies\bounds_propagation\test\test\intermediate.vnnlib' 32 | 33 | 34 | def add_options(p: ArgumentParser): 35 | """ 36 | Common options for the execution of NeVer2 37 | 38 | """ 39 | 40 | # Options 41 | p.add_argument('-o', '--out', type=str, 42 | default='output.csv', help='output file for execution log') 43 | p.add_argument('-t', '--timeout', type=int, default=300, 44 | help='execution timeout in seconds') 45 | 46 | # Algorithm 47 | algorithm = 
p.add_subparsers(dest='algorithm', description='Verification algorithm to use') 48 | 49 | # SSBP 50 | ssbp = algorithm.add_parser('ssbp', description='Starset with bounds propagation') 51 | ssbp.add_argument('-p', '--params', nargs='?', default='', metavar='FILE', 52 | help='JSON file with parameters') 53 | 54 | # SSLP 55 | sslp = algorithm.add_parser('sslp', description='Starset with linear programs') 56 | sslp.add_argument('-s', '--strategy', choices=['overapprox', 'mixed', 'complete'], metavar='STRATEGY', 57 | default='complete', help='Verification strategy to use, complete by default') 58 | 59 | return p 60 | 61 | 62 | def neg_post_condition(prop_path: str) -> None: 63 | """ 64 | This method negates the property post-condition in order 65 | to represent both safety and unsafety properties 66 | 67 | Parameters 68 | ---------- 69 | prop_path : str 70 | Path to the property file 71 | 72 | """ 73 | 74 | def replace_with_negatives(match): 75 | number = match.group() 76 | if number not in ("0", "0.0") and match.string[match.start() - 1] != '_': 77 | number = float(number) 78 | negative_number = -number 79 | return str(negative_number) 80 | return number 81 | 82 | with open(prop_path, 'r', newline='') as cur_prop: 83 | with open(POST_CONDITIONS_TEMP_FILE, 'w', newline='') as new_prop: 84 | # List of post-condition constraints 85 | y_constraints = [] 86 | 87 | # Read file 88 | for row in cur_prop: 89 | # Filter declarations 90 | if not (row.find('Y') and row.find('assert')): 91 | new_prop.write(row) 92 | 93 | else: 94 | if row.find('<') > 0 and row.find('Y') > 0: 95 | if row.find('(* -1.0') > 0: 96 | temp_row = row.replace('(assert (<= (* -1.0', '(<=') 97 | temp_row = temp_row[:temp_row.find(')')] + temp_row[temp_row.find(')') + 1:] 98 | pattern = r'(?=') 102 | 103 | elif row.find('>') > 0 and row.find('Y') > 0: 104 | if row.find('(* -1.0') > 0: 105 | temp_row = row.replace('(assert (>= (* -1.0', '(>=') 106 | temp_row = temp_row[:temp_row.find(')')] + 
temp_row[temp_row.find(')') + 1:] 107 | pattern = r'(?=', '(<=') 111 | 112 | else: 113 | new_prop.write(row) 114 | continue 115 | 116 | temp_row = temp_row[:temp_row.rfind(')')] + temp_row[temp_row.rfind(')') + 1:] 117 | y_constraints.extend(temp_row) 118 | 119 | new_prop.write('(assert (or \n') 120 | 121 | for row in y_constraints: 122 | new_prop.write(row) 123 | new_prop.write('\n))') 124 | 125 | 126 | if __name__ == '__main__': 127 | nn_path = r"C:\Users\andr3\PycharmProjects\pyNeVer\pynever\strategies\bounds_propagation\test\mnist_fcnn_double_conv.onnx" 128 | prop_path = r"C:\Users\andr3\PycharmProjects\pyNeVer\pynever\strategies\bounds_propagation\test\test\loc_rob_property_0.vnnlib" 129 | safety_prop = False 130 | 131 | if not os.path.isfile(nn_path): 132 | raise Exception(f'Error: file {nn_path} not found!') 133 | 134 | if not os.path.isfile(prop_path): 135 | raise Exception(f'Error: file {prop_path} not found!') 136 | 137 | alt_repr = load_network_path(nn_path) 138 | 139 | if not isinstance(alt_repr, ONNXNetwork): 140 | raise Exception('The network is not an ONNX network!') 141 | 142 | network = ONNXConverter().to_neural_network(alt_repr) 143 | 144 | if not isinstance(network, networks.SequentialNetwork): 145 | raise Exception('The network is not a sequential network!') 146 | 147 | prop = VnnLibProperty(prop_path) 148 | 149 | if safety_prop: 150 | neg_post_condition(prop_path) 151 | prop = VnnLibProperty(os.path.abspath(POST_CONDITIONS_TEMP_FILE)) 152 | os.remove(POST_CONDITIONS_TEMP_FILE) 153 | 154 | lower = torch.rand(784) 155 | upper = lower + 10e-8 156 | input = HyperRectangleBounds(lower, upper) 157 | 158 | results_dict = BoundsManager(network, input_bounds=input) 159 | start_time = time.time() 160 | bounds_dict = results_dict.compute_bounds() 161 | num_bounds = bounds_dict.numeric_post_bounds[network.get_last_node().identifier] 162 | end_time = time.time() 163 | execution_time = end_time - start_time 164 | 165 | py_net = 
PyTorchConverter().from_neural_network(network) 166 | py_net.pytorch_network.eval() 167 | py_net.pytorch_network.float() 168 | output_nn = py_net.pytorch_network(lower.view(1, 1, 28, 28)) 169 | 170 | lower_output = bounds_dict.numeric_post_bounds[bounds_dict.identifiers[-1]].lower 171 | upper_output = bounds_dict.numeric_post_bounds[bounds_dict.identifiers[-1]].upper 172 | 173 | assert ((output_nn >= lower_output) & (output_nn <= upper_output)).all(), "Bounds errati" 174 | 175 | # Check bounds: all lower bounds must be smaller than the upper bounds 176 | for index, id in enumerate(bounds_dict.identifiers): 177 | print(f'Bounds #{index}: {id}') 178 | lower_num_bounds = bounds_dict.numeric_post_bounds[id].lower 179 | upper_num_bounds = bounds_dict.numeric_post_bounds[id].upper 180 | print(f'Dims {lower_num_bounds.shape}') 181 | assert (lower_num_bounds <= upper_num_bounds).all(), "Lower bounds greater than upper bounds" 182 | 183 | print(execution_time) 184 | 185 | exit(0) 186 | -------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/test/generate test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "initial_id", 7 | "metadata": { 8 | "collapsed": true 9 | }, 10 | "outputs": [], 11 | "source": [ 12 | "" 13 | ] 14 | }, 15 | { 16 | "metadata": { 17 | "ExecuteTime": { 18 | "end_time": "2025-02-02T16:39:22.230582Z", 19 | "start_time": "2025-02-02T16:32:32.427576Z" 20 | } 21 | }, 22 | "cell_type": "code", 23 | "source": [ 24 | "# SCRIPT FOR FC NETWORK\n", 25 | "\n", 26 | "import torch\n", 27 | "import torch.nn as nn\n", 28 | "import torch.optim as optim\n", 29 | "import torch.onnx\n", 30 | "import torchvision.transforms as transforms\n", 31 | "import torchvision.datasets as datasets\n", 32 | "from torch.utils.data import DataLoader\n", 33 | "\n", 34 | "# Definizione 
del modello FC con ReLU\n", 35 | "class FCNN(nn.Module):\n", 36 | " def __init__(self):\n", 37 | " super(FCNN, self).__init__()\n", 38 | " self.flatten = nn.Flatten()\n", 39 | " self.fc1 = nn.Linear(28 * 28, 128)\n", 40 | " self.relu1 = nn.ReLU()\n", 41 | " self.fc2 = nn.Linear(128, 64)\n", 42 | " self.relu2 = nn.ReLU()\n", 43 | " self.fc3 = nn.Linear(64, 10)\n", 44 | "\n", 45 | " def forward(self, x):\n", 46 | " x = self.flatten(x)\n", 47 | " x = self.relu1(self.fc1(x))\n", 48 | " x = self.relu2(self.fc2(x))\n", 49 | " x = self.fc3(x)\n", 50 | " return x\n", 51 | "\n", 52 | "# Caricamento dataset MNIST\n", 53 | "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n", 54 | "train_dataset = datasets.MNIST(root=\"./data\", train=True, transform=transform, download=True)\n", 55 | "train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n", 56 | "\n", 57 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", 58 | "\n", 59 | "# Inizializzazione del modello, loss e ottimizzatore\n", 60 | "model = FCNN().to(device)\n", 61 | "criterion = nn.CrossEntropyLoss()\n", 62 | "optimizer = optim.Adam(model.parameters(), lr=0.001)\n", 63 | "\n", 64 | "# Training loop\n", 65 | "for epoch in range(10):\n", 66 | " model.train()\n", 67 | " running_loss = 0.0\n", 68 | " for images, labels in train_loader:\n", 69 | " images, labels = images.to(device), labels.to(device)\n", 70 | " optimizer.zero_grad()\n", 71 | " outputs = model(images)\n", 72 | " loss = criterion(outputs, labels)\n", 73 | " loss.backward()\n", 74 | " optimizer.step()\n", 75 | " running_loss += loss.item()\n", 76 | " print(f\"Epoch {epoch+1}, Loss: {running_loss / len(train_loader):.4f}\")\n", 77 | "\n", 78 | "# Esportazione in ONNX\n", 79 | "dummy_input = torch.randn(1, 1, 28, 28, device=device)\n", 80 | "torch.onnx.export(model, dummy_input, \"mnist_fcnn.onnx\", input_names=[\"input\"], output_names=[\"output\"], 
opset_version=11)\n", 81 | "\n", 82 | "print(\"Modello esportato come mnist_fcnn.onnx\")\n" 83 | ], 84 | "id": "f72e2683eea139c2", 85 | "outputs": [ 86 | { 87 | "name": "stdout", 88 | "output_type": "stream", 89 | "text": [ 90 | "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n", 91 | "Failed to download (trying next):\n", 92 | "HTTP Error 404: Not Found\n", 93 | "\n", 94 | "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz\n", 95 | "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to ./data\\MNIST\\raw\\train-images-idx3-ubyte.gz\n" 96 | ] 97 | }, 98 | { 99 | "name": "stderr", 100 | "output_type": "stream", 101 | "text": [ 102 | "100.0%\n" 103 | ] 104 | }, 105 | { 106 | "name": "stdout", 107 | "output_type": "stream", 108 | "text": [ 109 | "Extracting ./data\\MNIST\\raw\\train-images-idx3-ubyte.gz to ./data\\MNIST\\raw\n", 110 | "\n", 111 | "Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\n", 112 | "Failed to download (trying next):\n", 113 | "HTTP Error 404: Not Found\n", 114 | "\n", 115 | "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz\n", 116 | "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz to ./data\\MNIST\\raw\\train-labels-idx1-ubyte.gz\n" 117 | ] 118 | }, 119 | { 120 | "name": "stderr", 121 | "output_type": "stream", 122 | "text": [ 123 | "100.0%\n" 124 | ] 125 | }, 126 | { 127 | "name": "stdout", 128 | "output_type": "stream", 129 | "text": [ 130 | "Extracting ./data\\MNIST\\raw\\train-labels-idx1-ubyte.gz to ./data\\MNIST\\raw\n", 131 | "\n", 132 | "Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\n", 133 | "Failed to download (trying next):\n", 134 | "HTTP Error 404: Not Found\n", 135 | "\n", 136 | "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz\n", 137 | "Downloading 
https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz to ./data\\MNIST\\raw\\t10k-images-idx3-ubyte.gz\n" 138 | ] 139 | }, 140 | { 141 | "name": "stderr", 142 | "output_type": "stream", 143 | "text": [ 144 | "100.0%\n" 145 | ] 146 | }, 147 | { 148 | "name": "stdout", 149 | "output_type": "stream", 150 | "text": [ 151 | "Extracting ./data\\MNIST\\raw\\t10k-images-idx3-ubyte.gz to ./data\\MNIST\\raw\n", 152 | "\n", 153 | "Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\n", 154 | "Failed to download (trying next):\n", 155 | "HTTP Error 404: Not Found\n", 156 | "\n", 157 | "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz\n", 158 | "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz to ./data\\MNIST\\raw\\t10k-labels-idx1-ubyte.gz\n" 159 | ] 160 | }, 161 | { 162 | "name": "stderr", 163 | "output_type": "stream", 164 | "text": [ 165 | "100.0%\n" 166 | ] 167 | }, 168 | { 169 | "name": "stdout", 170 | "output_type": "stream", 171 | "text": [ 172 | "Extracting ./data\\MNIST\\raw\\t10k-labels-idx1-ubyte.gz to ./data\\MNIST\\raw\n", 173 | "\n", 174 | "Epoch 1, Loss: 0.2672\n", 175 | "Epoch 2, Loss: 0.1114\n", 176 | "Epoch 3, Loss: 0.0786\n", 177 | "Epoch 4, Loss: 0.0590\n", 178 | "Epoch 5, Loss: 0.0465\n", 179 | "Epoch 6, Loss: 0.0404\n", 180 | "Epoch 7, Loss: 0.0349\n", 181 | "Epoch 8, Loss: 0.0280\n", 182 | "Epoch 9, Loss: 0.0257\n", 183 | "Epoch 10, Loss: 0.0219\n", 184 | "Modello esportato come mnist_fcnn.onnx\n" 185 | ] 186 | } 187 | ], 188 | "execution_count": 1 189 | }, 190 | { 191 | "metadata": { 192 | "ExecuteTime": { 193 | "end_time": "2025-02-04T11:12:07.261560Z", 194 | "start_time": "2025-02-04T11:10:00.317966Z" 195 | } 196 | }, 197 | "cell_type": "code", 198 | "source": [ 199 | "import torch\n", 200 | "import torch.nn as nn\n", 201 | "import torch.optim as optim\n", 202 | "import torch.onnx\n", 203 | "import torchvision.transforms as transforms\n", 204 | 
"import torchvision.datasets as datasets\n", 205 | "from torch.utils.data import DataLoader\n", 206 | "\n", 207 | "# Definizione del modello FC con un layer convoluzionale iniziale\n", 208 | "class FCNN(nn.Module):\n", 209 | " def __init__(self):\n", 210 | " super(FCNN, self).__init__()\n", 211 | " self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)\n", 212 | " self.relu_conv = nn.ReLU()\n", 213 | " self.flatten = nn.Flatten()\n", 214 | " self.fc1 = nn.Linear(16 * 28 * 28, 128)\n", 215 | " self.relu1 = nn.ReLU()\n", 216 | " self.fc2 = nn.Linear(128, 64)\n", 217 | " self.relu2 = nn.ReLU()\n", 218 | " self.fc3 = nn.Linear(64, 10)\n", 219 | "\n", 220 | " def forward(self, x):\n", 221 | " x = self.relu_conv(self.conv1(x))\n", 222 | " x = self.flatten(x)\n", 223 | " x = self.relu1(self.fc1(x))\n", 224 | " x = self.relu2(self.fc2(x))\n", 225 | " x = self.fc3(x)\n", 226 | " return x\n", 227 | "\n", 228 | "# Caricamento dataset MNIST\n", 229 | "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n", 230 | "train_dataset = datasets.MNIST(root=\"./data\", train=True, transform=transform, download=True)\n", 231 | "train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n", 232 | "\n", 233 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", 234 | "\n", 235 | "# Inizializzazione del modello, loss e ottimizzatore\n", 236 | "model = FCNN().to(device)\n", 237 | "criterion = nn.CrossEntropyLoss()\n", 238 | "optimizer = optim.Adam(model.parameters(), lr=0.001)\n", 239 | "\n", 240 | "# Training loop\n", 241 | "for epoch in range(10):\n", 242 | " model.train()\n", 243 | " running_loss = 0.0\n", 244 | " for images, labels in train_loader:\n", 245 | " images, labels = images.to(device), labels.to(device)\n", 246 | " optimizer.zero_grad()\n", 247 | " outputs = model(images)\n", 248 | " loss = criterion(outputs, labels)\n", 249 | " loss.backward()\n", 250 | " 
optimizer.step()\n", 251 | "        running_loss += loss.item()\n", 252 | "    print(f\"Epoch {epoch+1}, Loss: {running_loss / len(train_loader):.4f}\")\n", 253 | "\n", 254 | "# Esportazione in ONNX\n", 255 | "dummy_input = torch.randn(1, 1, 28, 28, device=device)\n", 256 | "torch.onnx.export(model, dummy_input, \"mnist_fcnn.onnx\", input_names=[\"input\"], output_names=[\"output\"], opset_version=11)\n", 257 | "\n", 258 | "print(\"Modello esportato come mnist_fcnn.onnx\")\n" 259 | ], 260 | "id": "17d7ab8dcbb01f6c", 261 | "outputs": [ 262 | { 263 | "name": "stdout", 264 | "output_type": "stream", 265 | "text": [ 266 | "Epoch 1, Loss: 0.2075\n", 267 | "Epoch 2, Loss: 0.0666\n", 268 | "Epoch 3, Loss: 0.0410\n", 269 | "Epoch 4, Loss: 0.0281\n", 270 | "Epoch 5, Loss: 0.0203\n", 271 | "Epoch 6, Loss: 0.0157\n", 272 | "Epoch 7, Loss: 0.0115\n", 273 | "Epoch 8, Loss: 0.0092\n", 274 | "Epoch 9, Loss: 0.0090\n", 275 | "Epoch 10, Loss: 0.0074\n", 276 | "Modello esportato come mnist_fcnn.onnx\n" 277 | ] 278 | } 279 | ], 280 | "execution_count": 2 281 | }, 282 | { 283 | "metadata": { 284 | "ExecuteTime": { 285 | "end_time": "2025-02-04T13:06:46.879458Z", 286 | "start_time": "2025-02-04T13:04:21.072995Z" 287 | } 288 | }, 289 | "cell_type": "code", 290 | "source": [ 291 | "import torch\n", 292 | "import torch.nn as nn\n", 293 | "import torch.optim as optim\n", 294 | "import torch.onnx\n", 295 | "import torchvision.transforms as transforms\n", 296 | "import torchvision.datasets as datasets\n", 297 | "from torch.utils.data import DataLoader\n", 298 | "\n", 299 | "# Definizione del modello con due layer convoluzionali\n", 300 | "class FCNN(nn.Module):\n", 301 | " def __init__(self):\n", 302 | " super(FCNN, self).__init__()\n", 303 | " self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)\n", 304 | " self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)\n", 305 | " self.relu2 = nn.ReLU()\n", 306 | " self.flatten = 
nn.Flatten()\n", 307 | " self.fc1 = nn.Linear(32 * 28 * 28, 128)\n", 308 | " self.relu3 = nn.ReLU()\n", 309 | " self.fc2 = nn.Linear(128, 64)\n", 310 | " self.relu4 = nn.ReLU()\n", 311 | " self.fc3 = nn.Linear(64, 10)\n", 312 | "\n", 313 | " def forward(self, x):\n", 314 | " x = self.conv1(x)\n", 315 | " x = self.relu2(self.conv2(x))\n", 316 | " x = self.flatten(x)\n", 317 | " x = self.relu3(self.fc1(x))\n", 318 | " x = self.relu4(self.fc2(x))\n", 319 | " x = self.fc3(x)\n", 320 | " return x\n", 321 | "\n", 322 | "# Caricamento dataset MNIST\n", 323 | "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n", 324 | "train_dataset = datasets.MNIST(root=\"./data\", train=True, transform=transform, download=True)\n", 325 | "train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n", 326 | "\n", 327 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", 328 | "\n", 329 | "# Inizializzazione del modello, loss e ottimizzatore\n", 330 | "model = FCNN().to(device)\n", 331 | "criterion = nn.CrossEntropyLoss()\n", 332 | "optimizer = optim.Adam(model.parameters(), lr=0.001)\n", 333 | "\n", 334 | "# Training loop\n", 335 | "for epoch in range(10):\n", 336 | " model.train()\n", 337 | " running_loss = 0.0\n", 338 | " for images, labels in train_loader:\n", 339 | " images, labels = images.to(device), labels.to(device)\n", 340 | " optimizer.zero_grad()\n", 341 | " outputs = model(images)\n", 342 | " loss = criterion(outputs, labels)\n", 343 | " loss.backward()\n", 344 | " optimizer.step()\n", 345 | " running_loss += loss.item()\n", 346 | " print(f\"Epoch {epoch+1}, Loss: {running_loss / len(train_loader):.4f}\")\n", 347 | "\n", 348 | "# Esportazione in ONNX\n", 349 | "dummy_input = torch.randn(1, 1, 28, 28, device=device)\n", 350 | "torch.onnx.export(model, dummy_input, \"mnist_fcnn_double_conv.onnx\", input_names=[\"input\"], output_names=[\"output\"], opset_version=11)\n", 351 | "\n", 352 | 
"print(\"Modello esportato come mnist_fcnn_double_conv.onnx\")" 353 | ], 354 | "id": "5faf4647dea7936e", 355 | "outputs": [ 356 | { 357 | "name": "stdout", 358 | "output_type": "stream", 359 | "text": [ 360 | "Epoch 1, Loss: 0.1537\n", 361 | "Epoch 2, Loss: 0.0458\n", 362 | "Epoch 3, Loss: 0.0287\n", 363 | "Epoch 4, Loss: 0.0192\n", 364 | "Epoch 5, Loss: 0.0154\n", 365 | "Epoch 6, Loss: 0.0110\n", 366 | "Epoch 7, Loss: 0.0116\n", 367 | "Epoch 8, Loss: 0.0097\n", 368 | "Epoch 9, Loss: 0.0082\n", 369 | "Epoch 10, Loss: 0.0062\n", 370 | "Modello esportato come mnist_fcnn_double_conv.onnx\n" 371 | ] 372 | } 373 | ], 374 | "execution_count": 4 375 | } 376 | ], 377 | "metadata": { 378 | "kernelspec": { 379 | "display_name": "Python 3", 380 | "language": "python", 381 | "name": "python3" 382 | }, 383 | "language_info": { 384 | "codemirror_mode": { 385 | "name": "ipython", 386 | "version": 2 387 | }, 388 | "file_extension": ".py", 389 | "mimetype": "text/x-python", 390 | "name": "python", 391 | "nbconvert_exporter": "python", 392 | "pygments_lexer": "ipython2", 393 | "version": "2.7.6" 394 | } 395 | }, 396 | "nbformat": 4, 397 | "nbformat_minor": 5 398 | } 399 | -------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/test/generate_property.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "id": "initial_id", 6 | "metadata": { 7 | "collapsed": true, 8 | "ExecuteTime": { 9 | "end_time": "2025-02-03T09:11:25.730558Z", 10 | "start_time": "2025-02-03T09:11:25.629797Z" 11 | } 12 | }, 13 | "source": [ 14 | "import os\n", 15 | "import numpy as np\n", 16 | "\n", 17 | "\n", 18 | "def generate_lc_props(eps_noise: float, delta_tol: float, io_pairs: list, folder_path: str):\n", 19 | " # Property: x_i - eps_noise <= X_i <= x_i + eps_noise\n", 20 | " # y_j - delta_tol <= Y_j <= y_j + delta_tol\n", 21 | "\n", 22 | " # 
generate folder for properties if it doesn't exist\n", 23 | " os.makedirs(folder_path, exist_ok=True)\n", 24 | "\n", 25 | " i = 0\n", 26 | " for pair in io_pairs:\n", 27 | " if isinstance(pair[0], np.ndarray):\n", 28 | " pair_0 = pair[0].tolist()\n", 29 | " elif isinstance(pair[0], list):\n", 30 | " pair_0 = pair[0]\n", 31 | " else:\n", 32 | " raise ValueError(\"Input sample must be either numpy array or list.\")\n", 33 | "\n", 34 | " n_inputs = len(pair_0)\n", 35 | " n_outputs = len(pair[1])\n", 36 | "\n", 37 | " with open(f'{folder_path}/loc_rob_property_{i}.vnnlib', 'w') as prop_file:\n", 38 | " for n in range(n_inputs):\n", 39 | " prop_file.write(f'(declare-const X_{n} Real)\\n')\n", 40 | " prop_file.write('\\n')\n", 41 | "\n", 42 | " for n in range(n_outputs):\n", 43 | " prop_file.write(f'(declare-const Y_{n} Real)\\n')\n", 44 | " prop_file.write('\\n')\n", 45 | "\n", 46 | " for n in range(n_inputs):\n", 47 | " prop_file.write(f'(assert (>= X_{n} {pair[0][n] - eps_noise}))\\n')\n", 48 | " prop_file.write(f'(assert (<= X_{n} {pair[0][n] + eps_noise}))\\n')\n", 49 | " prop_file.write('\\n')\n", 50 | "\n", 51 | " for n in range(n_outputs):\n", 52 | " prop_file.write(f'(assert (>= Y_{n} {pair[1][n] - delta_tol}))\\n')\n", 53 | " prop_file.write(f'(assert (<= Y_{n} {pair[1][n] + delta_tol}))\\n')\n", 54 | "\n", 55 | " i += 1\n" 56 | ], 57 | "outputs": [], 58 | "execution_count": 1 59 | }, 60 | { 61 | "metadata": { 62 | "ExecuteTime": { 63 | "end_time": "2025-02-03T09:12:45.910438Z", 64 | "start_time": "2025-02-03T09:12:45.903437Z" 65 | } 66 | }, 67 | "cell_type": "code", 68 | "source": [ 69 | "input = np.ones(28*28) * 0.5\n", 70 | "eps_noise = 0.5\n", 71 | "output = np.ones(10)\n", 72 | "delta_tol = 0.6\n", 73 | "io_pairs = []\n", 74 | "io_pairs.append((input, output))\n", 75 | "\n", 76 | "generate_lc_props(eps_noise, delta_tol, io_pairs, \"test\")\n", 77 | "\n" 78 | ], 79 | "id": "2466b6325f15494a", 80 | "outputs": [], 81 | "execution_count": 3 82 | } 83 | ], 84 | 
"metadata": { 85 | "kernelspec": { 86 | "display_name": "Python 3", 87 | "language": "python", 88 | "name": "python3" 89 | }, 90 | "language_info": { 91 | "codemirror_mode": { 92 | "name": "ipython", 93 | "version": 2 94 | }, 95 | "file_extension": ".py", 96 | "mimetype": "text/x-python", 97 | "name": "python", 98 | "nbconvert_exporter": "python", 99 | "pygments_lexer": "ipython2", 100 | "version": "2.7.6" 101 | } 102 | }, 103 | "nbformat": 4, 104 | "nbformat_minor": 5 105 | } 106 | -------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/test/test/intermediate.vnnlib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/strategies/abstraction/bounds_propagation/test/test/intermediate.vnnlib -------------------------------------------------------------------------------- /pynever/strategies/abstraction/bounds_propagation/util.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import Tensor 3 | 4 | from pynever.strategies.abstraction import ABSTRACTION_PRECISION_GUARD 5 | from pynever.strategies.abstraction.bounds_propagation import ReLUStatus 6 | from pynever.strategies.abstraction.bounds_propagation.bounds import AbstractBounds 7 | from pynever.strategies.verification.statistics import VerboseBounds 8 | 9 | 10 | def check_stable(lb: float, ub: float) -> ReLUStatus: 11 | """Return the status of a ReLU neuron given the values of the lower and upper pre-activation bounds""" 12 | 13 | # Positive stable 14 | if lb >= ABSTRACTION_PRECISION_GUARD: 15 | return ReLUStatus.ACTIVE 16 | 17 | # Negative stable 18 | elif ub <= -ABSTRACTION_PRECISION_GUARD: 19 | return ReLUStatus.INACTIVE 20 | 21 | # Unstable 22 | else: 23 | return ReLUStatus.UNSTABLE 24 | 25 | 26 | def compute_lower(weights_minus: Tensor, 
weights_plus: Tensor, input_lower: Tensor, input_upper: Tensor) -> Tensor: 27 | """Procedure that computes the matrix of coefficients for a lower bounds linear function. 28 | 29 | Parameters 30 | ---------- 31 | weights_minus: Tensor 32 | The negative part of the weights 33 | weights_plus: Tensor 34 | The positive part of the weights 35 | input_lower: Tensor 36 | The lower input bounds 37 | input_upper: Tensor 38 | The upper input bounds 39 | 40 | Returns 41 | ----------- 42 | Tensor 43 | The lower bounds matrix 44 | """ 45 | return torch.matmul(weights_plus, input_lower) + torch.matmul(weights_minus, input_upper) 46 | 47 | 48 | def compute_upper(weights_minus: Tensor, weights_plus: Tensor, input_lower: Tensor, input_upper: Tensor) -> Tensor: 49 | """Procedure that computes the matrix of coefficients for an upper bounds linear function. 50 | 51 | Parameters 52 | ---------- 53 | weights_minus: Tensor 54 | The negative part of the weights 55 | weights_plus: Tensor 56 | The positive part of the weights 57 | input_lower: Tensor 58 | The lower input bounds 59 | input_upper: Tensor 60 | The upper input bounds 61 | 62 | Returns 63 | ----------- 64 | Tensor 65 | The upper bounds matrix 66 | """ 67 | return torch.matmul(weights_plus, input_upper) + torch.matmul(weights_minus, input_lower) 68 | 69 | 70 | def compute_max(weights: Tensor, input_bounds: AbstractBounds) -> Tensor: 71 | """Procedure that computes the maximum value of a weights matrix given some input bounds 72 | 73 | Parameters 74 | ---------- 75 | weights: Tensor 76 | The weights matrix 77 | input_bounds: AbstractBounds 78 | The input bounds 79 | 80 | Returns 81 | ---------- 82 | Tensor 83 | The computed output 84 | """ 85 | return compute_upper(torch.clamp(weights, max=0), torch.clamp(weights, min=0), 86 | input_bounds.get_lower(), input_bounds.get_upper()) 87 | 88 | 89 | def compute_min(weights: Tensor, input_bounds: AbstractBounds) -> Tensor: 90 | """Procedure that computes the minimum value of a weights 
matrix given some input bounds 91 | 92 | Parameters 93 | ---------- 94 | weights: Tensor 95 | The weights matrix 96 | input_bounds: AbstractBounds 97 | The input bounds 98 | 99 | Returns 100 | ---------- 101 | Tensor 102 | The computed output 103 | """ 104 | return compute_lower(torch.clamp(weights, max=0), torch.clamp(weights, min=0), 105 | input_bounds.get_lower(), input_bounds.get_upper()) 106 | 107 | 108 | def compute_overapproximation_volume(areas_map: dict) -> float: 109 | """Procedure that computes the volume of the approximation as the product of the areas. 110 | """ 111 | return torch.prod(Tensor(list(areas_map.values()))).item() 112 | 113 | 114 | def compute_layer_inactive_from_bounds_and_fixed_neurons(bounds: VerboseBounds, 115 | fixed_neurons: dict, 116 | layer_id: str) -> list[int]: 117 | """Procedure to obtain the inactive neurons for a given layer based on bounds and information about neurons. 118 | 119 | Parameters 120 | ---------- 121 | bounds: VerboseBounds 122 | The bounds information 123 | fixed_neurons: dict 124 | The fixed neurons so far 125 | layer_id: str 126 | The layer id 127 | 128 | Returns 129 | ---------- 130 | list[int] 131 | The list of computed inactive neurons and fixed inactive neurons in the layer 132 | """ 133 | return (bounds.statistics.stability_info[ReLUStatus.INACTIVE][layer_id] + 134 | [i for (lay_id, i), value in fixed_neurons.items() if lay_id == layer_id and value == 0]) 135 | 136 | 137 | def compute_layer_unstable_from_bounds_and_fixed_neurons(bounds: VerboseBounds, 138 | fixed_neurons: dict, 139 | layer_id: str) -> list[int]: 140 | """Procedure to obtain the unstable neurons for a given layer based on bounds and information about neurons. 
141 | 142 | Parameters 143 | ---------- 144 | bounds: VerboseBounds 145 | The bounds information 146 | fixed_neurons: dict 147 | The fixed neurons so far 148 | layer_id: str 149 | The layer id 150 | 151 | Returns 152 | ---------- 153 | list[int] 154 | The list of computed unstable neurons and fixed unstable neurons in the layer 155 | """ 156 | layer_unstable = [] 157 | for lay_id, neurons in bounds.statistics.stability_info[ReLUStatus.UNSTABLE].items(): 158 | if lay_id == layer_id: 159 | layer_unstable.extend(neurons) 160 | 161 | return [neuron_n for neuron_n in layer_unstable if (layer_id, neuron_n) not in fixed_neurons] 162 | 163 | 164 | def compute_unstable_from_bounds_and_fixed_neurons(bounds: VerboseBounds, fixed_neurons: dict) -> list[int]: 165 | """Procedure to obtain the overall unstable neurons based on bounds and information about neurons. 166 | 167 | Parameters 168 | ---------- 169 | bounds: VerboseBounds 170 | The bounds information 171 | fixed_neurons: dict 172 | The fixed neurons so far 173 | 174 | Returns 175 | ---------- 176 | list[int] 177 | The list of overall computed unstable neurons and fixed unstable neurons 178 | """ 179 | unstable = bounds.statistics.stability_info[ReLUStatus.UNSTABLE] 180 | return [neuron for neuron in unstable if neuron not in fixed_neurons] 181 | -------------------------------------------------------------------------------- /pynever/strategies/abstraction/linearfunctions.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | import torch 4 | 5 | 6 | class LinearFunctions: 7 | """ 8 | A class representing a set of n linear functions f(i) of m input variables x 9 | 10 | matrix is an (n x m) Tensor 11 | offset is an (n) Tensor 12 | 13 | f(i) = matrix[i]*x + offset[i] 14 | 15 | """ 16 | 17 | def __init__(self, matrix: torch.Tensor, offset: torch.Tensor): 18 | self.size = matrix.shape[0] 19 | self.matrix = matrix 20 | self.offset = offset 21 | 22 | def __repr__(self): 23 
| return "LinearFunctions({})".format(self.size) 24 | 25 | def clone(self): 26 | return LinearFunctions(copy.deepcopy(self.matrix), copy.deepcopy(self.offset)) 27 | 28 | def mask_zero_outputs(self, zero_outputs): 29 | mask = torch.diag( 30 | torch.Tensor([0 if neuron_n in zero_outputs else 1 for neuron_n in range(self.size)]) 31 | ) 32 | return LinearFunctions(torch.matmul(mask, self.matrix), torch.matmul(mask, self.offset)) 33 | 34 | def get_size(self) -> int: 35 | return self.size 36 | 37 | def get_matrix(self) -> torch.Tensor: 38 | return self.matrix 39 | 40 | def get_offset(self) -> torch.Tensor: 41 | return self.offset 42 | 43 | def compute_max_values(self, input_bounds) -> torch.Tensor: 44 | return torch.matmul(torch.clamp(self.matrix, min=0), input_bounds.get_upper()) + \ 45 | torch.matmul(torch.clamp(self.matrix, max=0), input_bounds.get_lower()) + \ 46 | self.offset 47 | 48 | def compute_min_values(self, input_bounds) -> torch.Tensor: 49 | return torch.matmul(torch.clamp(self.matrix, min=0), input_bounds.get_lower()) + \ 50 | torch.matmul(torch.clamp(self.matrix, max=0), input_bounds.get_upper()) + \ 51 | self.offset 52 | -------------------------------------------------------------------------------- /pynever/strategies/abstraction/networks.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import copy 3 | import time 4 | 5 | import pynever.strategies.abstraction.nodes as absnodes 6 | from pynever import networks, nodes 7 | from pynever.strategies.abstraction import LOGGER_LAYER 8 | from pynever.strategies.abstraction.bounds_propagation.bounds import HyperRectangleBounds 9 | from pynever.strategies.abstraction.star import AbsElement 10 | from pynever.strategies.verification.parameters import VerificationParameters 11 | 12 | 13 | # TODO update documentation 14 | 15 | 16 | class AbsNeuralNetwork(abc.ABC): 17 | """ 18 | An abstract class used for our internal representation of a generic NeuralNetwork for 
Abstract Interpretation. 19 | It consists of a graph of AbsLayerNodes. The properties of the computational graph are specialized in the 20 | concrete classes. The method forward and backward calls the corresponding methods in the AbsLayerNodes following the 21 | correct order to compute the output AbsElement. 22 | 23 | Attributes 24 | ---------- 25 | nodes : dict 26 | Dictionary containing str keys and AbsLayerNodes values. It contains the nodes of the graph, 27 | the identifier of the node of interest is used as a key in the nodes dictionary. 28 | 29 | Methods 30 | ---------- 31 | forward(AbsElement) 32 | Procedure which takes an AbsElement and computes the corresponding output AbsElement based on the AbsLayerNode 33 | of the network. 34 | 35 | """ 36 | 37 | corresponding_classes = { 38 | 'FullyConnectedNode': absnodes.AbsFullyConnectedNode, 39 | 'ConvNode': absnodes.AbsConvNode, 40 | 'ReshapeNode': absnodes.AbsReshapeNode, 41 | 'FlattenNode': absnodes.AbsFlattenNode, 42 | 'ReLUNode': absnodes.AbsReLUNode, 43 | 'ConcatNode': absnodes.AbsConcatNode, 44 | 'SumNode': absnodes.AbsSumNode, 45 | } 46 | 47 | def __init__(self, ref_network: networks.NeuralNetwork, parameters: VerificationParameters): 48 | self.nodes: dict[str, absnodes.AbsLayerNode] = {} 49 | self.ref_network = ref_network 50 | 51 | for node_id, node in ref_network.nodes.items(): 52 | self.nodes[f'ABS_{node_id}'] = AbsNeuralNetwork.__get_abstract_node_class(node)(f'ABS_{node_id}', 53 | node, parameters) 54 | 55 | @staticmethod 56 | def __get_abstract_node_class(node: nodes.ConcreteLayerNode) -> absnodes.AbsLayerNode.__class__: 57 | try: 58 | return AbsNeuralNetwork.corresponding_classes[type(node).__name__] 59 | except KeyError: 60 | raise Exception(f'Node {type(node).__name__} is not supported') 61 | 62 | @abc.abstractmethod 63 | def forward(self, abs_input: AbsElement | list[AbsElement]) -> AbsElement | list[AbsElement]: 64 | """ 65 | Compute the output AbsElement based on the input AbsElement and the 
characteristics of the 66 | concrete abstract transformers. 67 | 68 | Parameters 69 | ---------- 70 | abs_input : AbsElement 71 | The input abstract element. 72 | 73 | Returns 74 | ---------- 75 | AbsElement 76 | The AbsElement resulting from the computation corresponding to the abstract transformer. 77 | """ 78 | raise NotImplementedError 79 | 80 | def get_abstract(self, node: nodes.ConcreteLayerNode, abs_id: bool = True) -> absnodes.AbsLayerNode: 81 | node = self.nodes[f'ABS_{node.identifier}'] 82 | node.identifier = node.identifier.replace('ABS_', '') if not abs_id else node.identifier 83 | 84 | return node 85 | 86 | def get_concrete(self, absnode: absnodes.AbsLayerNode) -> nodes.ConcreteLayerNode: 87 | return self.ref_network.nodes[absnode.identifier.replace('ABS_', '', 1)] 88 | 89 | 90 | class AbsSeqNetwork(AbsNeuralNetwork): 91 | """ 92 | Concrete children of AbsNeuralNetwork representing a sequential AbsNeuralNetwork. 93 | It consists of a graph of LayerNodes. The computational graph of a SequentialNetwork must 94 | correspond to a standard list. The method forward and backward calls the corresponding methods 95 | in the AbsLayerNodes following the correct order to compute the output AbsElement. 96 | 97 | Methods 98 | ------- 99 | forward(AbsElement) 100 | Procedure which takes an AbsElement and computes the corresponding output AbsElement based on the AbsLayerNode 101 | of the network. 102 | 103 | """ 104 | 105 | def __init__(self, ref_network: networks.SequentialNetwork, parameters: VerificationParameters): 106 | super().__init__(ref_network, parameters) 107 | self.ref_network = ref_network 108 | self.bounds = None 109 | 110 | def set_bounds(self, layers_bounds: dict[str, HyperRectangleBounds]): 111 | self.bounds = layers_bounds 112 | 113 | def forward(self, abs_input: AbsElement) -> AbsElement: 114 | """ 115 | Compute the output AbsElement based on the input AbsElement and the characteristics of the 116 | concrete abstract transformers. 
117 | 118 | Parameters 119 | ---------- 120 | abs_input : AbsElement 121 | The input abstract element. 122 | 123 | Returns 124 | ---------- 125 | AbsElement 126 | The AbsElement resulting from the computation corresponding to the abstract transformer. 127 | """ 128 | 129 | current_node = self.get_abstract(self.ref_network.get_first_node()) 130 | 131 | while current_node is not None: 132 | time_start = time.perf_counter() 133 | 134 | if self.bounds: 135 | identifier = current_node.identifier.replace('ABS_', '') 136 | abs_input = current_node.forward_star(abs_input, self.bounds[identifier]) 137 | else: 138 | abs_input = current_node.forward_star(abs_input) 139 | 140 | time_end = time.perf_counter() 141 | 142 | LOGGER_LAYER.info(f"Computing starset for layer {current_node.identifier}. Current starset has dimension " 143 | f"{len(abs_input.stars)}. Time to compute: {time_end - time_start}s.") 144 | 145 | next_node = self.ref_network.get_next_node(self.get_concrete(current_node)) 146 | current_node = self.get_abstract(next_node) if next_node is not None else None 147 | 148 | return abs_input 149 | 150 | 151 | class AbsAcyclicNetwork(AbsNeuralNetwork): 152 | 153 | def __init__(self, ref_network: networks.AcyclicNetwork, parameters: VerificationParameters): 154 | super().__init__(ref_network, parameters) 155 | self.ref_network = ref_network 156 | self.input_ids: dict[str, str | None] = {k: self.get_abstract(v).identifier 157 | for k, v in self.ref_network.input_ids} 158 | 159 | def get_node_inputs(self, node: absnodes.AbsLayerNode) -> list[str]: 160 | c_node = self.get_concrete(node) 161 | 162 | if not self.ref_network.has_parents(c_node): 163 | return [k for k, v in self.input_ids.items() if node.identifier in v] 164 | 165 | return [self.get_abstract(parent).identifier for parent in self.ref_network.get_parents(c_node)] 166 | 167 | def forward(self, abs_inputs: list[AbsElement]) -> list[AbsElement]: 168 | """ 169 | Compute the output AbsElement based on the input 
AbsElement and the characteristics of the 170 | concrete abstract transformers. 171 | 172 | Parameters 173 | ---------- 174 | abs_inputs : List[AbsElement] 175 | The input abstract element. 176 | 177 | Returns 178 | ---------- 179 | List[AbsElement] 180 | The AbsElements resulting from the computation corresponding to the abstract transformer. 181 | """ 182 | 183 | abs_input_ids = [abs_elem.identifier for abs_elem in abs_inputs] 184 | 185 | if set(abs_input_ids) != set(self.input_ids.keys()): 186 | raise Exception("The IDs of the Abstract Elements do not corresponds to the Keys of the Input Edges Dict!") 187 | 188 | if None in self.input_ids.values(): 189 | raise Exception("Every Input in the should be associated to a Node") 190 | 191 | node_queue: list[absnodes.AbsLayerNode] = [self.get_abstract(n) for n in self.ref_network.get_roots()] 192 | temp_abs_inputs = copy.deepcopy(abs_inputs) 193 | 194 | while len(node_queue) != 0: 195 | 196 | current_node = node_queue.pop(0) 197 | input_ids = self.get_node_inputs(current_node) 198 | 199 | current_node_inputs = filter(lambda i: i.identifier in input_ids, temp_abs_inputs) 200 | 201 | current_abs_output = current_node.forward_star(current_node_inputs) 202 | 203 | current_abs_output.identifier = current_node.identifier 204 | temp_abs_inputs.append(current_abs_output) 205 | 206 | current_children = [self.get_abstract(n) for n in 207 | self.ref_network.get_children(self.get_concrete(current_node))] 208 | for child in current_children: 209 | if child not in node_queue: 210 | node_queue.append(child) 211 | 212 | leaves_ids = [self.get_abstract(leaf).identifier for leaf in self.ref_network.get_leaves()] 213 | final_outputs = filter(lambda fo: fo.identifier in leaves_ids, temp_abs_inputs) 214 | 215 | return final_outputs 216 | -------------------------------------------------------------------------------- /pynever/strategies/conversion/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/strategies/conversion/__init__.py -------------------------------------------------------------------------------- /pynever/strategies/conversion/converters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/strategies/conversion/converters/__init__.py -------------------------------------------------------------------------------- /pynever/strategies/conversion/representation.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import copy 3 | import os 4 | from abc import abstractmethod 5 | 6 | import onnx 7 | import torch 8 | from multipledispatch import dispatch 9 | 10 | import pynever.networks as networks 11 | 12 | 13 | class AlternativeRepresentation(abc.ABC): 14 | """ 15 | An abstract class used to represent an alternative representation for a neural network. 16 | 17 | Attributes 18 | ---------- 19 | identifier: str 20 | identifier for the alternative representation 21 | 22 | """ 23 | 24 | def __init__(self, path: str, identifier: str | None = None): 25 | self.path = path 26 | self.identifier = identifier 27 | 28 | if identifier is None: 29 | self.identifier = '.'.join(os.path.basename(path).split('.')[:-1]) 30 | 31 | @abstractmethod 32 | def save(self, new_path: str): 33 | raise NotImplementedError 34 | 35 | 36 | class ONNXNetwork(AlternativeRepresentation): 37 | """ 38 | A class used to represent a ONNX representation for a neural network. 39 | 40 | Attributes 41 | ---------- 42 | onnx_network: onnx.ModelProto 43 | Real ONNX network. 
44 | 45 | """ 46 | 47 | @dispatch(str) 48 | def __init__(self, path: str): 49 | super().__init__(path) 50 | 51 | try: 52 | self.onnx_network = onnx.load(self.path) 53 | except Exception: 54 | raise ValueError('Incorrect file for ONNX network') 55 | 56 | @dispatch(str, onnx.ModelProto) 57 | def __init__(self, identifier: str, model: onnx.ModelProto): 58 | super().__init__(f'{identifier}.onnx', identifier) 59 | 60 | self.onnx_network = copy.deepcopy(model) 61 | 62 | def save(self, new_path: str): 63 | onnx.save(self.onnx_network, new_path) 64 | 65 | 66 | class PyTorchNetwork(AlternativeRepresentation): 67 | """ 68 | A class used to represent a PyTorch representation for a neural network. 69 | 70 | Attributes 71 | ---------- 72 | pytorch_network: torch.nn.Module 73 | Real PyTorch network. 74 | 75 | """ 76 | 77 | @dispatch(str) 78 | def __init__(self, path: str): 79 | super().__init__(path) 80 | 81 | try: 82 | self.pytorch_network = torch.load(self.path) 83 | except Exception: 84 | raise ValueError('Incorrect file for PyTorch network') 85 | 86 | @dispatch(str, torch.nn.Module) 87 | def __init__(self, identifier: str, model: torch.nn.Module): 88 | super().__init__(f'{identifier}.pt', identifier) 89 | 90 | self.pytorch_network = copy.deepcopy(model) 91 | 92 | def save(self, new_path: str): 93 | torch.save(self.pytorch_network, new_path) 94 | 95 | 96 | class ConversionStrategy(abc.ABC): 97 | """ 98 | An abstract class used to represent a Conversion Strategy. 99 | """ 100 | 101 | @abc.abstractmethod 102 | def from_neural_network(self, network: networks.NeuralNetwork) -> AlternativeRepresentation: 103 | """ 104 | Convert the neural network of interest to an alternative representation determined in the concrete children. 105 | 106 | Parameters 107 | ---------- 108 | network: NeuralNetwork 109 | The neural network to convert. 
110 | 111 | Returns 112 | ---------- 113 | AlternativeRepresentation 114 | The alternative representation resulting from the conversion of the original network. 115 | """ 116 | raise NotImplementedError 117 | 118 | @abc.abstractmethod 119 | def to_neural_network(self, alt_rep: AlternativeRepresentation) -> networks.NeuralNetwork: 120 | """ 121 | Convert the alternative representation of interest to the internal one. 122 | 123 | Parameters 124 | ---------- 125 | alt_rep: AlternativeRepresentation 126 | The Alternative Representation to convert. 127 | 128 | Returns 129 | ---------- 130 | NeuralNetwork 131 | The Neural Network resulting from the conversion of Alternative Representation. 132 | """ 133 | raise NotImplementedError 134 | 135 | 136 | def load_network_path(path: str) -> AlternativeRepresentation: 137 | """ 138 | Method to load a network from a path in an Alternative Representation. 139 | 140 | Parameters 141 | ---------- 142 | path: str 143 | Path to the network. 144 | 145 | Returns 146 | ------- 147 | AlternativeRepresentation 148 | The AlternativeRepresentation object if the network format is supported; an Exception is raised otherwise. 
149 | 150 | """ 151 | 152 | extension = path.split('.')[-1] 153 | 154 | if extension in ['pt', 'pth']: 155 | return PyTorchNetwork(path) 156 | 157 | if extension == 'onnx': 158 | return ONNXNetwork(path) 159 | 160 | raise Exception('Only PyTorch and ONNX are currently supported') 161 | -------------------------------------------------------------------------------- /pynever/strategies/verification/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | VERIFICATION_LOGGER = logging.getLogger("pynever.strategies.verification") 4 | -------------------------------------------------------------------------------- /pynever/strategies/verification/parameters.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | from pynever.strategies.verification.ssbp.constants import ( 4 | RefinementStrategy, 5 | BoundsBackend, 6 | IntersectionStrategy, 7 | BoundsDirection 8 | ) 9 | 10 | 11 | # TODO add documentation 12 | 13 | class VerificationParameters(abc.ABC): 14 | pass 15 | 16 | 17 | class SSLPVerificationParameters(VerificationParameters): 18 | def __init__(self, heuristic: str = 'complete', neurons_to_refine: list | None = None): 19 | if heuristic not in ['overapprox', 'complete', 'mixed']: 20 | raise Exception(f'Selected heuristic {heuristic} is not valid') 21 | 22 | if heuristic == 'mixed' and neurons_to_refine is None: 23 | raise Exception('neurons_to_refine cannot be None if heuristic is mixed') 24 | 25 | self.heuristic = heuristic 26 | self.neurons_to_refine = neurons_to_refine 27 | self.compute_areas = True if self.heuristic == 'mixed' else False 28 | 29 | 30 | class SSBPVerificationParameters(VerificationParameters): 31 | def __init__(self, 32 | heuristic: RefinementStrategy = RefinementStrategy.INPUT_BOUNDS_CHANGE, 33 | bounds: BoundsBackend = BoundsBackend.SYMBOLIC, 34 | bounds_direction: BoundsDirection = BoundsDirection.FORWARDS, 35 | intersection: 
class NeverProperty:
    """
    An abstract class used to represent a generic property for a :class:`~pynever.networks.NeuralNetwork`.

    Attributes
    ----------
    in_coef_mat: torch.Tensor
        Matrix of the coefficients for the input constraints.
    in_bias_mat: torch.Tensor
        Matrix of the biases for the input constraints.
    out_coef_mat: list[torch.Tensor]
        Matrices of the coefficients for the output constraints.
    out_bias_mat: list[torch.Tensor]
        Matrices of the biases for the output constraints.
    """

    def __init__(self, in_coef_mat: torch.Tensor = None, in_bias_mat: torch.Tensor = None,
                 out_coef_mat: list[torch.Tensor] = None, out_bias_mat: list[torch.Tensor] = None):
        # Input constraints in normal form Cx <= d; the output property is a
        # disjunction of conjunctions (one matrix/bias pair per disjunct).
        self.in_coef_mat = in_coef_mat
        self.in_bias_mat = in_bias_mat
        self.out_coef_mat = out_coef_mat
        self.out_bias_mat = out_bias_mat

    def to_numeric_bounds(self) -> HyperRectangleBounds:
        """
        This method creates a :class:`~pynever.strategies.abstraction.bounds_propagation.bounds.HyperRectangleBounds`
        object from the property specification.
        If the property is already a hyper rectangle it just initializes the object, otherwise
        it returns the hyper rectangle approximation of the input property.

        Returns
        -------
        HyperRectangleBounds
            The hyper rectangle approximation of the input property
        """
        # TODO approximate if not already a hyperrectangle
        lbs = []
        ubs = []

        # NOTE(review): under the Cx <= d convention a +1 coefficient row
        # encodes x_i <= b (an upper bound) and a -1 row encodes -x_i <= b,
        # yet +1 rows are collected as lower bounds here and the -1 rows'
        # biases are not negated — compare the convention produced by
        # LocalRobustnessProperty.build_matrices and confirm this is intended.
        for i in range(self.in_bias_mat.shape[0]):
            if 1 in self.in_coef_mat[i, :]:
                lbs.append(self.in_bias_mat[i, :])
            else:
                ubs.append(self.in_bias_mat[i, :])

        # debug
        assert len(lbs) == len(ubs) == self.in_bias_mat.shape[0] // 2

        return HyperRectangleBounds(torch.Tensor(lbs), torch.Tensor(ubs))

    def to_star(self) -> Star:
        """
        This method creates the input star based on the property specification

        Returns
        -------
        Star
            The input star
        """
        return Star(self.in_coef_mat, self.in_bias_mat)

    def to_smt_file(self, filepath: str, input_id: str = 'X', output_id: str = 'Y'):
        """
        This method builds the SMT-LIB representation of the :class:`~pynever.strategies.verification.properties.NeVerProperty`,
        expressing the variables and the matrices as constraints in the corresponding logic

        Parameters
        ----------
        input_id: str, Optional
            Identifier of the input node (default: 'X')
        output_id: str, Optional
            Identifier of the output node (default: 'Y')
        filepath: str
            Path to the SMT-LIB file to create
        """
        with open(filepath, 'w+') as f:
            # Variables definition: one SMT constant per input/output dimension
            input_vars = [f"{input_id}_{i}" for i in range(self.in_coef_mat.shape[1])]
            if self.out_coef_mat:
                output_vars = [f"{output_id}_{i}" for i in range(self.out_coef_mat[0].shape[1])]
            else:
                output_vars = []

            f.write(';; --- INPUT VARIABLES ---\n')
            for v_name in input_vars:
                f.write(f"(declare-const {v_name} Real)\n")

            f.write('\n;; --- OUTPUT VARIABLES ---\n')
            for v_name in output_vars:
                f.write(f"(declare-const {v_name} Real)\n")

            # Constraints definition: infix rows are converted to prefix SMT-LIB
            f.write('\n;; --- INPUT CONSTRAINTS ---\n')

            infix_in_constraints = self.__create_infix_constraints(input_vars, self.in_coef_mat, self.in_bias_mat)
            for c in infix_in_constraints:
                prefix_smt_row = reading.ExpressionTreeConverter().build_from_infix(c).as_prefix()
                f.write(f"(assert {prefix_smt_row})\n")

            f.write('\n;; --- OUTPUT CONSTRAINTS ---\n')

            # Allow multiple output properties: each matrix/bias pair is one
            # disjunct; multiple disjuncts are wrapped in a single (or ...)
            infix_output_properties = []
            for out_mat, out_bias in zip(self.out_coef_mat, self.out_bias_mat):
                infix_constraints = self.__create_infix_constraints(output_vars, out_mat, out_bias)
                infix_output_properties.append(infix_constraints)

            if len(infix_output_properties) == 1:
                # Single disjunct: emit each row as a plain assert
                for c in infix_output_properties[0]:
                    prefix_smt_row = reading.ExpressionTreeConverter().build_from_infix(c).as_prefix()
                    f.write(f"(assert {prefix_smt_row})\n")
            else:
                # Multiple disjuncts: single-row disjuncts go in bare, multi-row
                # disjuncts are wrapped in an (and ...)
                s = '(assert (or '
                for p in infix_output_properties:
                    if len(p) == 1:
                        prefix_smt_row = reading.ExpressionTreeConverter().build_from_infix(p[0]).as_prefix()
                        s = s + '\n' + prefix_smt_row
                    else:
                        s = s + '(and '
                        for c in p:
                            prefix_smt_row = reading.ExpressionTreeConverter().build_from_infix(c).as_prefix()
                            s = s + '\n' + prefix_smt_row
                        s = s + ')\n'
                s = s + '))'
                f.write(s)

    @staticmethod
    def __create_infix_constraints(variables: list, coef_mat: torch.Tensor, bias_mat: torch.Tensor) -> list[str]:
        """Render each matrix row as an infix '(sum of terms) <= (bias)' string."""
        c_list = []

        for row in range(coef_mat.shape[0]):
            coef = coef_mat[row, :]
            bias = bias_mat[row][0]
            s = '('

            # Assign coefficients; a '+' is emitted only when another
            # non-zero coefficient follows
            for k in range(len(coef)):
                c = coef[k]
                if c != 0:
                    s = s + f"({float(c)} * {variables[k]})"
                    if k < len(coef) - 1 and any(coef[k + 1:]):
                        s = s + ' + '

            # Add bias preventing exponential representation: the SMT parser
            # cannot read '1e-05'-style floats, so fall back to a Fraction
            bias_repr = float(bias)

            if 'e' in str(bias_repr):
                bias_repr = Fraction(bias_repr)

            s = s + f") <= ({bias_repr})"
            c_list.append(s)

        return c_list
class VnnLibProperty(NeverProperty):
    """
    A class used to represent a VNN-LIB property. It directly loads
    the property from a `.vnnlib` file.
    """

    def __init__(self, filepath: str):
        smt_parser = reading.SmtPropertyParser(filepath)

        super().__init__(*smt_parser.parse_property())


class LocalRobustnessProperty(NeverProperty):
    """
    A local robustness property: the input is constrained to an epsilon-ball
    (infinity norm) around a sample, and the output property states that the
    given label is (or is not, when max_output is False) the maximal output.

    Attributes
    ----------
    sample: torch.Tensor
        The sample around which robustness is checked, shape (n_dims, 1).
    epsilon: float
        Radius of the perturbation ball.
    label: int
        The class label of interest.
    max_output: bool
        If True the property asserts the label output dominates all others.
    """

    def __init__(self, sample: torch.Tensor, epsilon: float, n_outputs: int, label: int, max_output: bool):
        super().__init__(*LocalRobustnessProperty.build_matrices(sample, epsilon, n_outputs, label, max_output))

    @staticmethod
    def build_matrices(sample: torch.Tensor, epsilon: float, n_outputs: int, label: int, max_output: bool) -> tuple[
        torch.Tensor, torch.Tensor, list[torch.Tensor], list[torch.Tensor]]:
        """
        Build the input and output constraint matrices in normal form Cx <= d.

        Raises
        ------
        InvalidDimensionError
            If the sample is not a column vector of shape (n_dims, 1).
        ValueError
            If label is out of range for n_outputs.
        """
        if sample.shape[1] != 1:
            raise InvalidDimensionError('Wrong shape for the sample, should be single-dimensional')

        # Input property: x_i <= s_i + eps and -x_i <= -s_i + eps per dimension
        n_dims = sample.shape[0]
        in_coef_mat = torch.zeros((2 * n_dims, n_dims))
        in_bias_mat = torch.zeros((2 * n_dims, 1))

        for i, x_i in enumerate(sample):
            # Iterating a 2-D tensor yields 1-element tensors, never Python
            # floats, so the previous isinstance(x_i, float) check rejected
            # every sample; convert explicitly instead.
            x_val = float(x_i)

            in_coef_mat[2 * i, i] = 1
            in_coef_mat[2 * i + 1, i] = -1

            in_bias_mat[2 * i] = x_val + epsilon
            in_bias_mat[2 * i + 1] = -x_val + epsilon

        # Output property: y_label - y_i >= 0 for every other output i
        if not 0 <= label < n_outputs:
            raise ValueError(f'label {label} is out of range for {n_outputs} outputs')

        out_coef_mat = torch.zeros((n_outputs - 1, n_outputs))
        out_bias_mat = torch.zeros((n_outputs - 1, 1))

        # Enumerate so row indices run 0 .. n_outputs-2: indexing rows by the
        # output id itself overflowed the (n_outputs - 1)-row matrix whenever
        # label != n_outputs - 1.
        other_outputs = sorted(set(range(n_outputs)) - {label})
        for row, i in enumerate(other_outputs):
            out_coef_mat[row, label] = 1
            out_coef_mat[row, i] = -1

        if not max_output:
            out_coef_mat = -out_coef_mat

        return in_coef_mat, in_bias_mat, [out_coef_mat], [out_bias_mat]
/pynever/strategies/verification/ssbp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/pynever/strategies/verification/ssbp/__init__.py -------------------------------------------------------------------------------- /pynever/strategies/verification/ssbp/constants.py: -------------------------------------------------------------------------------- 1 | import enum 2 | 3 | 4 | class RefinementTarget: 5 | """ 6 | This class represents the refinement target for the verification. 7 | """ 8 | def __init__(self, layer_id: str, neuron_n: int): 9 | self.layer_id = layer_id 10 | self.neuron_idx = neuron_n 11 | 12 | def __repr__(self): 13 | return f'({self.layer_id}, {self.neuron_idx})' 14 | 15 | def to_pair(self): 16 | return self.layer_id, self.neuron_idx 17 | 18 | 19 | class NeuronSplit(enum.Enum): 20 | """ 21 | This class is used as an enumerator to distinguish the two outputs 22 | of a neuron split (positive or negative) 23 | """ 24 | NEGATIVE = 0 25 | POSITIVE = 1 26 | 27 | 28 | class BoundsDirection(enum.Enum): 29 | """ 30 | This class is used as an enumerator to choose the direction of 31 | the bounds computation (forwards or backwards) 32 | """ 33 | FORWARDS = 0 34 | BACKWARDS = 1 35 | 36 | 37 | class BoundsBackend(enum.Enum): 38 | """ 39 | This class is used as an enumerator to represent different bounds propagation 40 | strategies. 41 | 42 | - symbolic bounds propagation from the Venus2 verifier 43 | """ 44 | SYMBOLIC = 1 45 | 46 | 47 | class RefinementStrategy(enum.Enum): 48 | """ 49 | This class is used as an enumerator to represent different refinement strategies. 
50 | 51 | - sequential refinement refines each neuron in the order they appear in the network 52 | - lowest approx selects the neuron that contributes with the lowest approximation 53 | - lowest approx - current layer selects the neuron that contributes with the lowest approximation in the layer 54 | - input bounds change selects the neuron that contributes most to change the input bounds when refined 55 | """ 56 | SEQUENTIAL = 1 57 | LOWEST_APPROX = 2 58 | LOWEST_APPROX_CURRENT_LAYER = 3 59 | INPUT_BOUNDS_CHANGE = 4 60 | 61 | 62 | class IntersectionStrategy(enum.Enum): 63 | """ 64 | This class is used as an enumerator to represent different intersection strategies. 65 | 66 | - star with LP propagates the star abstraction and uses an LP to check for an intersection 67 | - adaptive uses different (precise or approximate) intersection checks based on the state of the search 68 | """ 69 | STAR_LP = 1 70 | ADAPTIVE = 2 71 | -------------------------------------------------------------------------------- /pynever/strategies/verification/ssbp/propagation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from pynever import networks, nodes 4 | from pynever.strategies.abstraction.bounds_propagation import util 5 | from pynever.strategies.verification.statistics import VerboseBounds 6 | from pynever.strategies.abstraction.linearfunctions import LinearFunctions 7 | from pynever.strategies.abstraction.star import ExtendedStar 8 | 9 | 10 | def abs_propagation(star: ExtendedStar, network: networks.SequentialNetwork, bounds: VerboseBounds) -> ExtendedStar: 11 | """ 12 | This method performs the abstract propagation of a single star starting 13 | from a specific layer and neuron. 
def abs_propagation(star: ExtendedStar, network: networks.SequentialNetwork, bounds: VerboseBounds) -> ExtendedStar:
    """
    This method performs the abstract propagation of a single star starting
    from a specific layer and neuron. The output is a single star that uses
    approximation in the next layers

    Parameters
    ----------
    star: ExtendedStar
        The star to process
    network: networks.SequentialNetwork
        The neural network to propagate through
    bounds: VerboseBounds
        The bounds of the network layers

    Returns
    ----------
    ExtendedStar
        The resulting star approximate with the abstract propagation
    """
    # Without a reference layer there is nothing left to propagate
    if star.ref_layer is None:
        return star

    # Resume the propagation from the layer the star currently refers to
    start_index = network.get_index_from_id(star.ref_layer)
    for layer in network.layers_iterator(start_index):

        # Propagate fully connected entirely
        if isinstance(layer, nodes.FullyConnectedNode):
            # Need to expand bias since they are memorized like one-dimensional vectors in FC nodes.
            bias = layer.get_layer_bias_as_two_dimensional()
            star = star.single_fc_forward(layer.weight, bias)

        elif isinstance(layer, nodes.ConvNode):
            # The star generated by the convolution is interpreted as the linearization
            # provided by symbolic propagation
            star = make_star_from_bounds(bounds, layer.identifier)

        # Propagate ReLU starting from target
        elif isinstance(layer, nodes.ReLUNode):
            star = star.approx_relu_forward(bounds, layer.identifier)

        elif isinstance(layer, nodes.FlattenNode):
            # Do nothing
            continue

        # =======================================
        # There is a network with two useless
        # Reshape layers that do nothing, here
        # we filter them when this occurs
        # =======================================
        elif ((isinstance(layer, nodes.ReshapeNode) and isinstance(network.get_next_node(layer), nodes.ReshapeNode)) or
              (isinstance(layer, nodes.ReshapeNode) and isinstance(network.get_previous_node(layer),
                                                                   nodes.ReshapeNode))):
            # Do nothing
            continue

        else:
            raise NotImplementedError(f'Unsupported layer {layer.__class__}')

    return star
def propagate_and_init_star_before_relu_layer(star: ExtendedStar, bounds: VerboseBounds,
                                              network: networks.SequentialNetwork, skip: bool = True) -> ExtendedStar:
    """
    Compute the initial star which will always start from the first layer and
    where we will use the bounds to determine the inactive nodes,
    so that we could set the transformation for them to 0.

    Parameters
    ----------
    star: ExtendedStar
        The star to process
    bounds: VerboseBounds
        The bounds collection
    network: networks.SequentialNetwork
        The neural network
    skip: bool, Optional
        Flag forwarded to propagate_until_relu (default: True)

    Returns
    ----------
    ExtendedStar
        The star initialized before the next ReLU layer
    """
    new_star, relu_layer = propagate_until_relu(star, bounds, network, skip=skip)
    relu_layer_id = new_star.ref_layer

    if relu_layer is not None:
        # Zero out the transformation of neurons known to be inactive from
        # the bounds and the neurons already fixed by previous splits
        layer_inactive = util.compute_layer_inactive_from_bounds_and_fixed_neurons(bounds, new_star.fixed_neurons,
                                                                                   relu_layer_id)

        new_transformation = new_star.mask_for_inactive_neurons(layer_inactive)

        return ExtendedStar(new_star.get_predicate_equation(), new_transformation, ref_layer=relu_layer_id,
                            fixed_neurons=new_star.fixed_neurons, enforced_constraints=star.enforced_constraints,
                            input_differences=star.input_differences)

    return new_star


def propagate_until_relu(star: ExtendedStar, bounds: VerboseBounds, network: networks.SequentialNetwork, skip: bool) \
        -> tuple[ExtendedStar, nodes.ReLUNode | None]:
    """
    This function performs the star propagation throughout Fully Connected layers
    only, until a ReLU layer is encountered. This is used in order to process
    Fully Connected layers only once per cycle

    Parameters
    ----------
    star: ExtendedStar
        The star to process
    bounds: VerboseBounds
        The bounds collection
    network: networks.SequentialNetwork
        The neural network
    skip: bool
        Flag to signal end of propagation

    Returns
    ----------
    tuple[ExtendedStar, nodes.ReLUNode]
        The resulting star before the next ReLU layer and the ReLU layer
    """
    relu_layer = None
    for layer in network.layers_iterator():
        # While skip is set, fast-forward to the star's reference layer
        if skip:
            if layer.identifier == star.ref_layer:
                skip = False

        else:
            # Propagate fully connected entirely
            if isinstance(layer, nodes.FullyConnectedNode):
                # Need to expand bias since they are memorized like one-dimensional vectors in FC nodes.
                bias = layer.get_layer_bias_as_two_dimensional()
                star = star.single_fc_forward(layer.weight, bias)

            elif isinstance(layer, nodes.ConvNode):
                # The star generated by the convolution is interpreted as the linearization
                # provided by symbolic propagation
                star = make_star_from_bounds(bounds, layer.identifier)

            elif isinstance(layer, nodes.ReLUNode):
                relu_layer = layer
                break

            elif isinstance(layer, nodes.FlattenNode):
                # Do nothing
                pass

            # =======================================
            # There is a network with two useless
            # Reshape layers that do nothing, here
            # we filter them when this occurs
            # =======================================
            elif ((isinstance(layer, nodes.ReshapeNode) and isinstance(network.get_next_node(layer),
                                                                       nodes.ReshapeNode)) or
                  (isinstance(layer, nodes.ReshapeNode) and isinstance(network.get_previous_node(layer),
                                                                       nodes.ReshapeNode))):
                # Do nothing
                pass

            else:
                raise NotImplementedError(f'Unsupported layer {layer.__class__}')
def make_star_from_bounds(bounds: VerboseBounds, layer_id: str) -> ExtendedStar:
    """
    This function creates an ExtendedStar from the symbolic equations of the bounds at the
    layer specified by layer_id

    Parameters
    ----------
    bounds: VerboseBounds
        The collection of the symbolic and concrete bounds
    layer_id: str
        The identifier of the current layer

    Returns
    ----------
    ExtendedStar
        The ExtendedStar with the constraints specified by the bounds
    """
    symbolic = bounds.symbolic_bounds[layer_id]
    numeric = bounds.numeric_post_bounds[layer_id]

    layer_size = symbolic.lower.matrix.shape[0]
    input_size = symbolic.lower.matrix.shape[1]

    # Build the predicate in normal form Cx <= d: two rows per neuron,
    # one from the lower symbolic bound and one from the upper
    rows = []
    offsets = []
    for neuron in range(layer_size):
        rows += [-symbolic.lower.matrix[neuron], symbolic.upper.matrix[neuron]]
        offsets += [-symbolic.lower.offset[neuron] - numeric.lower[neuron],
                    symbolic.upper.offset[neuron] + numeric.upper[neuron]]

    predicate = LinearFunctions(torch.Tensor(rows), torch.Tensor(offsets))
    # The transformation is the identity: the star variables coincide with
    # the layer outputs
    identity = LinearFunctions(torch.eye(input_size), torch.zeros((input_size, 1)))

    return ExtendedStar(predicate, identity, ref_layer=layer_id)
class BoundsStats:
    """
    This class collects the statistics about stability and
    approximation obtained through Bounds Propagation

    Attributes
    ----------
    stability_info: dict[dict[str, list[int]] | dict[str, int]]
        Container for the statistics about neuron stability
    approximation_info: dict[tuple[str, int], float]
        Container for the statistics about the approximation area
    """

    def __init__(self):
        # One <layer_id: str> -> list[neuron: int] dictionary per ReLU
        # status, plus a running counter of stable neurons
        self.stability_info = {status: dict() for status in
                               (ReLUStatus.ACTIVE, ReLUStatus.INACTIVE, ReLUStatus.UNSTABLE)}
        self.stability_info['stable_count'] = 0

        # This dictionary is structured as
        # <(layer_id: str, neuron: int)> -> area: float
        self.approximation_info = dict()

    def count_unstable(self) -> int:
        """Procedure to count the unstable neurons"""
        per_layer = self.stability_info[ReLUStatus.UNSTABLE].values()
        return sum(len(neurons) for neurons in per_layer)
class VerboseBounds:
    """
    This class is a complete collection of different bounds and information
    about them, used in the bounds propagation loop

    Attributes
    ----------
    identifiers: list[str]
        The list of identifiers of explored layers
    symbolic_bounds: dict[layer_id: str, bounds: SymbolicLinearBounds]
        The dictionary of the symbolic bounds for the network
    numeric_pre_bounds: dict[layer_id: str, bounds: HyperRectangleBounds]
        The dictionary of the numeric pre-activation bounds for the network
    numeric_post_bounds: dict[layer_id: str, bounds: HyperRectangleBounds]
        The dictionary of the numeric post-activation bounds for the network
    statistics: BoundsStats
        The data structure of stability and approximation statistics
    """

    def __init__(self,
                 identifiers: list | None = None,
                 symbolic: dict | None = None,
                 numeric_pre: dict | None = None,
                 numeric_post: dict | None = None):
        # Deep-copy the inputs so later propagation steps cannot mutate the
        # caller's containers; default to fresh empty ones.
        self.identifiers = copy.deepcopy(identifiers) if identifiers is not None else list()
        self.symbolic_bounds = copy.deepcopy(symbolic) if symbolic is not None else dict()
        self.numeric_pre_bounds = copy.deepcopy(numeric_pre) if numeric_pre is not None else dict()
        self.numeric_post_bounds = copy.deepcopy(numeric_post) if numeric_post is not None else dict()
        self.statistics = BoundsStats()


def xor(a: bool, b: bool) -> bool:
    """Return the logical exclusive-or of two booleans."""
    # For booleans, inequality is exactly xor
    return a != b


def xnor(a: bool, b: bool) -> bool:
    """Return the logical equivalence (negated xor) of two booleans."""
    return a == b


def execute_network(network: networks.NeuralNetwork, input_t: Tensor) -> Tensor:
    """Applies the neural network function to an input Tensor using the pyTorch backend.

    Parameters
    ----------
    network: NeuralNetwork
        The network to execute
    input_t: Tensor
        The input value to feed

    Returns
    -------
    Tensor
        The computed output
    """
    py_net = PyTorchConverter().from_neural_network(network)
    py_net.pytorch_network.eval()
    py_net.pytorch_network.float()

    # NOTE(review): the permute reverses the order of all dimensions of the
    # input before the forward pass — presumably to bridge the internal and
    # PyTorch dimension conventions; confirm against the converter's layout.
    output = py_net.pytorch_network(input_t.float().permute(*torch.arange(input_t.ndim - 1, -1, -1)))
    return output.detach()
def combine_batchnorm1d(linear: ptl.Linear, batchnorm: ptl.BatchNorm1d) -> ptl.Linear:
    """
    Utility function to combine a BatchNorm1D node with a Linear node in a corresponding Linear node.

    Parameters
    ----------
    linear: Linear
        Linear to combine.
    batchnorm: BatchNorm1D
        BatchNorm1D to combine.

    Return
    ----------
    Linear
        The Linear resulting from the fusion of the two input nodes.
    """
    l_weight = linear.weight
    l_bias = linear.bias
    bn_running_mean = batchnorm.running_mean
    bn_running_var = batchnorm.running_var
    bn_weight = batchnorm.weight
    bn_bias = batchnorm.bias
    bn_eps = batchnorm.eps

    # BatchNorm folding: y = gamma * (Wx + b - mean) / sqrt(var + eps) + beta
    # fused bias = gamma / sqrt(var + eps) * (b - mean) + beta
    fused_bias = torch.div(bn_weight, torch.sqrt(bn_running_var + bn_eps))
    fused_bias = torch.mul(fused_bias, torch.sub(l_bias, bn_running_mean))
    fused_bias = torch.add(fused_bias, bn_bias)

    # fused weight = diag(gamma / sqrt(var + eps)) @ W
    fused_weight = torch.diag(torch.div(bn_weight, torch.sqrt(bn_running_var + bn_eps)))
    fused_weight = torch.matmul(fused_weight, l_weight)

    # NOTE(review): has_bias mirrors the original layer, but fused_bias above
    # uses l_bias unconditionally — a bias-less Linear would fail in torch.sub;
    # confirm whether bias-less layers can reach this function.
    has_bias = linear.bias is not None
    fused_linear = ptl.Linear(linear.identifier, linear.in_dim, linear.out_dim, linear.in_features, linear.out_features,
                              has_bias)

    # Freeze the fused parameters: the result is used for inference/analysis
    p_fused_weight = torch.nn.Parameter(fused_weight, requires_grad=False)
    p_fused_bias = torch.nn.Parameter(fused_bias, requires_grad=False)

    fused_linear.weight = p_fused_weight
    fused_linear.bias = p_fused_bias

    return fused_linear
def combine_batchnorm1d_net(network: networks.SequentialNetwork) -> networks.NeuralNetwork:
    """
    Utilities function to combine all the FullyConnectedNodes followed by BatchNorm1DNodes in corresponding
    FullyConnectedNodes.

    Parameters
    ----------
    network: SequentialNetwork
        Sequential Network of interest of which we want to combine the nodes.

    Return
    ----------
    SequentialNetwork
        Corresponding Sequential Network with the combined nodes.

    Raises
    ------
    Exception
        If a layer other than Linear, ReLU or BatchNorm1d is encountered.
    """
    py_net = PyTorchConverter().from_neural_network(network)

    # modules() yields the container itself as the first element: drop it
    modules = [m for m in py_net.pytorch_network.modules()][1:]
    num_modules = len(modules)
    current_index = 0

    new_modules = []

    while current_index + 1 < num_modules:
        current_node = modules[current_index]
        next_node = modules[current_index + 1]

        if isinstance(current_node, ptl.Linear) and isinstance(next_node, ptl.BatchNorm1d):
            # Fold the BatchNorm into the preceding Linear and skip it
            new_modules.append(combine_batchnorm1d(current_node, next_node))
            current_index = current_index + 1

        elif isinstance(current_node, (ptl.Linear, ptl.ReLU)):
            new_modules.append(copy.deepcopy(current_node))

        else:
            raise Exception("Combine Batchnorm supports only ReLU, Linear and BatchNorm1D layers.")

        current_index = current_index + 1

    # Append the trailing module, if one is left over. Guarding on the index
    # fixes an IndexError when the network ends with a fused Linear+BatchNorm
    # pair (current_index lands past the end of the list in that case).
    if current_index < num_modules and not isinstance(modules[current_index], ptl.BatchNorm1d):
        new_modules.append(copy.deepcopy(modules[current_index]))

    temp_pynet = ptl.Sequential(py_net.pytorch_network.identifier, py_net.pytorch_network.input_id, new_modules)
    combined_pynet = PyTorchNetwork(py_net.identifier, temp_pynet)
    combined_network = PyTorchConverter().to_neural_network(combined_pynet)

    return combined_network
| build-backend = "setuptools.build_meta" 7 | 8 | [project] 9 | name = "pyNeVer" 10 | version = "1.2.2" 11 | dependencies = [ 12 | "numpy", 13 | "onnx", 14 | "torch", 15 | "torchvision", 16 | "ortools", 17 | "pysmt", 18 | "multipledispatch" 19 | ] 20 | requires-python = ">=3.11" 21 | authors = [ 22 | { name = "Dario Guidotti", email = "dguidotti@uniss.it" }, 23 | { name = "Stefano Demarchi", email = "stefano.demarchi@edu.unige.it" }, 24 | ] 25 | maintainers = [ 26 | { name = "Stefano Demarchi", email = "stefano.demarchi@edu.unige.it" }, 27 | { name = "Andrea Gimelli", email = "andrea.gimelli@edu.unige.it" }, 28 | ] 29 | description = "API for the design, training and verification of neural networks." 30 | readme = "README.md" 31 | license = "GPL-3.0-or-later" 32 | license-files = ["LICENSE.txt"] 33 | keywords = ["artificial intelligence", "formal verification", "neural networks"] 34 | classifiers = [ 35 | "Development Status :: 4 - Beta", 36 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 37 | "Programming Language :: Python :: 3.11", 38 | "Operating System :: OS Independent" 39 | ] 40 | 41 | [project.optional-dependencies] 42 | docs = [ 43 | "sphinx", 44 | "sphinx-autodoc-typehints", 45 | "sphinx-rtd-theme" 46 | ] 47 | 48 | [tool.setuptools] 49 | packages = ["pynever"] 50 | 51 | [project.urls] 52 | Homepage = "https://neuralverification.org" 53 | Documentation = "https://neuralverification.org/pynever" 54 | Repository = "https://github.com/nevertools/pynever.git" 55 | Issues = "https://github.com/nevertools/pynever/issues" -------------------------------------------------------------------------------- /test/logs/placeholder.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/test/logs/placeholder.txt -------------------------------------------------------------------------------- /test/regression/__init__.py: 
"""This test module runs standard tests to confirm modifications to the software"""
import os
import subprocess
import sys

if __name__ == '__main__':
    # Clone regression tests repository in this directory
    print('Regression test started.')
    if not os.path.exists('Regression'):
        print('Downloading repository...')
        subprocess.run(['./download_benchmarks.sh', 'Regression'])

    print('Benchmarks repository found.')
    for dirname in ['ACAS_XU', 'RL']:  # CIFAR and resnet to come
        # f-string for consistency with the other interpolations below
        print(f'Running benchmarks for {dirname}...')
        subprocess.run(
            ['python', 'never2_batch.py',
             '-o', f'results_batch_{dirname}.csv',
             f'Regression/{dirname}/instances.csv',
             'ssbp'])
        print('Completed.')

        # Check all verified: the result column of each CSV row must be 'Verified'
        with open(f'results_batch_{dirname}.csv', 'r') as f:
            for line in f:
                if line.strip('\n').split(',')[2] != 'Verified':
                    print('Test failed!')
                    # sys.exit instead of the interactive-only exit() helper
                    sys.exit(1)

    print('Regression test completed. All tests passed.')
    sys.exit(0)
-------------------------------------------------------------------------------- /test/unittests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeVerTools/pyNeVer/fbd41aee5bc6582057b5f9c080e24308fbf36fa6/test/unittests/__init__.py -------------------------------------------------------------------------------- /test/unittests/topological.py: -------------------------------------------------------------------------------- 1 | """Test for topological sort""" 2 | 3 | from pynever import nodes 4 | from pynever.networks import SequentialNetwork, AcyclicNetwork 5 | 6 | nn_seq = SequentialNetwork('seq', 'x') 7 | nn_res = AcyclicNetwork('res', ['x']) 8 | 9 | nn_seq.append_node(nodes.ReLUNode('R1', (1,))) 10 | nn_seq.append_node(nodes.ReLUNode('R2', (1,))) 11 | nn_seq.append_node(nodes.ReLUNode('R3', (1,))) 12 | 13 | rl_1 = nodes.ReLUNode('R1', (1,)) 14 | rl_2 = nodes.ReLUNode('R2', (1,)) 15 | rl_3 = nodes.ReLUNode('R3', (1,)) 16 | rl_4 = nodes.ReLUNode('R4', (1,)) 17 | rl_5 = nodes.ReLUNode('R5', (1,)) 18 | rl_6 = nodes.ReLUNode('R6', (1,)) 19 | 20 | nn_res.add_node(rl_1) 21 | nn_res.add_node(rl_2, [rl_1]) 22 | nn_res.add_node(rl_3, [rl_2]) 23 | nn_res.add_node(rl_4, [rl_2]) 24 | nn_res.add_node(rl_5, [rl_3]) 25 | nn_res.add_node(rl_6, [rl_4, rl_5]) 26 | 27 | print(nn_seq) 28 | print(nn_res) 29 | 30 | print(nn_seq.get_topological_order()) 31 | print(nn_res.get_topological_order()) 32 | 33 | print(nn_res.get_children(rl_2)) 34 | -------------------------------------------------------------------------------- /test/unittests/training.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import torch 4 | import torchvision.transforms as tr 5 | from torch import nn 6 | from torch.optim import Adam, lr_scheduler 7 | 8 | from pynever.datasets import GenericFileDataset, TorchMNIST 9 | from pynever.strategies.conversion.converters.onnx import 
ONNXConverter 10 | from pynever.strategies.conversion.representation import ONNXNetwork 11 | from pynever.strategies.training import PytorchTraining, PytorchMetrics 12 | from strategies.training import PytorchTesting 13 | 14 | # Logger Setup 15 | logger = logging.getLogger("pynever") 16 | logger.setLevel(logging.INFO) 17 | ch = logging.StreamHandler() 18 | ch.setLevel(logging.INFO) 19 | formatter = logging.Formatter('%(message)s') 20 | ch.setFormatter(formatter) 21 | logger.addHandler(ch) 22 | 23 | train_batch_size = 128 24 | validation_batch_size = 64 25 | test_batch_size = 64 26 | learning_rate = 0.01 27 | validation_percentage = 0.3 28 | scheduler_patience = 5 29 | weight_decay = 0.001 30 | checkpoint_root = "" 31 | scheduler_params = {"patience": scheduler_patience} 32 | opt_params = {"lr": learning_rate, "weight_decay": weight_decay} 33 | 34 | data_train = GenericFileDataset('D:/Documents/MNIST/MNIST_training.csv', 784) 35 | # data_train = TorchMNIST('mnist', train=True, download=True, 36 | # transform=tr.Compose([tr.ToTensor(), 37 | # tr.Normalize(1, 0.5), 38 | # tr.Lambda(lambda x: torch.flatten(x)) 39 | # ]) 40 | # ) 41 | data_test = GenericFileDataset('D:/Documents/MNIST/MNIST_test.csv', 784) 42 | 43 | net = ONNXConverter().to_neural_network(ONNXNetwork('../../../mnist_fc.onnx')) 44 | 45 | train_strategy = PytorchTraining(Adam, opt_params, nn.CrossEntropyLoss(), 50, 46 | validation_percentage, train_batch_size, validation_batch_size, 47 | scheduler_con=lr_scheduler.ReduceLROnPlateau, sch_params=scheduler_params, 48 | precision_metric=PytorchMetrics.inaccuracy, 49 | device='cpu', checkpoints_root=checkpoint_root) 50 | test_strategy = PytorchTesting(PytorchMetrics.inaccuracy, {}, test_batch_size, 'cpu') 51 | 52 | trained = train_strategy.train(net, data_train) 53 | test_strategy.test(trained, data_test) 54 | -------------------------------------------------------------------------------- /vnncomp_scripts/config.yaml: 
-------------------------------------------------------------------------------- 1 | name: NeVer2 2 | ami: ami-0aff18ec83b712f05 3 | scripts_dir: vnncomp_scripts 4 | manual_installation_step: True 5 | run_installation_script_as_root: True 6 | run_post_installation_script_as_root: False 7 | run_toolkit_as_root: False 8 | description: > 9 | NeVer2 is a neural network verifier developed at University of Genova that enables design, learning and verification 10 | of feed-forward neural networks with Fully Connected, Convolutional and ReLU layers 11 | -------------------------------------------------------------------------------- /vnncomp_scripts/install_tool.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # NeVer2 4 | # install_tool.sh script for VNN-COMP 2024 5 | 6 | TOOL_NAME=NeVer2 7 | VERSION_STRING=v1 8 | 9 | # check arguments 10 | if [ "$1" != ${VERSION_STRING} ]; then 11 | echo "Expected first argument (version string) '$VERSION_STRING', got '$1'" 12 | exit 1 13 | fi 14 | 15 | echo "Installing $TOOL_NAME" 16 | DIR=$(dirname $(dirname $(realpath $0))) 17 | 18 | apt-get update && 19 | apt-get install -y python3 python3-pip && 20 | apt-get install -y psmisc && # for killall, used in prepare_instance.sh script 21 | pip3 install -r "$DIR/vnncomp_scripts/requirements.txt" -------------------------------------------------------------------------------- /vnncomp_scripts/prepare_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # NeVer2 4 | # prepare_instance.sh script for VNN-COMP 2024 5 | 6 | TOOL_NAME=NeVer2 7 | VERSION_STRING=v1 8 | 9 | # check arguments 10 | if [ "$1" != ${VERSION_STRING} ]; then 11 | echo "Expected first argument (version string) '$VERSION_STRING', got '$1'" 12 | exit 1 13 | fi 14 | 15 | CATEGORY=$2 16 | ONNX_FILE=$3 17 | VNNLIB_FILE=$4 18 | 19 | echo "Preparing $TOOL_NAME for benchmark instance in category '$CATEGORY' 
with onnx file '$ONNX_FILE' and vnnlib file '$VNNLIB_FILE'" 20 | 21 | # kill any zombie processes 22 | killall -q python3 23 | 24 | # script returns a 0 exit code if successful. If you want to skip a benchmark category you can return non-zero. 25 | exit 0 -------------------------------------------------------------------------------- /vnncomp_scripts/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | onnx 3 | torch 4 | torchvision 5 | ortools 6 | pysmt 7 | multipledispatch 8 | -------------------------------------------------------------------------------- /vnncomp_scripts/run_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # NeVer2 4 | # run_instance.sh script for VNN-COMP 2024 5 | 6 | TOOL_NAME=NeVer2 7 | VERSION_STRING=v1 8 | 9 | # check arguments 10 | if [ "$1" != ${VERSION_STRING} ]; then 11 | echo "Expected first argument (version string) '$VERSION_STRING', got '$1'" 12 | exit 1 13 | fi 14 | 15 | CATEGORY=$2 16 | ONNX_FILE=$3 17 | VNNLIB_FILE=$4 18 | RESULTS_FILE=$5 19 | TIMEOUT=$6 20 | 21 | echo "Running $TOOL_NAME on benchmark instance in category '$CATEGORY' with onnx file '$ONNX_FILE', vnnlib file '$VNNLIB_FILE', results file $RESULTS_FILE, and timeout $TIMEOUT" 22 | 23 | # setup environment variable for tool (doing it earlier won't be persistent with docker)" 24 | DIR=$(dirname $(dirname $(realpath $0))) 25 | export PYTHONPATH="$PYTHONPATH:$DIR" 26 | 27 | export OPENBLAS_NUM_THREADS=1 28 | export OMP_NUM_THREADS=1 29 | 30 | # run the tool to produce the results file 31 | python3 -m never2_launcher -o "$RESULTS_FILE" -t "$TIMEOUT" "$ONNX_FILE" "$VNNLIB_FILE" ssbp --------------------------------------------------------------------------------