├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── config.yml │ └── feature_request.md └── workflows │ ├── docs.yml │ ├── pre-commit.yml │ ├── pypi-publish.yml │ ├── test.yml │ └── testpypi-publish.yml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── AUTHORS.rst ├── CITATION.cff ├── LICENSE ├── README.md ├── docs ├── Makefile ├── _static │ └── .gitignore ├── _templates │ └── apidoc │ │ ├── module.rst_t │ │ ├── package.rst_t │ │ └── toc.rst_t ├── authors.rst ├── changelog.rst ├── conf.py ├── genindex.rst ├── index.rst ├── license.rst ├── overview.rst ├── qonnx-comparison.png ├── qonnx-custom-ops │ ├── bipolar_quant_op.md │ ├── floatquant_op.md │ ├── intquant_op.md │ └── trunc_op.md ├── readme.rst ├── requirements.txt └── tutorials.rst ├── notebooks ├── 0_how_to_work_with_onnx.ipynb ├── 1_custom_analysis_pass.ipynb ├── 2_custom_transformation_pass.ipynb ├── 3_custom_op.ipynb ├── 4_quant_lstm.ipynb ├── 4_quant_lstm_helper │ ├── function.py │ └── handler.py └── README.md ├── pyproject.toml ├── setup.cfg ├── setup.py ├── src └── qonnx │ ├── __init__.py │ ├── analysis │ ├── __init__.py │ ├── base.py │ ├── inference_cost.py │ ├── tensor_stats.py │ └── topology.py │ ├── converters │ ├── __init__.py │ ├── keras.py │ └── qkeras │ │ ├── __init__.py │ │ ├── onnx.py │ │ ├── qlayers.py │ │ └── quantizers.py │ ├── core │ ├── __init__.py │ ├── data_layout.py │ ├── datatype.py │ ├── execute_custom_node.py │ ├── modelwrapper.py │ └── onnx_exec.py │ ├── custom_op │ ├── __init__.py │ ├── base.py │ ├── channels_last │ │ ├── __init__.py │ │ ├── base_wrapped_op.py │ │ ├── batch_normalization.py │ │ ├── conv.py │ │ └── max_pool.py │ ├── general │ │ ├── __init__.py │ │ ├── bipolar_quant.py │ │ ├── debugmarker.py │ │ ├── floatquant.py │ │ ├── genericpartition.py │ │ ├── im2col.py │ │ ├── intquant.py │ │ ├── maxpoolnhwc.py │ │ ├── multithreshold.py │ │ ├── quant.py │ │ ├── quantavgpool2d.py │ │ ├── trunc.py │ │ └── xnorpopcount.py │ └── registry.py │ ├── data │ ├── __init__.py │ └── onnx │ │ ├── bsd300x3-espcn │ │ ├── nn_resize │ │ │ ├── float_model.onnx │ │ │ └── quant_model.onnx │ │ ├── subpixel │ │ │ ├── float_model.onnx │ │ │ └── quant_model.onnx │ │ └── test_data │ │ │ └── input_0.pb │ │ ├── eltwise_chanlast_testcase.onnx │ │ ├── floatquant_exec │ │ ├── README.md │ │ ├── qonnx_act_weight_fp8.onnx │ │ └── test_data │ │ │ ├── activation.npz │ │ │ ├── input.npy │ │ │ └── output.npy │ │ ├── matmul_update │ │ └── sdp.onnx │ │ ├── mnist-conv │ │ ├── README.md │ │ ├── model.onnx │ │ └── test_data_set_0 │ │ │ ├── input_0.pb │ │ │ └── output_0.pb │ │ └── residual_block_clean.onnx │ ├── transformation │ ├── __init__.py │ ├── base.py │ ├── batchnorm_to_affine.py │ ├── bipolar_to_xnor.py │ ├── change_3d_tensors_to_4d.py │ ├── change_batchsize.py │ ├── change_datalayout.py │ ├── channels_last.py │ ├── create_generic_partitions.py │ ├── double_to_single_float.py │ ├── expose_intermediate.py │ ├── extend_partition.py │ ├── extract_conv_bias.py │ ├── extract_quant_scale_zeropt.py │ ├── fold_constants.py │ ├── gemm_to_matmul.py │ ├── general.py │ ├── infer_data_layouts.py │ ├── infer_datatypes.py │ ├── infer_shapes.py │ ├── insert.py │ ├── insert_topk.py │ ├── lower_convs_to_matmul.py │ ├── make_input_chanlast.py │ ├── merge_onnx_models.py │ ├── pruning.py │ ├── qcdq_to_qonnx.py │ ├── qonnx_to_qcdq.py │ ├── quant_constant_folding.py │ ├── quantize_graph.py │ ├── rebalance_conv.py │ ├── remove.py │ ├── resize_conv_to_deconv.py │ └── subpixel_to_deconv.py │ └── util │ ├── __init__.py │ ├── 
basic.py │ ├── cleanup.py │ ├── config.py │ ├── convert.py │ ├── exec_qonnx.py │ ├── inference_cost.py │ ├── onnx.py │ ├── prune_channels.py │ ├── random_reseed.py │ ├── range_analysis.py │ ├── test.py │ └── to_channels_last.py ├── tests ├── analysis │ ├── test_inference_cost.py │ ├── test_inference_cost_breakdown.py │ ├── test_is_linear.py │ ├── test_matmul_mac_cost.py │ ├── test_range_analysis.py │ └── test_topology_checks.py ├── conftest.py ├── core │ ├── test_basic_onnx_exec.py │ ├── test_custom_onnx_exec.py │ ├── test_datatypes.py │ ├── test_mixed_onnx_exec.py │ └── test_modelwrapper.py ├── custom_op │ ├── test_attr.py │ ├── test_floatquant.py │ ├── test_im2col.py │ ├── test_multithreshold.py │ ├── test_runding_mode.py │ └── test_xnorpopcountmatmul.py ├── keras │ └── test_keras_convert.py ├── test_dummy.py ├── transformation │ ├── test_4d_conversion.py │ ├── test_batchnorm_to_affine.py │ ├── test_change_batchsize.py │ ├── test_change_datalayout.py │ ├── test_channelslast.py │ ├── test_channelslast_eltwise.py │ ├── test_channelslast_residual.py │ ├── test_conv_lowering.py │ ├── test_expose_intermediate.py │ ├── test_extend_partition.py │ ├── test_extract_quant_scale_zeropt.py │ ├── test_fold_constants.py │ ├── test_general_transformation.py │ ├── test_generic_partitioning.py │ ├── test_infer_data_layouts.py │ ├── test_infer_datatypes.py │ ├── test_infer_shapes.py │ ├── test_insert_identity.py │ ├── test_make_input_chanlast.py │ ├── test_merge_onnx_models.py │ ├── test_nn_resize_to_deconv.py │ ├── test_nodelocal_transform.py │ ├── test_pruning.py │ ├── test_qcdq_to_qonnx.py │ ├── test_qonnx_cleanup.py │ ├── test_qonnx_to_qcdq.py │ ├── test_quant_constant_folding.py │ ├── test_quantize_graph.py │ ├── test_rebalance_conv.py │ ├── test_remove_identity_ops.py │ ├── test_remove_unused.py │ ├── test_renaming.py │ ├── test_sort_commutative_inputs_initializer_last.py │ ├── test_sort_graph.py │ ├── test_subpixel_to_deconv.py │ └── test_topk_insert.py └── util │ ├── test_gen_finn_dt_tensor.py │ ├── test_matvec_range.py │ ├── test_padding.py │ └── test_shape_utils.py └── tox.ini /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Something isn't working as expected 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | ## Prerequisites 12 | Please make sure to check off these prerequisites before submitting a bug report. 13 | - [ ] Test that the bug appears on the current version of the main branch. Make sure to include the commit hash of the commit you checked out. 14 | - [ ] Check that the issue hasn't already been reported, by checking the currently open issues. 15 | - [ ] If there are steps to reproduce the problem, make sure to write them down below. 16 | - [ ] If relevant, please include the ONNX files, which were created directly before and/or after the bug. 17 | 18 | ## Quick summary 19 | Please give a brief and concise description of the bug. 20 | 21 | ## Details 22 | Please add to the following sections to describe the bug as accurately as possible. 23 | 24 | ### Steps to Reproduce 25 | Add what needs to be done to reproduce the bug. Add code examples where useful 26 | and make sure to include the resulting ONNX files, and the commit hash you are working on. 27 | 28 | 1. Clone the qonnx repository 29 | 2. Checkout the main branch, with commit hash: [...] 30 | 3. Run transformation [...] on ONNX file [...] 31 | 4. [Further steps ...] 
32 | 33 | ### Expected behavior 34 | Please add a brief description of what you expected to happen. 35 | 36 | ### Actual behavior 37 | Describe what actually happens instead. 38 | 39 | ## Optional 40 | 41 | ### Possible fix 42 | If you already know where the issue stems from, or you have a hint, please let us know. 43 | 44 | ### Additional context 45 | Add any other context about the problem here. 46 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Talk and engage with the community 4 | url: https://github.com/fastmachinelearning/qonnx/discussions/categories/general 5 | about: Check out the GitHub discussions page for QONNX. This is the best way to get in touch with us, in particular if you have a question about QONNX or a general problem that is likely not a bug. 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for QONNX 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Prerequisites 11 | Please talk to us before creating a new feature request, so that you can check that the idea is not already in active development. 12 | 13 | You can present your idea at the GitHub discussions page for QONNX: https://github.com/fastmachinelearning/qonnx/discussions/categories/ideas 14 | 15 | Even if an idea is already being worked on, you can still create a feature request 16 | if you would like to open a discussion about the feature or want to contribute to it. 17 | 18 | ## Details 19 | Please add to the following sections to describe the feature as accurately as possible. 20 | 21 | ### New behavior 22 | Please add a brief and concise description of what you would like to happen in QONNX in the future. 23 | 24 | ### Motivation 25 | Please tell us why this feature is important to the community. 26 | 27 | ### Parts of QONNX being affected 28 | Please describe which parts of QONNX would be affected by this feature.
29 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Docs 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | branches: [ main, dev ] 7 | push: 8 | branches: [ main, dev ] 9 | 10 | 11 | jobs: 12 | docs: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v1 16 | - uses: ammaraskar/sphinx-action@master 17 | with: 18 | docs-folder: "docs/" 19 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | name: Run pre-commit 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | branches: [ main, dev ] 7 | push: 8 | branches: [ main, dev ] 9 | 10 | jobs: 11 | lint: 12 | name: Lint PR or Push to DEV 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | python-version: ["3.10"] 18 | 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v4 22 | 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v5 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - name: Run Lint 29 | uses: pre-commit-ci/lite-action@v1.1.0 30 | if: always() 31 | -------------------------------------------------------------------------------- /.github/workflows/pypi-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when launched 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload to PyPI 5 | 6 | on: workflow_dispatch 7 | 8 | jobs: 9 | deploy: 10 | 11 | runs-on: ubuntu-latest 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | python-version: ["3.10"] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install setuptools wheel twine 27 | - name: Build and publish 28 | env: 29 | TWINE_USERNAME: __token__ 30 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} 31 | run: | 32 | python setup.py sdist bdist_wheel 33 | twine upload dist/* 34 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | branches: [ main, dev ] 7 | push: 8 | branches: [ main, dev ] 9 | 10 | 11 | jobs: 12 | build: 13 | 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: ['3.10'] 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install -e .[testing,qkeras,brevitas] 30 | 31 | - name: Run tests 32 | run: | 33 | pytest -n auto --verbose 34 | -------------------------------------------------------------------------------- /.github/workflows/testpypi-publish.yml: 
-------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when launched 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload to TestPyPI 5 | 6 | on: workflow_dispatch 7 | 8 | jobs: 9 | deploy: 10 | 11 | runs-on: ubuntu-latest 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | python-version: ["3.10"] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install setuptools wheel twine 27 | - name: Build and publish 28 | env: 29 | TWINE_USERNAME: __token__ 30 | TWINE_PASSWORD: ${{ secrets.TESTPYPI_API_TOKEN }} 31 | run: | 32 | python setup.py sdist bdist_wheel 33 | twine upload --repository testpypi dist/* 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | docs/api/* 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 99 | __pypackages__/ 100 | 101 | # Celery stuff 102 | celerybeat-schedule 103 | celerybeat.pid 104 | 105 | # SageMath parsed files 106 | *.sage.py 107 | 108 | # Environments 109 | .env 110 | .venv 111 | env/ 112 | venv/ 113 | ENV/ 114 | env.bak/ 115 | venv.bak/ 116 | 117 | # Spyder project settings 118 | .spyderproject 119 | .spyproject 120 | 121 | # Rope project settings 122 | .ropeproject 123 | 124 | # mkdocs documentation 125 | /site 126 | 127 | # mypy 128 | .mypy_cache/ 129 | .dmypy.json 130 | dmypy.json 131 | 132 | # Pyre type checker 133 | .pyre/ 134 | 135 | # pytype static type analyzer 136 | .pytype/ 137 | 138 | # Cython debug symbols 139 | cython_debug/ 140 | 141 | # IDE stuff 142 | .vscode 143 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | line_length=125 3 | indent=' ' 4 | skip=.tox,.venv,build,dist 5 | known_standard_library=setuptools,pkg_resources 6 | known_test=pytest 7 | known_first_party=finn,hls4ml 8 | sections=FUTURE,STDLIB,TEST,THIRDPARTY,FIRSTPARTY,LOCALFOLDER 9 | default_section=THIRDPARTY 10 | multi_line_output=3 11 | profile=black 12 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: '^docs/conf.py' 2 | 3 | default_language_version: 4 | python: python3.10 5 | 6 | repos: 7 | - repo: https://github.com/pre-commit/pre-commit-hooks 8 | rev: v4.4.0 9 | hooks: 10 | - id: trailing-whitespace 11 | - id: check-added-large-files 12 | - id: check-ast 13 | - id: check-json 14 | - id: check-merge-conflict 15 | - id: check-xml 16 | - id: check-yaml 17 | - id: debug-statements 18 | - id: end-of-file-fixer 19 | - id: requirements-txt-fixer 20 | - id: mixed-line-ending 21 | args: ['--fix=no'] 22 | 23 | - repo: https://github.com/PyCQA/isort 24 | rev: 5.12.0 25 | hooks: 26 | - id: isort 27 | 28 | - repo: https://github.com/psf/black 29 | rev: 23.1.0 30 | hooks: 31 | - id: black 32 | language_version: python3.10 33 | args: [--line-length=125] 34 | 35 | - repo: https://github.com/PyCQA/flake8 36 | rev: 6.0.0 37 | hooks: 38 | - id: flake8 39 | # black-compatible flake-8 config 40 | args: ['--max-line-length=125', # github viewer width 41 | '--extend-ignore=E203'] # E203 is not PEP8 compliant 42 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | build: 9 | os: ubuntu-22.04 10 | tools: 11 | python: "3.10" 12 | 13 | # Build documentation in the docs/ directory with Sphinx 14 | sphinx: 15 | configuration: docs/conf.py 16 | 17 | # If using Sphinx, optionally build your docs in additional formats such as PDF 18 | # formats: 19 | # - pdf 20 | 21 | # Optionally declare the Python requirements required to build your docs 22 | python: 23 | install: 24 | - requirements: docs/requirements.txt 25 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributors 3 | ============ 4 | 5 | * Yaman Umuroglu @maltanar 6 | * 
Hendrik Borras @HenniOVP 7 | * Javier Duarte @jmduarte 8 | * Vladimir Loncar @vloncar 9 | * Sioni Summers @thesps 10 | * Jovan Mitrevski @jmitrevs 11 | * Ian Colbert @i-colbert 12 | * Jakoba Petri-Koenig @auphelia 13 | * Javier Campos @jicampos 14 | * Mirza Mrahorovic @mmrahorovic 15 | * @thephysicsboi 16 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # This CITATION.cff file was generated with cffinit. 2 | # Visit https://bit.ly/cffinit to generate yours today! 3 | 4 | cff-version: 1.2.0 5 | title: QONNX 6 | message: 'If you use this software, please cite it as below.' 7 | type: software 8 | doi: 10.5281/zenodo.7622236 9 | license: Apache-2.0 10 | date-released: 2022-06-24 11 | version: 0.1 12 | url: "https://github.com/fastmachinelearning/qonnx" 13 | authors: 14 | - given-names: Yaman 15 | family-names: Umuroglu 16 | affiliation: AMD 17 | orcid: 'https://orcid.org/0000-0002-3700-5935' 18 | - given-names: Hendrik 19 | family-names: Borras 20 | affiliation: Heidelberg University 21 | orcid: 'https://orcid.org/0000-0002-2411-2416' 22 | - given-names: Vladimir 23 | family-names: Loncar 24 | affiliation: MIT 25 | orcid: 'https://orcid.org/0000-0003-3651-0232' 26 | - given-names: Sioni 27 | family-names: Summers 28 | affiliation: CERN 29 | orcid: 'https://orcid.org/0000-0003-4244-2061' 30 | - given-names: Javier 31 | family-names: Duarte 32 | affiliation: UC San Diego 33 | orcid: 'https://orcid.org/0000-0002-5076-7096' 34 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | AUTODOCDIR = api 11 | 12 | # User-friendly check for sphinx-build 13 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $?), 1) 14 | $(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/") 15 | endif 16 | 17 | .PHONY: help clean Makefile 18 | 19 | # Put it first so that "make" without argument is like "make help". 20 | help: 21 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 22 | 23 | clean: 24 | rm -rf $(BUILDDIR)/* $(AUTODOCDIR) 25 | 26 | # Catch-all target: route all unknown targets to Sphinx using the new 27 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 28 | %: Makefile 29 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 30 | -------------------------------------------------------------------------------- /docs/_static/.gitignore: -------------------------------------------------------------------------------- 1 | # Empty directory 2 | -------------------------------------------------------------------------------- /docs/_templates/apidoc/module.rst_t: -------------------------------------------------------------------------------- 1 | {%- if show_headings %} 2 | {{- [basename, "(module)"] | join(' ') | e | heading }} 3 | 4 | {% endif -%} 5 | .. 
automodule:: {{ qualname }} 6 | {%- for option in automodule_options %} 7 | :{{ option }}: 8 | {%- endfor %} 9 | -------------------------------------------------------------------------------- /docs/_templates/apidoc/package.rst_t: -------------------------------------------------------------------------------- 1 | {%- macro automodule(modname, options) -%} 2 | .. automodule:: {{ modname }} 3 | {%- for option in options %} 4 | :{{ option }}: 5 | {%- endfor %} 6 | {%- endmacro %} 7 | 8 | {%- macro toctree(docnames) -%} 9 | .. toctree:: 10 | :maxdepth: {{ maxdepth }} 11 | {% for docname in docnames %} 12 | {{ docname }} 13 | {%- endfor %} 14 | {%- endmacro %} 15 | 16 | {%- if is_namespace %} 17 | {{- [pkgname, "(namespace)"] | join(" ") | e | heading }} 18 | {% else %} 19 | {{- [pkgname, "(package)"] | join(" ") | e | heading }} 20 | {% endif %} 21 | 22 | {%- if modulefirst and not is_namespace %} 23 | {{ automodule(pkgname, automodule_options) }} 24 | {% endif %} 25 | 26 | {%- if subpackages %} 27 | Subpackages: 28 | 29 | {{ toctree(subpackages) }} 30 | {% endif %} 31 | 32 | {%- if submodules %} 33 | Submodules: 34 | 35 | {% if separatemodules %} 36 | {{ toctree(submodules) }} 37 | {% else %} 38 | {%- for submodule in submodules %} 39 | {% if show_headings %} 40 | {{- [submodule, "(module)"] | join(" ") | e | heading(2) }} 41 | {% endif %} 42 | {{ automodule(submodule, automodule_options) }} 43 | {% endfor %} 44 | {%- endif %} 45 | {%- endif %} 46 | 47 | {%- if not modulefirst and not is_namespace %} 48 | Module contents: 49 | 50 | {{ automodule(pkgname, automodule_options) }} 51 | {% endif %} 52 | -------------------------------------------------------------------------------- /docs/_templates/apidoc/toc.rst_t: -------------------------------------------------------------------------------- 1 | {{ header | heading }} 2 | 3 | The QONNX compiler infrastructure sources are divided into different modules. They are listed below. 4 | 5 | .. toctree:: 6 | :maxdepth: {{ maxdepth }} 7 | {% for docname in docnames %} 8 | {{ docname }} 9 | {%- endfor %} 10 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. _authors: 2 | .. include:: ../AUTHORS.rst 3 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changes: 2 | -------------------------------------------------------------------------------- /docs/genindex.rst: -------------------------------------------------------------------------------- 1 | .. This file is a placeholder and will be replaced 2 | 3 | ***** 4 | Index 5 | ***** 6 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ====== 2 | QONNX 3 | ====== 4 | 5 | .. note:: **QONNX** is currently under active development. APIs will likely change. 6 | 7 | QONNX (Quantized ONNX) introduces three new custom operators -- `Quant `_, `BipolarQuant `_ and `Trunc `_ -- in order to represent arbitrary-precision uniform quantization in ONNX. This enables: 8 | 9 | * Representation of binary, ternary, 3-bit, 4-bit, 6-bit or any other quantization. 10 | 11 | * Quantization is an operator itself, and can be applied to any parameter or layer input. 
12 | 13 | * Flexible choices for scaling factor and zero-point granularity. 14 | 15 | * Quantized values are carried using standard `float` datatypes to remain ONNX protobuf-compatible. 16 | 17 | This repository contains a set of Python utilities to work with QONNX models, including but not limited to: 18 | 19 | * executing QONNX models for (slow) functional verification 20 | 21 | * shape inference, constant folding and other basic optimizations 22 | 23 | * summarizing the inference cost of a QONNX model in terms of mixed-precision MACs, parameter and activation volume 24 | 25 | * Python infrastructure for writing transformations and defining executable, shape-inferencable custom ops 26 | 27 | * (experimental) data layout conversion from standard ONNX NCHW to custom QONNX NHWC ops 28 | 29 | 30 | Quickstart 31 | ----------- 32 | 33 | Operator definitions 34 | +++++++++++++++++++++ 35 | 36 | * `Quant `_ for 2-to-arbitrary-bit quantization, with scaling and zero-point 37 | 38 | * `BipolarQuant `_ for 1-bit (bipolar) quantization, with scaling and zero-point 39 | 40 | * `Trunc `_ for truncating to a specified number of bits, with scaling and zero-point 41 | 42 | Installation 43 | +++++++++++++ 44 | 45 | Install latest release from PyPI: 46 | 47 | :: 48 | 49 | pip install qonnx 50 | 51 | 52 | Development 53 | ++++++++++++ 54 | 55 | Install in editable mode in a venv: 56 | 57 | :: 58 | 59 | git clone https://github.com/fastmachinelearning/qonnx 60 | cd qonnx 61 | virtualenv -p python3.10 venv 62 | source venv/bin/activate 63 | pip install -e .[testing, docs, notebooks] 64 | 65 | 66 | Test suite 67 | ++++++++++ 68 | 69 | Run entire test suite, parallelized across CPU cores: 70 | 71 | :: 72 | 73 | pytest -n auto --verbose 74 | 75 | 76 | 77 | Run a particular test and fall into pdb if it fails: 78 | 79 | :: 80 | 81 | pytest --pdb -k "test*extend*partition.py::test*extend*partition[extend_id1-2]" 82 | 83 | 84 | 85 | QONNX also uses GitHub actions to run the full test suite on PRs. 86 | 87 | .. toctree:: 88 | :maxdepth: 2 89 | :hidden: 90 | 91 | ONNX-Based Compiler Infrastructure 92 | Tutorials 93 | API 94 | License 95 | Contributors 96 | Index 97 | 98 | 99 | * :ref:`modindex` 100 | * :ref:`search` 101 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | .. _license: 2 | 3 | ======== 4 | License 5 | ======== 6 | 7 | .. include:: ../LICENSE 8 | -------------------------------------------------------------------------------- /docs/qonnx-comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/docs/qonnx-comparison.png -------------------------------------------------------------------------------- /docs/qonnx-custom-ops/bipolar_quant_op.md: -------------------------------------------------------------------------------- 1 | ### **BipolarQuant** 2 | 3 | Calculates the binary quantized values of one input data (Tensor) and produces one output data (Tensor). 4 | Additionally, takes one float as input, which define the scaling. 5 | 6 | #### Version 7 | 8 | This operator is not part of the ONNX standard and is not currently versioned. 9 | 10 | #### Attributes 11 | 12 |
<dl> 13 | </dl>
14 | 15 | #### Inputs 16 | 17 | <dl>
18 | <dt><tt>X</tt> (differentiable) : tensor(float32)</dt>
19 | <dd>input tensor to quantize</dd>
20 | <dt><tt>scale</tt> : float32</dt>
21 | <dd>The scale factor</dd>
22 | </dl>
23 | 24 | 25 | #### Outputs 26 | 27 | <dl>
28 | <dt><tt>Y</tt> (differentiable) : tensor(float32)</dt>
29 | <dd>Output tensor</dd>
30 | </dl>
31 | 32 | 33 | #### Examples 34 |
35 | BipolarQuant 36 | 37 | ```python 38 | from onnx import helper 39 | import numpy as np 40 | 41 | # Define node settings and input 42 | x = np.random.randn(100).astype(np.float32)*10. 43 | scale = np.array(1.) 44 | 45 | # Create node 46 | node = helper.make_node( 47 | 'BipolarQuant', 48 | domain='finn.custom_op.general', 49 | inputs=['x', 'scale'], 50 | outputs=['y'], 51 | ) 52 | 53 | # Execute the same settings with the reference implementation (quant) 54 | # See the sample implementation for more details on quant. 55 | output_ref = binary_quant(x, scale) 56 | 57 | # Execute node and compare 58 | expect(node, inputs=[x, scale], outputs=[output_ref], name='test_binary_quant') 59 | 60 | ``` 61 | 62 |
63 | 64 | 65 | #### Sample Implementation 66 | 67 |
68 | BipolarQuant 69 | 70 | ```python 71 | # SPDX-License-Identifier: Apache-2.0 72 | 73 | from __future__ import absolute_import 74 | from __future__ import division 75 | from __future__ import print_function 76 | from __future__ import unicode_literals 77 | 78 | import numpy as np 79 | 80 | def binary_quant(inp_tensor, scale): 81 | # Quantizing 82 | y_int = inp_tensor 83 | y_ones = np.ones(y_int.shape, dtype=y_int.dtype) 84 | y_int = np.where(y_int >= 0.0, y_ones, -y_ones) 85 | # Scaling 86 | out_tensor = y_int * scale 87 | 88 | return out_tensor 89 | 90 | ``` 91 | 92 |
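As a quick sanity check, the reference implementation above can be exercised directly with NumPy. The snippet below is a minimal sketch (not part of the operator definition) and assumes the `binary_quant` function from the sample implementation is in scope.

```python
import numpy as np

# Exercise the reference implementation on a few concrete values.
x = np.array([0.3, -0.7, 0.0, 2.5, -1.2], dtype=np.float32)
scale = np.array(1.0, dtype=np.float32)

y = binary_quant(x, scale)
# Values >= 0 map to +scale and values < 0 map to -scale.
print(y)  # expected: [ 1. -1.  1.  1. -1.]
assert np.array_equal(y, np.where(x >= 0.0, scale, -scale))
```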
93 | -------------------------------------------------------------------------------- /docs/qonnx-custom-ops/trunc_op.md: -------------------------------------------------------------------------------- 1 | ### **Trunc** 2 | 3 | Truncates the values of one input data (Tensor) at a specified bitwidth and produces one output data (Tensor). 4 | Additionally, takes four float tensors as input, which define the scale, zero-point, input bit-width and output bit-width of the quantization. 5 | The attribute rounding_mode defines how truncated values are rounded. 6 | 7 | #### Version 8 | 9 | This operator is not part of the ONNX standard and is not currently versioned. 10 | 11 | #### Attributes 12 | 13 |
<dl>
14 | <dt><tt>rounding_mode</tt> : string (default is "FLOOR")</dt>
15 | <dd>Defines how rounding should be applied during truncation. Currently available modes are: "ROUND", "CEIL" and "FLOOR". Here "ROUND" implies a round-to-even operation. Lowercase variants for the rounding mode string are also supported: "round", "ceil", "floor".</dd>
16 | </dl>
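Note that "ROUND" follows the round-half-to-even convention (as implemented by NumPy's `np.round`), while "FLOOR" and "CEIL" map to `np.floor` and `np.ceil` as shown in the sample implementation below. A quick illustration, independent of the operator itself:

```python
import numpy as np

# "ROUND" means round-half-to-even (banker's rounding): ties go to the nearest even integer.
print(np.round([0.5, 1.5, 2.5, -0.5]))  # -> [ 0.  2.  2. -0.]
```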
17 | 18 | #### Inputs 19 | 20 | <dl>
21 | <dt><tt>X</tt> (differentiable) : tensor(float32)</dt>
22 | <dd>input tensor to truncate</dd>
23 | <dt><tt>scale</tt> : float32</dt>
24 | <dd>The scale factor</dd>
25 | <dt><tt>zeropt</tt> : float32</dt>
26 | <dd>The zero-point</dd>
27 | <dt><tt>in_bitwidth</tt> : int32</dt>
28 | <dd>The number of bits used at the input of the truncation</dd>
29 | <dt><tt>out_bitwidth</tt> : int32</dt>
30 | <dd>The number of bits used at the output of the truncation</dd>
31 | </dl>
32 | 33 | 34 | #### Outputs 35 | 36 | <dl>
37 | <dt><tt>Y</tt> (differentiable) : tensor(float32)</dt>
38 | <dd>Output tensor</dd>
39 | </dl>
40 | 41 | 42 | #### Examples 43 |
44 | Trunc 45 | 46 | ```python 47 | from onnx import helper 48 | import numpy as np 49 | 50 | # Define node settings and input 51 | x = np.random.randn(100).astype(np.float32)*10. 52 | scale = np.array(1.) 53 | zeropt = np.array(0.) 54 | in_bitwidth = np.array(10) 55 | out_bitwidth = np.array(4) 56 | rounding_mode = "ROUND" 57 | 58 | # Create node 59 | node = helper.make_node( 60 | 'Trunc', 61 | domain='finn.custom_op.general', 62 | inputs=['x', 'scale', 'zeropt', 'in_bitwidth', 'out_bitwidth'], 63 | outputs=['y'], 64 | rounding_mode=rounding_mode, 65 | ) 66 | 67 | # Execute the same settings with the reference implementation (trunc) 68 | # See the sample implementation for more details on trunc. 69 | output_ref = trunc(x, scale, zeropt, in_bitwidth, out_bitwidth, rounding_mode) 70 | 71 | # Execute node and compare 72 | expect(node, inputs=[x, scale, zeropt, in_bitwidth, out_bitwidth], outputs=[output_ref], name='test_trunc') 73 | 74 | ``` 75 | 76 |
77 | 78 | 79 | #### Sample Implementation 80 | 81 |
82 | Trunc 83 | 84 | ```python 85 | # SPDX-License-Identifier: Apache-2.0 86 | 87 | from __future__ import absolute_import 88 | from __future__ import division 89 | from __future__ import print_function 90 | from __future__ import unicode_literals 91 | 92 | import numpy as np 93 | 94 | def trunc(inp_tensor, scale, zeropt, input_bit_width, output_bit_width, rounding_mode): 95 | # Port of TruncIntQuant class from Brevitas: https://bit.ly/3wzIpTR 96 | 97 | # Scaling 98 | y = inp_tensor / scale 99 | y = y + zeropt 100 | # Rounding 101 | y = np.round(y) 102 | # Truncate 103 | trunc_bit_width = input_bit_width - output_bit_width 104 | trunc_scale = 2.0 ** trunc_bit_width 105 | y = y / trunc_scale 106 | 107 | # To int 108 | rounding_fx = resolve_rounding_mode(rounding_mode) 109 | y = rounding_fx(y) 110 | 111 | # Rescale 112 | y = y - zeropt 113 | y = y * scale 114 | 115 | return y 116 | 117 | def resolve_rounding_mode(mode_string): 118 | """Resolve the rounding mode string of Quant and Trunc ops 119 | to the corresponding numpy functions.""" 120 | if mode_string == "ROUND": 121 | return np.round 122 | elif mode_string == "CEIL": 123 | return np.ceil 124 | elif mode_string == "FLOOR": 125 | return np.floor 126 | else: 127 | raise ValueError(f"Could not resolve rounding mode called: {mode_string}") 128 | 129 | ``` 130 | 131 |
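As a concrete illustration of the semantics, the sketch below applies the reference implementation above to a few values, truncating from 10 bits down to 4 bits (i.e. dropping the 6 least significant bits). It assumes the `trunc` and `resolve_rounding_mode` functions from the sample implementation are in scope.

```python
import numpy as np

# Truncate 10-bit values down to 4 bits: with unit scale and zero zero-point,
# the result is essentially floor(x / 2**6) = floor(x / 64) for the "FLOOR" mode.
x = np.array([255.0, 100.0, -100.0], dtype=np.float32)
scale = np.array(1.0)
zeropt = np.array(0.0)
in_bitwidth = np.array(10)
out_bitwidth = np.array(4)

y = trunc(x, scale, zeropt, in_bitwidth, out_bitwidth, rounding_mode="FLOOR")
print(y)  # expected: [ 3.  1. -2.]  (floor(255/64)=3, floor(100/64)=1, floor(-100/64)=-2)
```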
132 | -------------------------------------------------------------------------------- /docs/readme.rst: -------------------------------------------------------------------------------- 1 | .. _readme: 2 | .. include:: ../README.rst 3 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | bitstring>=3.1.7 2 | clize==4.1.1 3 | importlib-metadata 4 | numpy==1.24.1 5 | onnx==1.11.0 6 | onnxruntime==1.11.1 7 | protobuf==3.20.1 8 | # Requirements file for ReadTheDocs, check .readthedocs.yml. 9 | # To build the module reference correctly, make sure every external package 10 | # under `install_requires` in `setup.cfg` is also listed here! 11 | sigtools==2.0.3 12 | sphinx==4.0.3 13 | sphinx_rtd_theme==1.1.1 14 | toposort==1.7.0 15 | -------------------------------------------------------------------------------- /docs/tutorials.rst: -------------------------------------------------------------------------------- 1 | .. _tutorials: 2 | 3 | ******************** 4 | Developer Tutorials 5 | ******************** 6 | 7 | The QONNX repository provides several Jupyter notebooks tutorials. 8 | These can help new developers get up-to-speed on QONNX internals. 9 | All Jupyter notebooks can be found under the `notebook folder `_. 10 | 11 | 12 | * 0_how_to_work_with_onnx 13 | 14 | * This notebook can help you to learn how to create and manipulate a simple ONNX model, also by using QONNX 15 | 16 | * 1_custom_analysis_pass 17 | 18 | * Explains what an analysis pass is and how to write one for QONNX. 19 | 20 | * 2_custom_transformation_pass 21 | 22 | * Explains what a transformation pass is and how to write one for QONNX. 23 | 24 | * 3_custom_op 25 | 26 | * Explains the basics of QONNX custom ops and how to define a new one. 27 | -------------------------------------------------------------------------------- /notebooks/4_quant_lstm_helper/handler.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | import torch 5 | from abc import ABC 6 | from brevitas.export.common.handler.qcdq import ( 7 | DQMixin, 8 | QCDQActQuantProxyHandlerMixin, 9 | QCDQBiasQuantProxyHandlerMixin, 10 | QCDQDecoupledWeightQuantProxyHandlerMixin, 11 | QCDQMixin, 12 | QCDQTruncQuantProxyHandlerMixin, 13 | QCDQWeightQuantProxyHandlerMixin, 14 | ) 15 | from brevitas.export.onnx.handler import ONNXBaseHandler, QuantLSTMLayerHandler 16 | 17 | from ..function import BrevitasQuantLSTMCellFn, DequantizeLinearFn, IntClipFn, QuantizeLinearFn 18 | 19 | 20 | class StdDQONNXMixin(DQMixin, ABC): 21 | def dequantize_fn(self, x, scale, zero_point, axis): 22 | return DequantizeLinearFn.apply(x, scale, zero_point, axis) 23 | 24 | @property 25 | def flatten_dequantize_params(self): 26 | return True 27 | 28 | @property 29 | def itemize_quantize_scalar_params(self): 30 | return False 31 | 32 | 33 | class StdQCDQONNXMixin(QCDQMixin, StdDQONNXMixin, ABC): 34 | @property 35 | def clip_over_integers(self): 36 | return True 37 | 38 | @classmethod 39 | def int8_dtype(cls): 40 | return torch.int8 41 | 42 | @classmethod 43 | def uint8_dtype(cls): 44 | return torch.uint8 45 | 46 | @classmethod 47 | def int32_dtype(cls): 48 | return torch.int32 49 | 50 | def validate(self, module): 51 | self.validate_8b_bit_width(module.bit_width(), le_then=True) 52 | assert module.bit_width() > 1.0, "Binary quant not supported" 53 | assert module.rounding_mode.upper() == "ROUND", "Only round to nearest even supported" 54 | 55 | def quantize_fn(self, x, scale, zero_point, dtype, axis): 56 | return QuantizeLinearFn.apply(x, scale, zero_point, dtype, axis) 57 | 58 | def clip_fn(self, x, min_val, max_val): 59 | return IntClipFn.apply(x, min_val, max_val) 60 | 61 | 62 | class StdQCDQONNXWeightQuantProxyHandler(StdQCDQONNXMixin, QCDQWeightQuantProxyHandlerMixin, ONNXBaseHandler): 63 | pass 64 | 65 | 66 | class StdQCDQONNXDecoupledWeightQuantProxyHandler( 67 | StdQCDQONNXMixin, QCDQDecoupledWeightQuantProxyHandlerMixin, ONNXBaseHandler 68 | ): 69 | pass 70 | 71 | 72 | class StdQCDQONNXActQuantProxyHandler(StdQCDQONNXMixin, QCDQActQuantProxyHandlerMixin, ONNXBaseHandler): 73 | pass 74 | 75 | 76 | class StdQCDQONNXBiasQuantProxyHandler(StdDQONNXMixin, QCDQBiasQuantProxyHandlerMixin, ONNXBaseHandler): 77 | pass 78 | 79 | 80 | class StdQCDQONNXTruncQuantProxyHandler(StdQCDQONNXMixin, QCDQTruncQuantProxyHandlerMixin, ONNXBaseHandler): 81 | pass 82 | 83 | 84 | class StdQCDQONNXQuantLSTMLayerHandler(QuantLSTMLayerHandler): 85 | def quantized_cell_symbolic_execution( 86 | self, 87 | quant_input, 88 | quant_hidden_state, 89 | quant_cell_state, 90 | quant_weight_ii, 91 | quant_weight_if, 92 | quant_weight_ic, 93 | quant_weight_io, 94 | quant_weight_hi, 95 | quant_weight_hf, 96 | quant_weight_hc, 97 | quant_weight_ho, 98 | quant_bias_input, 99 | quant_bias_forget, 100 | quant_bias_cell, 101 | quant_bias_output, 102 | ): 103 | return BrevitasQuantLSTMCellFn.apply( 104 | quant_input, 105 | quant_hidden_state, 106 | quant_cell_state, 107 | quant_weight_ii, 108 | quant_weight_if, 109 | quant_weight_ic, 110 | quant_weight_io, 111 | quant_weight_hi, 112 | quant_weight_hf, 113 | quant_weight_hc, 114 | quant_weight_ho, 115 | quant_bias_input, 116 | quant_bias_forget, 117 | quant_bias_cell, 118 | quant_bias_output, 119 | *self.symbolic_kwargs.values() 120 | ) 121 | # raise RuntimeError( 122 | # "Quantized LSTM cell is not supported for ONNX QCDQ " 123 | # "(weights only quantization is). 
Use export_qonnx.") 124 | -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # QONNX Tutorials 2 | 3 | This folder contains a collection of Jupyter Notebook tutorials that cover QONNX internals, intended for new developers. 4 | We recommend installing QONNX in development mode with notebooks option enabled, as follows: 5 | 6 | ``` 7 | git clone https://github.com/fastmachinelearning/qonnx 8 | cd qonnx 9 | virtualenv -p python3.10 venv 10 | source venv/bin/activate 11 | pip install -e .[testing,docs,notebooks] 12 | cd notebooks 13 | jupyter notebook . 14 | # follow the link printed in the console to bring up Jupyter 15 | ``` 16 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | # AVOID CHANGING REQUIRES: IT WILL BE UPDATED BY PYSCAFFOLD! 3 | requires = ["setuptools>=46.1.0", "setuptools_scm[toml]>=5", "wheel"] 4 | build-backend = "setuptools.build_meta" 5 | 6 | [tool.setuptools_scm] 7 | # See configuration details in https://github.com/pypa/setuptools_scm 8 | version_scheme = "no-guess-dev" 9 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Setup file for qonnx. 3 | Use setup.cfg to configure your project. 4 | 5 | This file was generated with PyScaffold 4.0.2. 6 | PyScaffold helps you to put up the scaffold of your new Python project. 7 | Learn more under: https://pyscaffold.org/ 8 | """ 9 | from setuptools import setup 10 | 11 | if __name__ == "__main__": 12 | try: 13 | setup( 14 | use_scm_version={"version_scheme": "no-guess-dev"}, 15 | setup_requires=["setuptools_scm"], 16 | ) 17 | except: # noqa 18 | print( 19 | "\n\nAn error occurred while building the project, " 20 | "please ensure you have the most updated version of setuptools, " 21 | "setuptools_scm and wheel with:\n" 22 | " pip install -U setuptools setuptools_scm wheel\n\n" 23 | ) 24 | raise 25 | -------------------------------------------------------------------------------- /src/qonnx/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/__init__.py -------------------------------------------------------------------------------- /src/qonnx/analysis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/analysis/__init__.py -------------------------------------------------------------------------------- /src/qonnx/analysis/base.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 
9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | 30 | """ 31 | How to write an analysis pass for QONNX 32 | ---------------------------------------- 33 | 34 | An analysis pass traverses the graph structure and produces information about 35 | certain properties. The convention is to take in a ModelWrapper, and return 36 | a dictionary of named properties that the analysis extracts. 37 | """ 38 | -------------------------------------------------------------------------------- /src/qonnx/converters/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | from .keras import from_keras # noqa: F401 3 | except ImportError: 4 | pass 5 | -------------------------------------------------------------------------------- /src/qonnx/converters/qkeras/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/converters/qkeras/__init__.py -------------------------------------------------------------------------------- /src/qonnx/converters/qkeras/quantizers.py: -------------------------------------------------------------------------------- 1 | import qkeras 2 | import six 3 | 4 | 5 | def get_quant_params(tensor, qkeras_quantizer): 6 | if isinstance(qkeras_quantizer, str): 7 | qkeras_quantizer = qkeras.get_quantizer(qkeras_quantizer) 8 | 9 | return handler_map[qkeras_quantizer.__class__.__name__](tensor, qkeras_quantizer) 10 | 11 | 12 | def _get_scale_from_alpha(tensor, quantizer): 13 | alpha = quantizer.get_config()["alpha"] 14 | 15 | if alpha is None: 16 | return 1 17 | elif isinstance(alpha, six.string_types): 18 | raise Exception(f"Cannot parse alpha = {alpha}.") 19 | return 1 20 | else: 21 | return alpha 22 | 23 | 24 | def _get_quantizer_scale(tensor, quantizer): 25 | # call the quantizer on the tensor to get its scale 26 | import numpy as np 27 | 28 | quantizer(np.array(tensor).astype(np.float32)) 29 | return quantizer.scale 30 | 31 | 32 | def convert_quantized_bits(tensor, quantizer): 33 | config = quantizer.get_config() 34 | signed = int(config["keep_negative"]) 35 | narrow = int(config["symmetric"]) 36 | qscale = _get_quantizer_scale(tensor, quantizer) 37 | assert qscale == 1, "Non-unity alpha is 
not yet supported" 38 | scale = 1.0 / 2 ** (int(config["bits"]) - int(config["integer"] + signed)) 39 | zero_point = 0 40 | bit_width = int(config["bits"]) 41 | rounding_mode = "ROUND" 42 | 43 | settings = { 44 | "attributes": {"signed": signed, "narrow": narrow, "rounding_mode": rounding_mode}, 45 | "inputs": {"scale": scale, "zero_point": zero_point, "bit_width": bit_width}, 46 | } 47 | return settings 48 | 49 | 50 | def convert_quantized_relu(tensor, quantizer): 51 | config = quantizer.get_config() 52 | 53 | signed = int(config["negative_slope"] != 0.0) 54 | narrow = int(False) 55 | scale = 1.0 / 2 ** (int(config["bits"]) - int(config["integer"] + signed)) 56 | zero_point = 0 57 | bit_width = int(config["bits"]) 58 | rounding_mode = "ROUND" 59 | 60 | settings = { 61 | "attributes": {"signed": signed, "narrow": narrow, "rounding_mode": rounding_mode}, 62 | "inputs": {"scale": scale, "zero_point": zero_point, "bit_width": bit_width}, 63 | } 64 | return settings 65 | 66 | 67 | def convert_binary(tensor, quantizer): 68 | signed = 1 69 | narrow = 1 70 | qscale = _get_quantizer_scale(tensor, quantizer) 71 | assert qscale == 1, "binary - non-unity alpha is not yet supported" 72 | scale = 1 73 | zero_point = 0 74 | bit_width = 1 75 | rounding_mode = "ROUND" 76 | 77 | settings = { 78 | "attributes": {"signed": signed, "narrow": narrow, "rounding_mode": rounding_mode}, 79 | "inputs": {"scale": scale, "zero_point": zero_point, "bit_width": bit_width}, 80 | } 81 | return settings 82 | 83 | 84 | def convert_ternary(tensor, quantizer): 85 | config = quantizer.get_config() 86 | signed = 1 87 | narrow = 1 88 | qscale = _get_quantizer_scale(tensor, quantizer) 89 | assert qscale == 1, "ternary - non-unity alpha is not yet supported" 90 | # qkeras ternary quantizer has threshold parameter to change rounding point 91 | # here we could scale such that normal 'ROUND' op gives the same result, but doesn't work with re-scaling 92 | t = config["threshold"] 93 | if t is None: 94 | ternary = qkeras.ternary() 95 | t = ternary.default_threshold 96 | assert t == 0.5, "ternary - only threshold 0.5 is supported" 97 | # note that if assertions fail, Quant node is not inserted, but model is still converted 98 | # this seems to be unexpected behavior 99 | scale = 1.0 100 | zero_point = 0 101 | bit_width = 2 102 | rounding_mode = "ROUND" 103 | 104 | settings = { 105 | "attributes": {"signed": signed, "narrow": narrow, "rounding_mode": rounding_mode}, 106 | "inputs": {"scale": scale, "zero_point": zero_point, "bit_width": bit_width}, 107 | } 108 | return settings 109 | 110 | 111 | handler_map = { 112 | "quantized_bits": convert_quantized_bits, 113 | "quantized_relu": convert_quantized_relu, 114 | "binary": convert_binary, 115 | "ternary": convert_ternary, 116 | } 117 | -------------------------------------------------------------------------------- /src/qonnx/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/core/__init__.py -------------------------------------------------------------------------------- /src/qonnx/core/data_layout.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 
3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | # predefined lists of strings to have a cannonical way of expresing data layout 30 | # annotations 31 | 32 | NHWC = ["N", "H", "W", "C"] 33 | NCHW = ["N", "C", "H", "W"] 34 | NCW = ["N", "C", "W"] 35 | NWC = ["N", "W", "C"] 36 | NC = ["N", "C"] 37 | UNKNOWN = [] 38 | 39 | 40 | def is_channels_last(layout): 41 | return layout[-1] == "C" 42 | 43 | 44 | def get_channels_last_layout_for_ndims(ndims): 45 | return {4: NHWC, 3: NWC, 2: NC}[ndims] 46 | 47 | 48 | def get_channels_first_layout_for_ndims(ndims): 49 | return {4: NCHW, 3: NCW, 2: NC}[ndims] 50 | -------------------------------------------------------------------------------- /src/qonnx/core/execute_custom_node.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import qonnx.custom_op.registry as registry 30 | from qonnx.util.basic import get_preferred_onnx_opset 31 | 32 | 33 | def execute_custom_node(node, context, graph, onnx_opset_version=get_preferred_onnx_opset()): 34 | """Call custom implementation to execute a single custom node. 35 | Input/output provided via context.""" 36 | op_type = node.op_type 37 | try: 38 | # lookup op_type in registry of CustomOps 39 | inst = registry.getCustomOp(node, onnx_opset_version=onnx_opset_version) 40 | inst.execute_node(context, graph) 41 | except KeyError: 42 | # exception if op_type is not supported 43 | raise Exception("Custom op_type %s is currently not supported." % op_type) 44 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/custom_op/__init__.py -------------------------------------------------------------------------------- /src/qonnx/custom_op/channels_last/__init__.py: -------------------------------------------------------------------------------- 1 | from qonnx.custom_op.channels_last.batch_normalization import BatchNormalization 2 | from qonnx.custom_op.channels_last.conv import Conv 3 | from qonnx.custom_op.channels_last.max_pool import MaxPool 4 | 5 | custom_op = dict() 6 | 7 | custom_op["Conv"] = Conv 8 | custom_op["MaxPool"] = MaxPool 9 | custom_op["BatchNormalization"] = BatchNormalization 10 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/general/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | from qonnx.custom_op.general.bipolar_quant import BipolarQuant 30 | from qonnx.custom_op.general.debugmarker import DebugMarker 31 | from qonnx.custom_op.general.floatquant import FloatQuant 32 | from qonnx.custom_op.general.genericpartition import GenericPartition 33 | from qonnx.custom_op.general.im2col import Im2Col 34 | from qonnx.custom_op.general.intquant import IntQuant 35 | from qonnx.custom_op.general.maxpoolnhwc import MaxPoolNHWC 36 | from qonnx.custom_op.general.multithreshold import MultiThreshold 37 | from qonnx.custom_op.general.quantavgpool2d import QuantAvgPool2d 38 | from qonnx.custom_op.general.trunc import Trunc 39 | from qonnx.custom_op.general.xnorpopcount import XnorPopcountMatMul 40 | 41 | custom_op = dict() 42 | 43 | custom_op["DebugMarker"] = DebugMarker 44 | custom_op["QuantAvgPool2d"] = QuantAvgPool2d 45 | custom_op["MaxPoolNHWC"] = MaxPoolNHWC 46 | custom_op["GenericPartition"] = GenericPartition 47 | custom_op["MultiThreshold"] = MultiThreshold 48 | custom_op["XnorPopcountMatMul"] = XnorPopcountMatMul 49 | custom_op["Im2Col"] = Im2Col 50 | custom_op["IntQuant"] = IntQuant 51 | custom_op["Quant"] = IntQuant 52 | custom_op["Trunc"] = Trunc 53 | custom_op["BipolarQuant"] = BipolarQuant 54 | custom_op["FloatQuant"] = FloatQuant 55 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/general/bipolar_quant.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import numpy as np 30 | import onnx.helper as helper 31 | 32 | from qonnx.core.datatype import DataType 33 | from qonnx.custom_op.base import CustomOp 34 | 35 | 36 | def binary_quant(inp_tensor, scale): 37 | # ToDo: Update this link, when the PR gets merged 38 | # Port of IntQuant class from Brevitas: https://bit.ly/2S6qvZJ 39 | 40 | # Quantizing 41 | y_int = inp_tensor 42 | y_ones = np.ones(y_int.shape, dtype=y_int.dtype) 43 | y_int = np.where(y_int >= 0.0, y_ones, -y_ones) 44 | # Scaling 45 | out_tensor = y_int * scale 46 | 47 | return out_tensor 48 | 49 | 50 | class BipolarQuant(CustomOp): 51 | """Bipolar quantization operation for QONNX. Takes four inputs: 52 | - input tensor to quantize 53 | - the scale 54 | 55 | The output is a tensor of the same shape as the input tensor, with quantized 56 | values. 57 | """ 58 | 59 | def get_nodeattr_types(self): 60 | return dict() 61 | 62 | def make_shape_compatible_op(self, model): 63 | node = self.onnx_node 64 | return helper.make_node("Identity", [node.input[0]], [node.output[0]]) 65 | 66 | def get_integer_datatype(self, model): 67 | return DataType["BIPOLAR"] 68 | 69 | def get_output_dtype(self, model): 70 | node = self.onnx_node 71 | # scale must be read from initializers 72 | scale = model.get_initializer(node.input[1]) 73 | # determine the QONNX DataType 74 | unit_scale = np.all(scale == 1.0) 75 | if unit_scale: 76 | finn_dt = self.get_integer_datatype(model) 77 | else: 78 | finn_dt = DataType["FLOAT32"] 79 | 80 | return finn_dt 81 | 82 | def infer_node_datatype(self, model): 83 | try: 84 | finn_dt = self.get_output_dtype(model) 85 | except AssertionError: 86 | finn_dt = DataType["FLOAT32"] 87 | node = self.onnx_node 88 | model.set_tensor_datatype(node.output[0], finn_dt) 89 | 90 | def execute_node(self, context, graph): 91 | node = self.onnx_node 92 | # save inputs 93 | inp_tensor = context[node.input[0]] 94 | scale = context[node.input[1]] 95 | # calculate output 96 | ret = binary_quant(inp_tensor, scale) 97 | # ensure output is ndarray (even if 0d) 98 | # since numpy silently flattens 0d arrays to scalars 99 | # more: https://github.com/numpy/numpy/issues/13105 100 | if not isinstance(ret, np.ndarray): 101 | ret = np.asarray(ret, dtype=np.float32) 102 | # set context according to output name 103 | context[node.output[0]] = ret 104 | 105 | def verify_node(self): 106 | pass 107 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/general/debugmarker.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 
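# --- Illustrative sketch for binary_quant/BipolarQuant shown above ----------
# (not part of the repository). Values >= 0 map to +scale, values < 0 to -scale.
import numpy as np
from qonnx.custom_op.general.bipolar_quant import binary_quant

x = np.array([[-1.7, 0.0, 0.3]], dtype=np.float32)
print(binary_quant(x, np.float32(1.0)))   # -> [[-1.  1.  1.]]
print(binary_quant(x, np.float32(0.5)))   # -> [[-0.5  0.5  0.5]]
# -----------------------------------------------------------------------------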
9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | from onnx import helper 30 | 31 | from qonnx.custom_op.base import CustomOp 32 | 33 | 34 | class DebugMarker(CustomOp): 35 | def get_nodeattr_types(self): 36 | return {"export_debug_name": ("s", True, "")} 37 | 38 | def make_shape_compatible_op(self, model): 39 | node = self.onnx_node 40 | return helper.make_node("Identity", [node.input[0]], [node.output[0]]) 41 | 42 | def infer_node_datatype(self, model): 43 | node = self.onnx_node 44 | # data type stays the same 45 | dtype = model.get_tensor_datatype(node.input[0]) 46 | model.set_tensor_datatype(node.output[0], dtype) 47 | # create quantization annotation for debug marker 48 | model.set_tensor_datatype(self.get_nodeattr("export_debug_name"), dtype) 49 | 50 | def execute_node(self, context, graph): 51 | node = self.onnx_node 52 | inp_name = node.input[0] 53 | out_name = node.output[0] 54 | inp = context[inp_name] 55 | context[out_name] = inp 56 | # insert debug marker output as separate tensor 57 | context[self.get_nodeattr("export_debug_name")] = inp 58 | 59 | def verify_node(self): 60 | info_messages = [] 61 | return info_messages 62 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/general/genericpartition.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 
17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | from qonnx.core.modelwrapper import ModelWrapper 30 | from qonnx.core.onnx_exec import execute_onnx 31 | from qonnx.custom_op.base import CustomOp 32 | 33 | 34 | class GenericPartition(CustomOp): 35 | """Class that corresponds to the meta/container node GenericPartition 36 | which is a placeholder for a group of nodes that have been separated 37 | out into an ONNX model of its own.""" 38 | 39 | def get_nodeattr_types(self): 40 | return { 41 | "model": ("s", True, ""), 42 | "return_full_exec_context": ("i", False, 0), 43 | } 44 | 45 | def make_shape_compatible_op(self, model): 46 | pass 47 | 48 | def infer_node_datatype(self, model): 49 | pass 50 | 51 | def execute_node(self, context, graph): 52 | model = ModelWrapper(self.get_nodeattr("model")) 53 | return_full_exec_context = self.get_nodeattr("return_full_exec_context") == 1 54 | node = self.onnx_node 55 | inp_ctx = dict(filter(lambda x: x[0] in node.input, context.items())) 56 | # inputs may have been renamed in partition 57 | for i, old_iname in enumerate(node.input): 58 | new_iname = model.graph.input[i].name 59 | if old_iname != new_iname: 60 | inp_ctx[new_iname] = inp_ctx[old_iname] 61 | del inp_ctx[old_iname] 62 | ret = execute_onnx(model, inp_ctx, return_full_exec_context) 63 | # outputs may have been renamed in partition 64 | for i, node_oname in enumerate(node.output): 65 | model_oname = model.graph.output[i].name 66 | context[node_oname] = ret[model_oname] 67 | # prefix and insert exec context entries 68 | if return_full_exec_context: 69 | for tname in ret.keys(): 70 | if tname not in [x.name for x in model.graph.output]: 71 | context[node.name + "_" + tname] = ret[tname] 72 | 73 | def verify_node(self): 74 | info_messages = [] 75 | 76 | # verify number of attributes 77 | num_of_attr = 1 78 | if len(self.onnx_node.attribute) == num_of_attr: 79 | info_messages.append("The number of attributes is correct") 80 | else: 81 | info_messages.append( 82 | """The number of attributes is incorrect, 83 | {} should have {} attributes""".format( 84 | self.onnx_node.op_type, num_of_attr 85 | ) 86 | ) 87 | # verify that all necessary attributes exist 88 | try: 89 | self.get_nodeattr("model") 90 | info_messages.append("All necessary attributes exist") 91 | except Exception: 92 | info_messages.append( 93 | """The necessary attributes do not exist. 94 | GenericPartition needs the following attribute(s): 95 | model""" 96 | ) 97 | 98 | return info_messages 99 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/general/quant.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 Xilinx, Inc. 2 | # All rights reserved. 
3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | from qonnx.custom_op.general.intquant import IntQuant as Quant 30 | from qonnx.custom_op.general.intquant import int_quant as quant 31 | from qonnx.custom_op.general.intquant import max_int, min_int, resolve_rounding_mode 32 | 33 | Quant = Quant 34 | quant = quant 35 | max_int = max_int 36 | min_int = min_int 37 | resolve_rounding_mode = resolve_rounding_mode 38 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/general/trunc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import numpy as np 30 | import onnx.helper as helper 31 | 32 | from qonnx.core.datatype import DataType 33 | from qonnx.custom_op.base import CustomOp 34 | from qonnx.custom_op.general.quant import resolve_rounding_mode 35 | 36 | 37 | def trunc(inp_tensor, scale, zeropt, input_bit_width, output_bit_width, rounding_mode): 38 | # Port of TruncIntQuant class from Brevitas: https://bit.ly/3wzIpTR 39 | 40 | # Scaling 41 | y = inp_tensor / scale 42 | y = y + zeropt 43 | # Rounding 44 | y = np.round(y) 45 | # Truncate 46 | trunc_bit_width = input_bit_width - output_bit_width 47 | trunc_scale = 2.0**trunc_bit_width 48 | y = y / trunc_scale 49 | 50 | # To int 51 | rounding_fx = resolve_rounding_mode(rounding_mode) 52 | y = rounding_fx(y) 53 | 54 | # Rescale 55 | y = y - zeropt 56 | y = y * scale 57 | 58 | return y 59 | 60 | 61 | class Trunc(CustomOp): 62 | """Generic truncation operation for QONNX. Takes four inputs: 63 | - input tensor to truncate 64 | - the scale 65 | - the zero-point 66 | - the truncation bit-width 67 | 68 | The output is a tensor of the same shape as the input tensor, with truncated 69 | values. 70 | """ 71 | 72 | def get_nodeattr_types(self): 73 | return { 74 | # The rounding mode, which is used for the trunc function 75 | "rounding_mode": ("s", True, "FLOOR"), 76 | } 77 | 78 | def make_shape_compatible_op(self, model): 79 | node = self.onnx_node 80 | return helper.make_node("Identity", [node.input[0]], [node.output[0]]) 81 | 82 | def infer_node_datatype(self, model): 83 | node = self.onnx_node 84 | model.set_tensor_datatype(node.output[0], DataType["FLOAT32"]) 85 | 86 | def execute_node(self, context, graph): 87 | node = self.onnx_node 88 | # save inputs 89 | inp_tensor = context[node.input[0]] 90 | scale = context[node.input[1]] 91 | zeropt = context[node.input[2]] 92 | input_bit_width = context[node.input[3]] 93 | output_bit_width = context[node.input[4]] 94 | # save attributes 95 | rounding_mode = self.get_nodeattr("rounding_mode") 96 | # calculate output 97 | ret = trunc(inp_tensor, scale, zeropt, input_bit_width, output_bit_width, rounding_mode) 98 | # set context according to output name 99 | context[node.output[0]] = ret 100 | 101 | def verify_node(self): 102 | pass 103 | -------------------------------------------------------------------------------- /src/qonnx/custom_op/registry.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 
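# --- Illustrative sketch for the trunc()/Trunc op shown above ---------------
# (not part of the repository). With scale=1, zeropt=0 and FLOOR rounding,
# truncating an 8-bit value to 4 bits drops the 4 least significant bits,
# so 200 -> 200 // 16 == 12.
import numpy as np
from qonnx.custom_op.general.trunc import trunc

y = trunc(np.array([200.0], dtype=np.float32), scale=np.float32(1.0),
          zeropt=np.float32(0.0), input_bit_width=8, output_bit_width=4,
          rounding_mode="FLOOR")
print(y)  # -> [12.]
# -----------------------------------------------------------------------------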
9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import importlib 30 | 31 | from qonnx.util.basic import get_preferred_onnx_opset 32 | 33 | 34 | def getCustomOp(node, onnx_opset_version=get_preferred_onnx_opset(), brevitas_exception=True): 35 | "Return a QONNX CustomOp instance for the given ONNX node, if it exists." 36 | op_type = node.op_type 37 | domain = node.domain 38 | if brevitas_exception: 39 | # transparently resolve Brevitas domain ops to qonnx ones 40 | domain = domain.replace("onnx.brevitas", "qonnx.custom_op.general") 41 | try: 42 | opset_module = importlib.import_module(domain) 43 | assert type(opset_module.custom_op) is dict, "custom_op dict not found in Python module %s" % domain 44 | inst_wrapper = opset_module.custom_op[op_type] 45 | inst = inst_wrapper(node, onnx_opset_version=onnx_opset_version) 46 | return inst 47 | except ModuleNotFoundError: 48 | raise Exception("Could not load custom opset %s, check your PYTHONPATH" % domain) 49 | except KeyError: 50 | raise Exception("Op %s not found in custom opset %s" % (op_type, domain)) 51 | -------------------------------------------------------------------------------- /src/qonnx/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 
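# --- Illustrative usage sketch for getCustomOp shown above ------------------
# (not part of the repository). The Quant node below is hypothetical; its
# domain selects the qonnx.custom_op.general opset, where "Quant" maps to the
# IntQuant implementation (see the custom_op dict earlier in this dump).
from onnx import helper
from qonnx.custom_op.registry import getCustomOp

quant_node = helper.make_node(
    "Quant", ["x", "scale", "zeropt", "bitwidth"], ["x_quant"],
    domain="qonnx.custom_op.general",
    signed=1, narrow=0, rounding_mode="ROUND",
)
inst = getCustomOp(quant_node)
print(inst.get_nodeattr("rounding_mode"))  # -> ROUND
# -----------------------------------------------------------------------------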
17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/float_model.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/float_model.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/quant_model.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/quant_model.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/bsd300x3-espcn/subpixel/float_model.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/bsd300x3-espcn/subpixel/float_model.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/bsd300x3-espcn/subpixel/quant_model.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/bsd300x3-espcn/subpixel/quant_model.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/bsd300x3-espcn/test_data/input_0.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/bsd300x3-espcn/test_data/input_0.pb -------------------------------------------------------------------------------- /src/qonnx/data/onnx/eltwise_chanlast_testcase.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/eltwise_chanlast_testcase.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/floatquant_exec/README.md: -------------------------------------------------------------------------------- 1 | Sample model for testing FloatQuant execution with exported graph. Generated with Brevitas (Commit: 904bbeaafaae5adb5c965af8d6b95120b7d1589a), using the code below. 
2 | 3 | ```python 4 | # Create the Brevitas model 5 | brevitas_model = qnn.QuantLinear( 6 | 3, 16, weight_quant=Fp8e4m3OCPWeightPerTensorFloat, input_quant=Fp8e4m3OCPActPerTensorFloat 7 | ) 8 | # important to put into eval mode before export 9 | brevitas_model.eval() 10 | # Export the Brevitas model to QONNX format 11 | export_path = "qonnx_act_weight_fp8.onnx" 12 | input_shape = (1, 3) # Example input shape, adjust as needed 13 | dummy_input = torch.randn(input_shape) 14 | export_qonnx(brevitas_model, dummy_input, export_path) 15 | 16 | input_values = np.random.rand(*input_shape).astype(np.float32) 17 | np.save("input.npy", input_values) 18 | 19 | activation = {} 20 | 21 | def get_activation(name): 22 | def hook(model, input, output): 23 | activation[name] = output.detach().value.numpy() 24 | 25 | return hook 26 | 27 | brevitas_model.input_quant.register_forward_hook(get_activation("input_quant")) 28 | brevitas_model.weight_quant.register_forward_hook(get_activation("weight_quant")) 29 | 30 | # Get the output from the Brevitas model 31 | brevitas_output = brevitas_model(torch.tensor(input_values)).detach().numpy() 32 | np.save("output.npy", brevitas_output) 33 | np.savez("activation.npz", **activation) 34 | ``` 35 | -------------------------------------------------------------------------------- /src/qonnx/data/onnx/floatquant_exec/qonnx_act_weight_fp8.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/floatquant_exec/qonnx_act_weight_fp8.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/floatquant_exec/test_data/activation.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/floatquant_exec/test_data/activation.npz -------------------------------------------------------------------------------- /src/qonnx/data/onnx/floatquant_exec/test_data/input.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/floatquant_exec/test_data/input.npy -------------------------------------------------------------------------------- /src/qonnx/data/onnx/floatquant_exec/test_data/output.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/floatquant_exec/test_data/output.npy -------------------------------------------------------------------------------- /src/qonnx/data/onnx/matmul_update/sdp.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/matmul_update/sdp.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/mnist-conv/README.md: -------------------------------------------------------------------------------- 1 | 2 | Source: https://github.com/onnx/models/tree/master/vision/classification/mnist#model 3 | -------------------------------------------------------------------------------- /src/qonnx/data/onnx/mnist-conv/model.onnx: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/mnist-conv/model.onnx -------------------------------------------------------------------------------- /src/qonnx/data/onnx/mnist-conv/test_data_set_0/input_0.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/mnist-conv/test_data_set_0/input_0.pb -------------------------------------------------------------------------------- /src/qonnx/data/onnx/mnist-conv/test_data_set_0/output_0.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/mnist-conv/test_data_set_0/output_0.pb -------------------------------------------------------------------------------- /src/qonnx/data/onnx/residual_block_clean.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/data/onnx/residual_block_clean.onnx -------------------------------------------------------------------------------- /src/qonnx/transformation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/transformation/__init__.py -------------------------------------------------------------------------------- /src/qonnx/transformation/change_batchsize.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | from qonnx.core.modelwrapper import ModelWrapper 30 | from qonnx.transformation.base import Transformation 31 | 32 | 33 | class ChangeBatchSize(Transformation): 34 | """Change the batch size dimension to the given value for the entire graph 35 | by changing it for the global input/output and removing all intermediate 36 | shapes (will need a call to shape inference to restore shapes). 37 | Will attempt to handle any Reshape nodes with constant shape parameters by 38 | changing the batch size dimension value in the parameter.""" 39 | 40 | def __init__(self, bsize): 41 | super().__init__() 42 | self.bsize = int(bsize) 43 | 44 | def apply(self, model: ModelWrapper): 45 | onnx_model = model.model 46 | bsize = self.bsize 47 | onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_value = bsize 48 | onnx_model.graph.output[0].type.tensor_type.shape.dim[0].dim_value = bsize 49 | while len(onnx_model.graph.value_info) > 0: 50 | onnx_model.graph.value_info.remove(onnx_model.graph.value_info[0]) 51 | reshape_nodes = model.get_nodes_by_op_type("Reshape") 52 | for reshape_node in reshape_nodes: 53 | rs_param_name = reshape_node.input[1] 54 | rs_param = model.get_initializer(rs_param_name) 55 | if rs_param is not None: 56 | rs_param = rs_param.copy() 57 | rs_param[0] = bsize 58 | model.set_initializer(rs_param_name, rs_param) 59 | return (model, False) 60 | -------------------------------------------------------------------------------- /src/qonnx/transformation/double_to_single_float.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
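# --- Illustrative usage sketch for ChangeBatchSize shown above --------------
# (not part of the repository; "model.onnx" is a placeholder path). Shape
# inference is re-run afterwards to restore the intermediate shapes that the
# transformation removes.
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.change_batchsize import ChangeBatchSize
from qonnx.transformation.infer_shapes import InferShapes

model = ModelWrapper("model.onnx")
model = model.transform(ChangeBatchSize(8))
model = model.transform(InferShapes())
# -----------------------------------------------------------------------------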
28 | 29 | import numpy as np 30 | 31 | from qonnx.transformation.base import Transformation 32 | 33 | 34 | class DoubleToSingleFloat(Transformation): 35 | """Convert any float64 initializers to float32.""" 36 | 37 | def apply(self, model): 38 | graph_modified = False 39 | init_names = [x.name for x in model.graph.initializer] 40 | for nm in init_names: 41 | init = model.get_initializer(nm) 42 | if init.dtype == np.float64: 43 | init_f32 = init.astype(np.float32) 44 | model.set_initializer(nm, init_f32) 45 | graph_modified = True 46 | return (model, graph_modified) 47 | -------------------------------------------------------------------------------- /src/qonnx/transformation/expose_intermediate.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of QONNX nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
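# --- Illustrative usage sketch for DoubleToSingleFloat shown above ----------
# (not part of the repository; "model.onnx" is a placeholder path). Any
# float64 initializer in the graph is cast down to float32 in place.
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.double_to_single_float import DoubleToSingleFloat

model = ModelWrapper("model.onnx")
model = model.transform(DoubleToSingleFloat())
# -----------------------------------------------------------------------------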
28 | 29 | from qonnx.core.modelwrapper import ModelWrapper 30 | from qonnx.transformation.base import Transformation 31 | 32 | 33 | class ExposeIntermediateTensorsLambda(Transformation): 34 | def __init__(self, tensor_filter=lambda tname, model: True): 35 | super().__init__() 36 | self.tensor_filter = tensor_filter 37 | 38 | def apply(self, model: ModelWrapper): 39 | all_tensor_names = model.get_all_tensor_names() 40 | for tname in all_tensor_names: 41 | if self.tensor_filter(tname, model): 42 | # check whether this tensor is already in the outputs 43 | if tname in [x.name for x in model.graph.output]: 44 | # already part of outputs, skip 45 | continue 46 | else: 47 | # append ValueInfo to outputs 48 | tensor_vi = model.get_tensor_valueinfo(tname) 49 | model.graph.output.append(tensor_vi) 50 | # remove existing ValueInfo to avoid duplicate 51 | model.graph.value_info.remove(tensor_vi) 52 | 53 | return (model, False) 54 | 55 | 56 | class ExposeIntermediateTensorsPatternList(ExposeIntermediateTensorsLambda): 57 | def pattern_filter(self, tname, model): 58 | if self.dynamic_only: 59 | return any([(pat in tname) and (model.get_initializer(tname) is None) for pat in self.pattern_list]) 60 | else: 61 | return any([(pat in tname) for pat in self.pattern_list]) 62 | 63 | def __init__(self, pattern_list, dynamic_only=True): 64 | self.pattern_list = pattern_list 65 | self.dynamic_only = dynamic_only 66 | super().__init__(tensor_filter=self.pattern_filter) 67 | -------------------------------------------------------------------------------- /src/qonnx/transformation/extend_partition.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020, Xilinx 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of QONNX nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
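# --- Illustrative usage sketch for ExposeIntermediateTensorsPatternList -----
# shown above (not part of the repository; "model.onnx" is a placeholder path).
# Every dynamic tensor whose name contains one of the patterns becomes an
# extra graph output, which is handy for capturing intermediate activations.
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.expose_intermediate import ExposeIntermediateTensorsPatternList

model = ModelWrapper("model.onnx")
model = model.transform(ExposeIntermediateTensorsPatternList(["Relu", "MultiThreshold"]))
# -----------------------------------------------------------------------------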
28 | 29 | from qonnx.core.modelwrapper import ModelWrapper 30 | from qonnx.transformation.base import Transformation 31 | from qonnx.transformation.general import SortGraph 32 | from qonnx.util.basic import get_by_name 33 | 34 | 35 | class ExtendPartition(Transformation): 36 | """Extends GenericPartition type nodes by inserting the graph pointed to by 37 | the model attribute. 38 | Argument 0: extend_index 39 | * List that contains the node indices of the GenericPartition nodes 40 | """ 41 | 42 | def __init__(self, extend_index): 43 | super().__init__() 44 | self.extend_index = extend_index 45 | 46 | def apply(self, model): 47 | graph = model.graph 48 | graph_modified = False 49 | 50 | partition_nodes_dict = {ind: n for ind, n in enumerate(graph.node) if n.op_type == "GenericPartition"} 51 | 52 | for k, v in partition_nodes_dict.items(): 53 | if k in self.extend_index: 54 | path_to_model = get_by_name(v.attribute, "model", "name").s.decode("utf-8") 55 | model_partition = ModelWrapper(path_to_model) 56 | 57 | # Append nodes 58 | for partition_node in model_partition.graph.node: 59 | graph.node.append(partition_node) 60 | 61 | # Append value infos 62 | partition_valueinfos = [x.name for x in model_partition.graph.value_info] 63 | for vi_name in partition_valueinfos: 64 | vi = model_partition.get_tensor_valueinfo(vi_name) 65 | graph.value_info.append(vi) 66 | 67 | # Append initializers 68 | partition_initializers = [x for x in model_partition.graph.initializer] 69 | for i in partition_initializers: 70 | graph.initializer.append(i) 71 | 72 | # Append tensor annotations, except for the input/output tensors 73 | # of the partitioned graph, as these will be present in the 74 | # 'upper' model. 75 | in_out_names = [x.name for x in model_partition.graph.input] 76 | in_out_names += [x.name for x in model_partition.graph.output] 77 | partition_annotations = [ 78 | x for x in model_partition.graph.quantization_annotation if x.tensor_name not in in_out_names 79 | ] 80 | for a in partition_annotations: 81 | graph.quantization_annotation.append(a) 82 | 83 | graph.node.remove(v) 84 | graph_modified = True 85 | 86 | if graph_modified: 87 | model = model.transform(SortGraph()) 88 | 89 | return (model, graph_modified) 90 | -------------------------------------------------------------------------------- /src/qonnx/transformation/extract_conv_bias.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, Xilinx 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of QONNX nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
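# --- Illustrative usage sketch for ExtendPartition shown above --------------
# (not part of the repository; "partitioned.onnx" is a placeholder for a model
# that already contains GenericPartition nodes). The indices passed in are
# positions in graph.node, as expected by the transformation above.
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.extend_partition import ExtendPartition

model = ModelWrapper("partitioned.onnx")
part_inds = [i for i, n in enumerate(model.graph.node) if n.op_type == "GenericPartition"]
model = model.transform(ExtendPartition(part_inds))  # inline all partitions
# -----------------------------------------------------------------------------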
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import warnings 30 | from onnx import helper 31 | 32 | from qonnx.transformation.base import Transformation 33 | 34 | 35 | class ExtractBiasFromConv(Transformation): 36 | """ 37 | Extracts the (optional) Bias from a Conv(Transpose) node and inserts it behind the 38 | Conv(Transpose) node as an Add node. 39 | """ 40 | 41 | def apply(self, model): 42 | graph = model.graph 43 | node_ind = 0 44 | for n in graph.node: 45 | node_ind += 1 46 | if n.op_type in ["Conv", "ConvTranspose"]: 47 | # Check if the node has a bias input 48 | if len(n.input) > 2: 49 | # Extract bias 50 | bias = model.get_initializer(n.input[2]) 51 | if bias is None: 52 | warnings.warn(f"Could not extract bias from node {n}") 53 | continue 54 | 55 | # Insert bias as Add node behind the Conv node 56 | out_shape = model.get_tensor_shape(n.output[0]) 57 | # Reshape bias tensor 58 | add_shape = [1] * len(out_shape) 59 | # ToDo: this must change to "add_shape[-1] = bias.shape[0]" when 60 | # the channels last layout comes around. 61 | bias_shape = model.get_tensor_shape(n.input[2]) 62 | add_shape[1] = bias_shape[0] 63 | if bias is not None: 64 | model.set_initializer(n.input[2], bias.reshape(add_shape)) 65 | 66 | act_add_tensor = helper.make_tensor_value_info( 67 | model.make_new_valueinfo_name(), 68 | model.get_tensor_valueinfo(n.output[0]).type.tensor_type.elem_type, 69 | out_shape, 70 | ) 71 | graph.value_info.append(act_add_tensor) 72 | 73 | add_node = helper.make_node( 74 | "Add", 75 | [act_add_tensor.name, n.input[2]], 76 | [n.output[0]], 77 | ) 78 | graph.node.insert(node_ind, add_node) 79 | 80 | # Repoint Conv output and remove bias tensor 81 | n.output[0] = act_add_tensor.name 82 | n.input.remove(n.input[2]) 83 | 84 | return model, True 85 | 86 | return model, False 87 | -------------------------------------------------------------------------------- /src/qonnx/transformation/infer_shapes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 
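# --- Illustrative usage sketch for ExtractBiasFromConv shown above ----------
# (not part of the repository; "model.onnx" is a placeholder path). Conv and
# ConvTranspose biases are split off into standalone Add nodes.
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.extract_conv_bias import ExtractBiasFromConv

model = ModelWrapper("model.onnx")
model = model.transform(ExtractBiasFromConv())
# -----------------------------------------------------------------------------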
17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import onnx.shape_inference as si 30 | 31 | import qonnx.custom_op.registry as registry 32 | from qonnx.core.modelwrapper import ModelWrapper 33 | from qonnx.transformation.base import Transformation 34 | from qonnx.util.basic import is_finn_op 35 | 36 | 37 | def _make_shape_compatible_op(node, model): 38 | """Return a shape-compatible non-QONNX op for a given QONNX op. Used for 39 | shape inference with custom ops.""" 40 | assert is_finn_op(node.domain), "Node domain is not set to qonnx.*" 41 | op_type = node.op_type 42 | try: 43 | # lookup op_type in registry of CustomOps 44 | inst = registry.getCustomOp(node) 45 | return inst.make_shape_compatible_op(model) 46 | except KeyError: 47 | # exception if op_type is not supported 48 | raise Exception("Custom op_type %s is currently not supported." % op_type) 49 | 50 | 51 | def _hide_finn_ops(model): 52 | """Replace any QONNX ops by shape-compatible ones, and return a dict that 53 | can be used to map the string representations of the new (shape-compatible) 54 | ops back to the old ops.""" 55 | hidden_ops = {} 56 | node_ind = 0 57 | for node in model.graph.node: 58 | node_ind += 1 59 | if is_finn_op(node.domain): 60 | new_node = _make_shape_compatible_op(node, model) 61 | # keep old node name to help debug shape inference issues 62 | new_node.name = node.name 63 | hidden_ops[str(new_node)] = node 64 | model.graph.node.insert(node_ind, new_node) 65 | model.graph.node.remove(node) 66 | return hidden_ops 67 | 68 | 69 | def _restore_finn_ops(model, hidden_ops): 70 | """Replace any shape-compatible ops with the QONNX ops that originally 71 | generated them.""" 72 | node_ind = 0 73 | for node in model.graph.node: 74 | node_ind += 1 75 | try: 76 | old_node = hidden_ops[str(node)] 77 | model.graph.node.insert(node_ind, old_node) 78 | model.graph.node.remove(node) 79 | except KeyError: 80 | pass 81 | 82 | 83 | class InferShapes(Transformation): 84 | """Ensure every tensor in the model has a specified shape (ValueInfo).""" 85 | 86 | def apply(self, model): 87 | # hide your riches! 88 | hidden_ops = _hide_finn_ops(model) 89 | # call regular ONNX shape inference 90 | model = ModelWrapper(si.infer_shapes(model.model)) 91 | # bring back hidden ops 92 | _restore_finn_ops(model, hidden_ops) 93 | return (model, False) 94 | -------------------------------------------------------------------------------- /src/qonnx/transformation/insert_topk.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 
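# --- Illustrative usage sketch for InferShapes shown above ------------------
# (not part of the repository; "model.onnx" is a placeholder path). QONNX
# custom ops are temporarily swapped for shape-compatible standard ops so that
# stock ONNX shape inference can annotate every tensor, then swapped back.
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.infer_shapes import InferShapes

model = ModelWrapper("model.onnx")
model = model.transform(InferShapes())
# -----------------------------------------------------------------------------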
3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import numpy as np 30 | from onnx import TensorProto 31 | from onnx import helper as oh 32 | 33 | from qonnx.core.datatype import DataType 34 | from qonnx.transformation.base import Transformation 35 | 36 | 37 | class InsertTopK(Transformation): 38 | """Add TopK node at the network output and replace the graph output with 39 | the TopK indices.""" 40 | 41 | def __init__(self, k=5, axis=-1, largest=1, sorted=1): 42 | super().__init__() 43 | self.k = k 44 | self.axis = axis 45 | self.largest = largest 46 | self.sorted = sorted 47 | 48 | def apply(self, model): 49 | # get name of output tensor 50 | graph_out_name = model.graph.output[0].name 51 | # find final node 52 | final_node = model.find_producer(graph_out_name) 53 | # if a top-select op is already present, do nothing 54 | if final_node.op_type == "TopK": 55 | return (model, False) 56 | else: 57 | out_shape = model.get_tensor_shape(graph_out_name) 58 | out_dtype = model.get_tensor_datatype(graph_out_name) 59 | # adjust shape 60 | out_shape[self.axis] = self.k 61 | # make new buffer 62 | k_tensor = np.array([self.k]).astype(np.int64) 63 | k_value = oh.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.INT64, [1]) 64 | topk_values = oh.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.FLOAT, out_shape) 65 | topk_indices = oh.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.INT64, out_shape) 66 | model.graph.value_info.append(k_value) 67 | model.set_tensor_datatype(k_value.name, out_dtype) # TODO set to int64 68 | model.graph.value_info.append(topk_values) 69 | model.set_tensor_datatype(topk_values.name, out_dtype) 70 | # create and append topk node 71 | model.set_initializer(k_value.name, k_tensor) 72 | topk_node = oh.make_node( 73 | "TopK", 74 | inputs=[graph_out_name, k_value.name], 75 | outputs=[topk_values.name, topk_indices.name], 76 | axis=self.axis, 77 | largest=self.largest, 78 | sorted=self.sorted, 79 | ) 80 | 
model.graph.node.append(topk_node) 81 | # replace the existing output definition with topk indices 82 | model.graph.output.insert(0, topk_indices) 83 | model.graph.output.pop(1) 84 | # set quantization annotation for indices 85 | # minimal output dtype for TopK indices depends on num. classes 86 | # assuming UINT32 is large enough for now 87 | model.set_tensor_datatype(topk_indices.name, DataType["UINT32"]) 88 | return (model, True) 89 | -------------------------------------------------------------------------------- /src/qonnx/transformation/make_input_chanlast.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
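A minimal usage sketch for the InsertTopK transformation defined in insert_topk.py above; the filenames are placeholders and the model is assumed to be an already cleaned-up classification network with a single output.

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.insert_topk import InsertTopK

# load a cleaned-up classification model (placeholder filename)
model = ModelWrapper("model_clean.onnx")
# append a TopK node after the final layer; the graph output becomes the top-1 class index
model = model.transform(InsertTopK(k=1))
model.save("model_topk.onnx")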
28 | 29 | from onnx import helper as oh 30 | 31 | import qonnx.core.data_layout as data_layout 32 | from qonnx.transformation.base import Transformation 33 | 34 | 35 | class MakeInputChannelsLast(Transformation): 36 | """For networks with an input using the NCx data layout, add a transpose node 37 | at the beginning and mark the input as using NxC (channels-last).""" 38 | 39 | def __init__(self): 40 | super().__init__() 41 | 42 | def apply(self, model): 43 | graph_in_name = model.graph.input[0].name 44 | graph_new_in_name = graph_in_name + "_transposed" 45 | orig_ishape = model.get_tensor_shape(graph_in_name) 46 | ndim = len(orig_ishape) 47 | if ndim == 2: 48 | # assume NC layout, no action needed 49 | return (model, False) 50 | elif ndim > 2: 51 | orig_layout = model.get_tensor_layout(graph_in_name) 52 | if orig_layout == data_layout.get_channels_last_layout_for_ndims(ndim): 53 | # already marked as channels-last, no action needed 54 | return (model, False) 55 | else: 56 | # determine channels-last shape and required permutation to 57 | # go from channels-last to previous format 58 | new_perm = list(range(ndim)) 59 | new_perm.remove(ndim - 1) 60 | new_perm.insert(1, ndim - 1) 61 | new_ishape = list(orig_ishape) 62 | new_ishape.remove(orig_ishape[1]) 63 | new_ishape.append(orig_ishape[1]) 64 | # create and insert transpose node 65 | t_trans_node = oh.make_node("Transpose", [graph_in_name], [graph_new_in_name], perm=new_perm) 66 | model.graph.node.insert(0, t_trans_node) 67 | # rewire all consumers of original input to transpose's output 68 | consumers = model.find_consumers(graph_in_name) 69 | for cons in consumers: 70 | if cons == t_trans_node: 71 | continue 72 | for i, ci in enumerate(cons.input): 73 | if ci == graph_in_name: 74 | cons.input[i] = graph_new_in_name 75 | # set tensor shapes and layouts 76 | model.set_tensor_shape(graph_in_name, new_ishape) 77 | model.set_tensor_shape(graph_new_in_name, orig_ishape) 78 | model.set_tensor_layout(graph_in_name, data_layout.get_channels_last_layout_for_ndims(ndim)) 79 | model.set_tensor_layout( 80 | graph_new_in_name, 81 | data_layout.get_channels_first_layout_for_ndims(ndim), 82 | ) 83 | # single iteration is enough so return model_was_changed=False 84 | return (model, False) 85 | -------------------------------------------------------------------------------- /src/qonnx/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fastmachinelearning/qonnx/7bb3a27d8b6fbae8649130cb18095fb2933b54b4/src/qonnx/util/__init__.py -------------------------------------------------------------------------------- /src/qonnx/util/config.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020, Xilinx 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 
13 | # 14 | # * Neither the name of QONNX nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import json 30 | 31 | from qonnx.custom_op.registry import getCustomOp 32 | 33 | 34 | def extract_model_config_to_json(model, json_filename, attr_names_to_extract): 35 | """Create a json file with layer name -> attribute mappings extracted from the 36 | model. The created json file can later be applied to a model with 37 | qonnx.transformation.general.ApplyConfig.""" 38 | 39 | cfg = dict() 40 | cfg["Defaults"] = dict() 41 | for n in model.graph.node: 42 | oi = getCustomOp(n) 43 | layer_dict = dict() 44 | for attr in attr_names_to_extract: 45 | try: 46 | layer_dict[attr] = oi.get_nodeattr(attr) 47 | except AttributeError: 48 | pass 49 | if len(layer_dict) > 0: 50 | cfg[n.name] = layer_dict 51 | with open(json_filename, "w") as f: 52 | json.dump(cfg, f, indent=2) 53 | -------------------------------------------------------------------------------- /src/qonnx/util/convert.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import clize 30 | 31 | from qonnx.core.modelwrapper import ModelWrapper 32 | from qonnx.transformation.qcdq_to_qonnx import QCDQToQuant 33 | from qonnx.transformation.qonnx_to_qcdq import QuantToQCDQ 34 | 35 | CONVERT_MODE_QCDQ = "qcdq" 36 | CONVERT_MODE_QUANT = "quant" 37 | 38 | convert_modes = {CONVERT_MODE_QCDQ, CONVERT_MODE_QUANT} 39 | 40 | convert_mode_options = clize.parameters.mapped( 41 | [ 42 | (CONVERT_MODE_QCDQ, [CONVERT_MODE_QCDQ], "Convert from Quant to QCDQ"), 43 | (CONVERT_MODE_QUANT, [CONVERT_MODE_QUANT], "Convert from QCDQ to Quant"), 44 | ] 45 | ) 46 | 47 | 48 | def convert(input_model_file, *, output_style: convert_mode_options, output_file: str = None): 49 | """Convert an ONNX file from one style of quantization to another, where possible. 50 | Please see the documentation on the QuantToQCDQ and QCDQToQuant 51 | transformations to learn more about the particular limitations. 52 | 53 | :param input_model_file: Filename for the input ONNX model. 54 | :param output_style: Quantization style for the output. 55 | :param output_file: If specified, write the output ONNX model to this filename. 56 | Otherwise, will default to the input file with an _output_style suffix. 57 | """ 58 | model = ModelWrapper(input_model_file) 59 | if output_style == CONVERT_MODE_QCDQ: 60 | model = model.transform(QuantToQCDQ()) 61 | elif output_style == CONVERT_MODE_QUANT: 62 | model = model.transform(QCDQToQuant()) 63 | else: 64 | print("Unknown output_style for conversion: %s" % output_style) 65 | exit(-1) 66 | if output_file is None: 67 | output_file = input_model_file.replace(".onnx", "_%s.onnx" % output_style) 68 | model.save(output_file) 69 | 70 | 71 | def main(): 72 | clize.run(convert) 73 | 74 | 75 | if __name__ == "__main__": 76 | main() 77 | -------------------------------------------------------------------------------- /src/qonnx/util/onnx.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 Advanced Micro Devices, Inc. 2 | # Copyright (c) 2020-21 Xilinx, Inc. 3 | # All rights reserved. 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are met: 7 | # 8 | # * Redistributions of source code must retain the above copyright notice, this 9 | # list of conditions and the following disclaimer. 10 | # 11 | # * Redistributions in binary form must reproduce the above copyright notice, 12 | # this list of conditions and the following disclaimer in the documentation 13 | # and/or other materials provided with the distribution. 14 | # 15 | # * Neither the name of AMD nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 
18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | 30 | import numpy as np 31 | import onnx 32 | 33 | import qonnx.core.data_layout as DataLayout 34 | 35 | # optypes with the (elementwise) monotonic property 36 | monotonic_optypes = { 37 | "Identity", 38 | "Relu", 39 | "LeakyRelu", 40 | "Clip", 41 | "Selu", 42 | "Celu", 43 | "Elu", 44 | "Sigmoid", 45 | "HardSigmoid", 46 | "Tanh", 47 | "Softplus", 48 | "Exp", 49 | "Log", 50 | "Sqrt", 51 | "Erf", 52 | "Floor", 53 | "Ceil", 54 | "Round", 55 | "Sign", 56 | } 57 | 58 | # optypes that operate in an elementwise fashion 59 | # (with numpy-style broadcasting when shapes mismatch for binary ops) 60 | eltwise_optypes = monotonic_optypes | { 61 | "Quant", 62 | "Mul", 63 | "Div", 64 | "Sub", 65 | "Add", 66 | "Mod", 67 | "And", 68 | "Or", 69 | "Xor", 70 | "Equal", 71 | "Less", 72 | "LessOrEqual", 73 | "Greater", 74 | "GreaterOrEqual", 75 | "BitwiseAnd", 76 | "BitwiseOr", 77 | "BitwiseXor", 78 | "Maximum", 79 | "Minimum", 80 | "BitShift", 81 | "Pow", 82 | } 83 | 84 | 85 | def is_eltwise_monotonic_optype(optype): 86 | "Checks whether given ONNX optype is a monotonic elementwise op." 87 | return optype in monotonic_optypes 88 | 89 | 90 | def is_eltwise_optype(optype): 91 | "Checks whether given ONNX optype is an elementwise op." 92 | return optype in eltwise_optypes 93 | 94 | 95 | def valueinfo_to_tensor(vi): 96 | """Creates an all-zeroes numpy tensor from a ValueInfoProto.""" 97 | 98 | dims = [x.dim_value for x in vi.type.tensor_type.shape.dim] 99 | return np.zeros(dims, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type]) 100 | 101 | 102 | def nchw_to_nhwc(t, model, idx, reverse=False): 103 | """Converts between NCHW <-> NHWC layouts for tensor t by inserting a transpose. 104 | If reverse=False, t is assumed NCHW and we insert transpose to convert NCHW -> NHWC 105 | If reverse=True, t is assumed NHWC and we insert transpose to convert NHWC -> NCHW. 
106 | """ 107 | graph = model.graph 108 | # create new NHWC tensor 109 | t_shape = model.get_tensor_shape(t) 110 | bs = t_shape[0] 111 | ch = t_shape[1] 112 | height = t_shape[2] 113 | width = t_shape[3] 114 | t_trans = onnx.helper.make_tensor_value_info( 115 | model.make_new_valueinfo_name(), 116 | onnx.TensorProto.FLOAT, 117 | (bs, height, width, ch), # NHWC 118 | ) 119 | graph.value_info.append(t_trans) 120 | dt = model.get_tensor_datatype(t) 121 | t_trans = t_trans.name 122 | model.set_tensor_datatype(t_trans, dt) 123 | model.set_tensor_layout(t_trans, DataLayout.NHWC) 124 | # NCHW <-> NHWC transpose 125 | if reverse: 126 | t_trans_node = onnx.helper.make_node("Transpose", [t_trans], [t], perm=[0, 3, 1, 2]) 127 | else: 128 | t_trans_node = onnx.helper.make_node("Transpose", [t], [t_trans], perm=[0, 2, 3, 1]) 129 | graph.node.insert(idx, t_trans_node) 130 | return t_trans 131 | -------------------------------------------------------------------------------- /src/qonnx/util/prune_channels.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import clize 30 | 31 | from qonnx.core.modelwrapper import ModelWrapper 32 | from qonnx.transformation.pruning import PruneChannels 33 | 34 | 35 | def prune_channels(input_filename, prunespec_filename, *, lossy=True, output_filename=""): 36 | """ 37 | Prune channels from specified tensors and their dependencies from a model. 38 | The model must have already been cleaned up by qonnx-cleanup, including the 39 | --extract-conv-bias=True --preserve-qnt-ops=False options. 40 | 41 | :param input_filename: Filename for the input ONNX model 42 | :param prunespec_filename: Filename for the pruning specification, formatted as a Python dict 43 | formatted as {tensor_name : {axis : {channels}}}. See test_pruning.py for examples. 
44 | :param lossy: Whether to perform lossy pruning, see the PruneChannels transformation for description. 45 | :param output_filename: If specified, write the resulting pruned model to this filename. Otherwise, 46 | the input_filename will be used with a _pruned suffix. 47 | """ 48 | model = ModelWrapper(input_filename) 49 | with open(prunespec_filename) as f: 50 | prunespec_dict = dict(eval(f.read())) 51 | pruned_model = model.transform(PruneChannels(prunespec_dict, lossy)) 52 | if output_filename == "": 53 | output_filename = input_filename.replace(".onnx", "_pruned.onnx") 54 | pruned_model.save(output_filename) 55 | 56 | 57 | def main(): 58 | clize.run(prune_channels) 59 | 60 | 61 | if __name__ == "__main__": 62 | main() 63 | -------------------------------------------------------------------------------- /src/qonnx/util/random_reseed.py: -------------------------------------------------------------------------------- 1 | def reseed(newseed): 2 | import numpy 3 | import onnxruntime 4 | import tensorflow 5 | 6 | print(f"pytest-randomly: reseed with {newseed}") 7 | onnxruntime.set_seed(newseed) 8 | tensorflow.random.set_seed(newseed) 9 | numpy.random.seed(seed=newseed) 10 | -------------------------------------------------------------------------------- /src/qonnx/util/to_channels_last.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import clize 30 | 31 | from qonnx.core.modelwrapper import ModelWrapper 32 | from qonnx.transformation.channels_last import ConvertToChannelsLastAndClean 33 | 34 | 35 | def to_channels_last(in_file, *, make_input_channels_last=False, out_file=None): 36 | """Execute a set of graph transformations to convert an ONNX file to the channels last data format. 37 | The input file should have been previously cleaned by the cleanup transformation or command-line tool.
38 | 39 | :param in_file: Filename for the input ONNX model 40 | :param make_input_channels_last: Sets if the input of the model should also be converted to the channels 41 | last data layout (True) or if a transpose node should be left at the beginning of the graph (False). 42 | Defaults to False. 43 | :param out_file: If set, filename for the output ONNX model. Set to in_file with _chan_last 44 | suffix otherwise. 45 | """ 46 | 47 | # Execute transformation 48 | model = ModelWrapper(in_file) 49 | model = model.transform(ConvertToChannelsLastAndClean(make_input_channels_last=make_input_channels_last)) 50 | if out_file is None: 51 | out_file = in_file.replace(".onnx", "_channels_last.onnx") 52 | model.save(out_file) 53 | 54 | 55 | def main(): 56 | clize.run(to_channels_last) 57 | 58 | 59 | if __name__ == "__main__": 60 | main() 61 | -------------------------------------------------------------------------------- /tests/analysis/test_inference_cost_breakdown.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
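The clize-based utilities above (convert.py and to_channels_last.py) can also be called directly from Python; a minimal sketch with placeholder filenames:

from qonnx.util.convert import convert
from qonnx.util.to_channels_last import to_channels_last

# convert a Quant-style model to QCDQ form (placeholder filenames)
convert("model_quant.onnx", output_style="qcdq", output_file="model_qcdq.onnx")
# convert a cleaned-up model to channels-last, including the graph input
to_channels_last("model_clean.onnx", make_input_channels_last=True, out_file="model_nhwc.onnx")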
28 | 29 | import pytest 30 | 31 | import os 32 | import urllib.request 33 | 34 | from qonnx.analysis.inference_cost import aggregate_dict_keys 35 | from qonnx.core.modelwrapper import ModelWrapper 36 | from qonnx.util.cleanup import cleanup 37 | from qonnx.util.inference_cost import inference_cost as infca 38 | 39 | download_url = "https://github.com/onnx/models/raw/main/validated/vision/" 40 | download_url += "classification/resnet/model/resnet18-v1-7.onnx?download=" 41 | 42 | model_details = { 43 | "resnet18-v1-7": { 44 | "description": "Resnet18 Opset version 7.", 45 | "url": download_url, 46 | "enc": { 47 | "a": "op_mac_FLOAT32_FLOAT32", 48 | "b": "total_mem_w_bits", 49 | "c": "total_mem_w_elems", 50 | "d": "total_mem_o_bits", 51 | "e": "total_mem_o_elems", 52 | }, 53 | }, 54 | } 55 | 56 | 57 | def download_model(test_model, do_cleanup=False, return_modelwrapper=False): 58 | qonnx_url = model_details[test_model]["url"] 59 | # download test data 60 | dl_dir = "/tmp" 61 | dl_file = dl_dir + f"/{test_model}.onnx" 62 | ret = dl_file 63 | if not os.path.isfile(dl_file): 64 | urllib.request.urlretrieve(qonnx_url, dl_file) 65 | if do_cleanup: 66 | out_file = dl_dir + f"/{test_model}_clean.onnx" 67 | cleanup(dl_file, out_file=out_file, override_inpsize=1) 68 | ret = out_file 69 | if return_modelwrapper: 70 | ret = ModelWrapper(ret) 71 | return ret 72 | 73 | 74 | @pytest.mark.parametrize("test_model", model_details.keys()) 75 | def test_inference_cost_breakdown(test_model): 76 | test_details = model_details[test_model] 77 | model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) 78 | inf_cost = infca(model, discount_sparsity=False, cost_breakdown=True) 79 | assert inf_cost["node_cost"]["Conv_0"]["total_macs"] == 118013952 80 | assert inf_cost["node_cost"]["Conv_1"]["total_macs"] == 115605504 81 | assert inf_cost["optype_cost"]["Conv"]["total_macs"] == 1813561344 82 | t_cost = inf_cost["total_cost"] # total cost 83 | op_cost = aggregate_dict_keys(inf_cost["optype_cost"]) # cost per optype 84 | n_cost = aggregate_dict_keys(inf_cost["node_cost"]) # cost per node. 85 | enc = test_details["enc"] 86 | assert t_cost[enc["a"]] == op_cost[enc["a"]] == n_cost[enc["a"]], "inf discrepancy" 87 | assert t_cost[enc["b"]] == op_cost[enc["b"]] == n_cost[enc["b"]], "inf discrepancy" 88 | assert t_cost[enc["c"]] == op_cost[enc["c"]] == n_cost[enc["c"]], "inf discrepancy" 89 | assert t_cost[enc["d"]] == op_cost[enc["d"]] == n_cost[enc["d"]], "inf discrepancy" 90 | assert t_cost[enc["e"]] == op_cost[enc["e"]] == n_cost[enc["e"]], "inf discrepancy" 91 | -------------------------------------------------------------------------------- /tests/analysis/test_is_linear.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 
13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import onnx.helper as oh 30 | from onnx import TensorProto 31 | 32 | import qonnx.analysis.topology as ta 33 | from qonnx.core.modelwrapper import ModelWrapper 34 | from qonnx.transformation.infer_shapes import InferShapes 35 | from qonnx.util.basic import qonnx_make_model 36 | 37 | 38 | def test_is_linear_linear(): 39 | top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2]) 40 | add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2]) 41 | mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2]) 42 | top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2]) 43 | modelproto = qonnx_make_model( 44 | oh.make_graph( 45 | name="test", 46 | inputs=[top_in], 47 | outputs=[top_out], 48 | value_info=[add_param, mul_param], 49 | nodes=[ 50 | oh.make_node("Add", ["top_in", "add_param"], ["middle"]), 51 | oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]), 52 | ], 53 | ) 54 | ) 55 | model = ModelWrapper(modelproto) 56 | model = model.transform(InferShapes()) 57 | ret = model.analysis(ta.is_linear) 58 | assert ret["is_linear"] is True 59 | 60 | 61 | def test_is_linear_forked_node_output(): 62 | top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2]) 63 | add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2]) 64 | mul0_param = oh.make_tensor_value_info("mul0_param", TensorProto.FLOAT, [2]) 65 | mul1_param = oh.make_tensor_value_info("mul1_param", TensorProto.FLOAT, [2]) 66 | mul0_res = oh.make_tensor_value_info("mul0_res", TensorProto.FLOAT, [2]) 67 | mul1_res = oh.make_tensor_value_info("mul1_res", TensorProto.FLOAT, [2]) 68 | top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2]) 69 | modelproto = qonnx_make_model( 70 | oh.make_graph( 71 | name="test", 72 | inputs=[top_in], 73 | outputs=[top_out], 74 | value_info=[add_param, mul0_param, mul1_param, mul0_res, mul1_res], 75 | nodes=[ 76 | oh.make_node("Add", ["top_in", "add_param"], ["middle"]), 77 | oh.make_node("Mul", ["middle", "mul0_param"], ["mul0_res"]), 78 | oh.make_node("Mul", ["middle", "mul1_param"], ["mul1_res"]), 79 | oh.make_node("Add", ["mul0_res", "mul1_res"], ["top_out"]), 80 | ], 81 | ) 82 | ) 83 | model = ModelWrapper(modelproto) 84 | model = model.transform(InferShapes()) 85 | ret = model.analysis(ta.is_linear) 86 | assert ret["is_linear"] is False 87 | -------------------------------------------------------------------------------- /tests/analysis/test_matmul_mac_cost.py: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | 30 | from pkgutil import get_data 31 | 32 | import qonnx.util.inference_cost as infc 33 | from qonnx.core.modelwrapper import ModelWrapper 34 | from qonnx.util.cleanup import cleanup_model 35 | 36 | 37 | def test_matmul_mac_cost(): 38 | raw_model = get_data("qonnx", "data/onnx/matmul_update/sdp.onnx") 39 | model = ModelWrapper(raw_model) 40 | cleaned_model = cleanup_model(model) 41 | # Two Matmul layers with shape (i_shape, w_shape, o_shape), 42 | # L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32]) 43 | inf_cost_dict = infc.inference_cost(cleaned_model, discount_sparsity=False)["total_cost"] 44 | mac_cost = inf_cost_dict["op_mac_FLOAT32_FLOAT32"] # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576 45 | assert mac_cost == 1048576.0, "Error: discrepancy in mac cost." 46 | -------------------------------------------------------------------------------- /tests/analysis/test_range_analysis.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 
13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import pytest 30 | 31 | import numpy as np 32 | 33 | from qonnx.util.range_analysis import range_analysis 34 | from qonnx.util.test import download_model, test_model_details 35 | 36 | model_details_stuckchans = { 37 | "MobileNetv1-w4a4": { 38 | "stuck_chans": { 39 | "Quant_29_out0": [ 40 | (0, 0.4813263), 41 | (4, 0.0), 42 | (6, 0.0), 43 | (10, 0.0), 44 | (13, 0.0), 45 | (15, 0.0), 46 | (16, 0.0), 47 | (19, 0.0), 48 | (26, 0.0), 49 | (28, 0.0), 50 | ], 51 | "Quant_30_out0": [ 52 | (0, 0.0), 53 | (4, 0.0), 54 | (6, 0.0), 55 | (10, 0.0), 56 | (13, 0.15743902), 57 | (15, 0.0), 58 | (16, 0.47231707), 59 | (19, 0.0), 60 | (26, 0.0), 61 | (28, 0.0), 62 | ], 63 | "Quant_31_out0": [(42, 0.0)], 64 | "Quant_32_out0": [(42, 0.0)], 65 | "Quant_35_out0": [(102, 0.0)], 66 | "Quant_36_out0": [(102, 0.0)], 67 | } 68 | }, 69 | "FINN-CNV_W2A2": { 70 | "stuck_chans": { 71 | "Quant_10_out0": [(5, -1.0), (10, 1.0), (26, 1.0), (30, -1.0), (34, -1.0), (54, -1.0)], 72 | "Quant_11_out0": [(30, 1.0), (35, 1.0), (37, -1.0), (42, 1.0), (45, -1.0), (57, -1.0)], 73 | "Quant_13_out0": [(40, -1.0)], 74 | "Quant_14_out0": [(4, 1.0), (175, 1.0), (209, -1.0)], 75 | "Quant_16_out0": [ 76 | (5, -1.0), 77 | (50, 1.0), 78 | (77, -1.0), 79 | (95, -1.0), 80 | (153, 1.0), 81 | (186, 1.0), 82 | (199, 1.0), 83 | (209, -1.0), 84 | (241, 1.0), 85 | (329, 1.0), 86 | (340, 1.0), 87 | (465, -1.0), 88 | (478, -1.0), 89 | (510, -1.0), 90 | ], 91 | "Quant_17_out0": [(101, -0.0), (230, -0.0), (443, 0.0)], 92 | } 93 | }, 94 | } 95 | 96 | # inherit basics for matching testcases from test util 97 | model_details = {k: v for (k, v) in test_model_details.items() if k in model_details_stuckchans.keys()} 98 | model_details = {**model_details, **model_details_stuckchans} 99 | 100 | 101 | @pytest.mark.parametrize("model_name", model_details.keys()) 102 | def test_range_analysis(model_name): 103 | model = download_model(model_name, return_modelwrapper=True) 104 | irange = test_model_details[model_name]["input_range"] 105 | ret = range_analysis(model, irange=irange, report_mode="stuck_channel", key_filter="Quant", do_cleanup=True) 106 | golden_stuck_channels = model_details[model_name]["stuck_chans"] 107 | for tname, ret_chans in ret.items(): 108 | tg_chans = golden_stuck_channels[tname] 109 | for i in range(len(tg_chans)): 110 | tg_ind, tg_val = tg_chans[i] 111 | ret_ind, ret_val = ret_chans[i] 112 | assert tg_ind == ret_ind 113 | assert np.isclose(tg_val, ret_val) 114 | 
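A minimal sketch of generating the same stuck-channel report outside the test above, reusing the helpers it imports; each report entry is a (channel index, stuck value) pair per tensor.

from qonnx.util.range_analysis import range_analysis
from qonnx.util.test import download_model, test_model_details

model_name = "FINN-CNV_W2A2"
model = download_model(model_name, return_modelwrapper=True)
irange = test_model_details[model_name]["input_range"]
report = range_analysis(model, irange=irange, report_mode="stuck_channel", key_filter="Quant", do_cleanup=True)
for tensor_name, chans in report.items():
    # chans is a list of (channel_index, stuck_value) tuples
    print(tensor_name, len(chans), "stuck channels")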
-------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | """ 30 | Dummy conftest.py for qonnx_frontend. 31 | 32 | If you don't know what this is for, just leave it empty. 33 | Read more about conftest.py under: 34 | - https://docs.pytest.org/en/stable/fixture.html 35 | - https://docs.pytest.org/en/stable/writing_plugins.html 36 | """ 37 | 38 | # import pytest 39 | -------------------------------------------------------------------------------- /tests/core/test_basic_onnx_exec.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import numpy as np 30 | import onnx 31 | import onnx.numpy_helper as np_helper 32 | from pkgutil import get_data 33 | 34 | import qonnx.core.onnx_exec as oxe 35 | from qonnx.core.datatype import DataType 36 | from qonnx.core.modelwrapper import ModelWrapper 37 | from qonnx.transformation.infer_shapes import InferShapes 38 | from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model 39 | 40 | 41 | def test_mnist_onnx_download_extract_run(): 42 | # load the onnx model 43 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 44 | model = ModelWrapper(raw_m) 45 | model = model.transform(InferShapes()) 46 | # load one of the test vectors 47 | raw_i = get_data("qonnx.data", "onnx/mnist-conv/test_data_set_0/input_0.pb") 48 | raw_o = get_data("qonnx.data", "onnx/mnist-conv/test_data_set_0/output_0.pb") 49 | input_tensor = onnx.load_tensor_from_string(raw_i) 50 | output_tensor = onnx.load_tensor_from_string(raw_o) 51 | # run using QONNX-based execution (full graph) 52 | input_dict = {"Input3": np_helper.to_array(input_tensor)} 53 | output_dict = oxe.execute_onnx(model, input_dict, return_full_exec_context=True) 54 | assert np.isclose(np_helper.to_array(output_tensor), output_dict["Plus214_Output_0"], atol=1e-3).all() 55 | # test subgraph execution 56 | start_node = model.graph.node[1] 57 | end_node = model.graph.node[3] 58 | subgraph_i_dict = {start_node.input[0]: output_dict[start_node.input[0]]} 59 | subgraph_o_dict = oxe.execute_onnx( 60 | model, 61 | subgraph_i_dict, 62 | return_full_exec_context=True, 63 | start_node=start_node, 64 | end_node=end_node, 65 | ) 66 | assert np.isclose(subgraph_o_dict[end_node.output[0]], output_dict[end_node.output[0]], atol=1e-3).all() 67 | 68 | 69 | def test_onnx_exec_internal_rounding(): 70 | inp0 = onnx.helper.make_tensor_value_info("inp0", onnx.TensorProto.FLOAT, [2, 2]) 71 | inp1 = onnx.helper.make_tensor_value_info("inp1", onnx.TensorProto.FLOAT, [1]) 72 | outp = onnx.helper.make_tensor_value_info("outp", onnx.TensorProto.FLOAT, [2, 2]) 73 | mul_node = onnx.helper.make_node("Mul", inputs=["inp0", "inp1"], outputs=["outp"]) 74 | graph = onnx.helper.make_graph(nodes=[mul_node], name="mul_graph", inputs=[inp0, inp1], outputs=[outp]) 75 | 76 | model = qonnx_make_model(graph, producer_name="mul-model") 77 | model = ModelWrapper(model) 78 | idt = DataType["INT2"] 79 | model.set_tensor_datatype("inp0", idt) 80 | model.set_tensor_datatype("inp1", idt) 81 | model.transform(InferShapes()) 82 | 83 | mul_value = np.asarray([-1], dtype=np.float32) 84 | inp_int = gen_finn_dt_tensor(idt, [2, 2]) 85 | scale = np.random.uniform(low=0, high=1, size=(2, 2)).astype(np.float32) 86 | inp_rounded = (inp_int * scale) / (scale + 1e-7) 87 | input_dict = {"inp0": inp_rounded, "inp1": mul_value} 88 | output_dict = oxe.execute_onnx(model, input_dict) 89 | produced = output_dict["outp"] 90 | expected = np.multiply(inp_int, mul_value) 91 | assert (produced == expected).all() 92 | 
-------------------------------------------------------------------------------- /tests/custom_op/test_attr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | import numpy as np 30 | import onnx.parser as oprs 31 | 32 | import qonnx.custom_op.general as general 33 | from qonnx.core.modelwrapper import ModelWrapper 34 | from qonnx.custom_op.base import CustomOp 35 | from qonnx.custom_op.registry import getCustomOp 36 | 37 | 38 | class AttrTestOp(CustomOp): 39 | def get_nodeattr_types(self): 40 | my_attrs = {"tensor_attr": ("t", True, np.asarray([])), "strings_attr": ("strings", True, [""])} 41 | return my_attrs 42 | 43 | def make_shape_compatible_op(self, model): 44 | param_tensor = self.get_nodeattr("tensor_attr") 45 | return super().make_const_shape_op(param_tensor.shape) 46 | 47 | def infer_node_datatype(self, model): 48 | node = self.onnx_node 49 | # data type stays the same 50 | dtype = model.get_tensor_datatype(node.input[0]) 51 | model.set_tensor_datatype(node.output[0], dtype) 52 | 53 | def execute_node(self, context, graph): 54 | node = self.onnx_node 55 | param_tensor = self.get_nodeattr("tensor_attr") 56 | context[node.output[0]] = param_tensor 57 | 58 | def verify_node(self): 59 | pass 60 | 61 | 62 | def test_attr(): 63 | general.custom_op["AttrTestOp"] = AttrTestOp 64 | ishp = (1, 10) 65 | wshp = (1, 3) 66 | oshp = wshp 67 | ishp_str = str(list(ishp)) 68 | oshp_str = str(list(oshp)) 69 | wshp_str = str(list(wshp)) 70 | w = np.asarray([1, -2, 3], dtype=np.int8) 71 | strarr = np.array2string(w, separator=", ") 72 | w_str = strarr.replace("[", "{").replace("]", "}").replace(" ", "") 73 | tensor_attr_str = f"int8{wshp_str} {w_str}" 74 | strings_attr = ["a", "bc", "def"] 75 | 76 | input = f""" 77 | < 78 | ir_version: 7, 79 | opset_import: ["" : 9] 80 | > 81 | agraph (float{ishp_str} in0) => (int8{oshp_str} out0) 82 | {{ 83 | out0 = qonnx.custom_op.general.AttrTestOp< 84 | tensor_attr={tensor_attr_str} 85 | >(in0) 86 | }} 87 | """ 88 | model = oprs.parse_model(input) 89 | model = ModelWrapper(model) 90 | inst = getCustomOp(model.graph.node[0]) 91 | 92 | w_prod = inst.get_nodeattr("tensor_attr") 93 | assert (w_prod == w).all() 94 | w = w - 1 95 | inst.set_nodeattr("tensor_attr", w) 96 | w_prod = inst.get_nodeattr("tensor_attr") 97 | assert (w_prod == w).all() 98 | 99 | inst.set_nodeattr("strings_attr", strings_attr) 100 | strings_attr_prod = inst.get_nodeattr("strings_attr") 101 | assert strings_attr_prod == strings_attr 102 | strings_attr_prod[0] = "test" 103 | inst.set_nodeattr("strings_attr", strings_attr_prod) 104 | assert inst.get_nodeattr("strings_attr") == ["test"] + strings_attr[1:] 105 | -------------------------------------------------------------------------------- /tests/custom_op/test_runding_mode.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import numpy as np 4 | 5 | from qonnx.custom_op.general.quant import resolve_rounding_mode 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "rmode,exp", 10 | [ 11 | ("ROUND", np.array([6, 2, 2, 1, 1, -1, -1, -2, -2, -6])), 12 | ("CEIL", np.array([6, 3, 2, 2, 1, -1, -1, -1, -2, -5])), 13 | ("FLOOR", np.array([5, 2, 1, 1, 1, -1, -2, -2, -3, -6])), 14 | ("UP", np.array([6, 3, 2, 2, 1, -1, -2, -2, -3, -6])), 15 | ("DOWN", np.array([5, 2, 1, 1, 1, -1, -1, -1, -2, -5])), 16 | ("HALF_UP", np.array([6, 3, 2, 1, 1, -1, -1, -2, -3, -6])), 17 | ("HALF_DOWN", np.array([5, 2, 2, 1, 1, -1, -1, -2, -2, -5])), 18 | ], 19 | ) 20 | def test_rounding_modes(rmode, exp): 21 | test_array = np.array([5.5, 2.5, 1.6, 1.1, 1.0, -1.0, -1.1, -1.6, -2.5, -5.5]) 22 | rounding_fn = resolve_rounding_mode(rmode) 23 | assert 
np.array_equal(rounding_fn(test_array), exp) 24 | -------------------------------------------------------------------------------- /tests/custom_op/test_xnorpopcountmatmul.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
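resolve_rounding_mode from the Quant custom op (exercised by the rounding-mode test above) returns a NumPy-compatible rounding function; a small standalone sketch, with the expected values taken from the HALF_UP row of that test:

import numpy as np

from qonnx.custom_op.general.quant import resolve_rounding_mode

round_half_up = resolve_rounding_mode("HALF_UP")
# ties round away from zero: prints [ 3. -3.]
print(round_half_up(np.array([2.5, -2.5])))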
28 | 29 | import numpy as np 30 | import onnx.helper as helper 31 | from onnx import TensorProto 32 | 33 | import qonnx.core.onnx_exec as oxe 34 | from qonnx.core.datatype import DataType 35 | from qonnx.core.modelwrapper import ModelWrapper 36 | from qonnx.transformation.infer_datatypes import InferDataTypes 37 | from qonnx.transformation.infer_shapes import InferShapes 38 | from qonnx.util.basic import qonnx_make_model 39 | 40 | export_onnx_path = "test_xnorpopcountmatmul.onnx" 41 | 42 | 43 | def test_xnorpopcountmatmul(): 44 | M = 1 45 | K = 3 46 | N = 3 47 | x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [M, K]) 48 | W = helper.make_tensor_value_info("W", TensorProto.FLOAT, [K, N]) 49 | out = helper.make_tensor_value_info("out", TensorProto.FLOAT, ["x", "y"]) 50 | node_def = helper.make_node("XnorPopcountMatMul", ["x", "W"], ["out"], domain="qonnx.custom_op.general") 51 | modelproto = qonnx_make_model(helper.make_graph([node_def], "test_model", [x], [out], value_info=[W])) 52 | model = ModelWrapper(modelproto) 53 | model.set_tensor_datatype("x", DataType["BINARY"]) 54 | model.set_tensor_datatype("W", DataType["BINARY"]) 55 | W_data = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32) 56 | model.set_initializer("W", W_data) 57 | # test shape inference 58 | model = model.transform(InferShapes()) 59 | assert model.get_tensor_shape("out") == [M, N] 60 | # test datatype inference 61 | assert model.get_tensor_datatype("out") == DataType["FLOAT32"] 62 | model = model.transform(InferDataTypes()) 63 | assert model.get_tensor_datatype("out") == DataType["UINT32"] 64 | # test execution 65 | x_data = np.asarray([[1, 0, 0]], dtype=np.float32) 66 | inp_dict = {"x": x_data} 67 | out_dict = oxe.execute_onnx(model, inp_dict) 68 | Wb = 2 * W_data - 1 69 | xb = 2 * x_data - 1 70 | rb = np.matmul(xb, Wb) 71 | assert (2 * out_dict["out"] - K == rb).all() 72 | -------------------------------------------------------------------------------- /tests/test_dummy.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | 30 | def test_dummy(): 31 | assert True 32 | -------------------------------------------------------------------------------- /tests/transformation/test_change_batchsize.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | import pytest 30 | 31 | import numpy as np 32 | 33 | import qonnx.core.onnx_exec as oxe 34 | from qonnx.transformation.change_batchsize import ChangeBatchSize 35 | from qonnx.transformation.infer_shapes import InferShapes 36 | from qonnx.util.onnx import valueinfo_to_tensor 37 | from qonnx.util.test import download_model, test_model_details 38 | 39 | model_details = test_model_details 40 | 41 | 42 | @pytest.mark.parametrize("test_model", model_details.keys()) 43 | def test_change_batchsize(test_model): 44 | test_details = model_details[test_model] 45 | batch_size = 10 46 | old_ishape = test_details["input_shape"] 47 | imin, imax = test_details["input_range"] 48 | # some models spec per-channel ranges, be conservative for those 49 | if isinstance(imin, np.ndarray): 50 | imin = imin.max() 51 | if isinstance(imax, np.ndarray): 52 | imax = imax.min() 53 | model = download_model(test_model=test_model, do_cleanup=True, return_modelwrapper=True) 54 | iname = model.graph.input[0].name 55 | oname = model.graph.output[0].name 56 | example_inp = valueinfo_to_tensor(model.get_tensor_valueinfo(iname)) 57 | assert tuple(model.get_tensor_shape(iname)) == old_ishape 58 | model = model.transform(ChangeBatchSize(batch_size)) 59 | model = model.transform(InferShapes()) 60 | exp_ishape = (batch_size, *old_ishape[1:]) 61 | assert tuple(model.get_tensor_shape(iname)) == exp_ishape 62 | new_inp = np.random.uniform(imin, imax, exp_ishape).astype(example_inp.dtype) 63 | ret = oxe.execute_onnx(model, {iname: new_inp}) 64 | assert ret[oname].shape[0] == batch_size 65 | -------------------------------------------------------------------------------- /tests/transformation/test_channelslast_eltwise.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | import numpy as np 30 | from pkgutil import get_data 31 | 32 | import qonnx.core.onnx_exec as oxe 33 | from qonnx.core.modelwrapper import ModelWrapper 34 | from qonnx.transformation.channels_last import ConvertToChannelsLastAndClean 35 | from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul 36 | from qonnx.util.basic import gen_finn_dt_tensor 37 | 38 | 39 | def test_lower_and_channelslast_eltwiseops(): 40 | raw_m = get_data("qonnx.data", "onnx/eltwise_chanlast_testcase.onnx") 41 | model = ModelWrapper(raw_m) 42 | iname = model.graph.input[0].name 43 | idt = model.get_tensor_datatype(iname) 44 | ishape = model.get_tensor_shape(iname) 45 | idict = {iname: gen_finn_dt_tensor(idt, ishape)} 46 | oname = model.graph.output[0].name 47 | expected_out = oxe.execute_onnx(model, idict)[oname] 48 | model = model.transform(LowerConvsToMatMul()) 49 | model = model.transform(ConvertToChannelsLastAndClean(make_input_channels_last=False)) 50 | expected_ops = ["Transpose", "Im2Col", "MatMul", "Mul", "Add", "Relu", "Mul", "Quant", "Transpose"] 51 | ops = [x.op_type for x in model.graph.node] 52 | assert ops == expected_ops, "Did not find expected op sequence after lowering and channels-last conversion" 53 | out = oxe.execute_onnx(model, idict)[oname] 54 | assert np.isclose(expected_out, out, atol=1e-4).all() 55 | -------------------------------------------------------------------------------- /tests/transformation/test_channelslast_residual.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 | 29 | import numpy as np 30 | from pkgutil import get_data 31 | 32 | import qonnx.core.onnx_exec as oxe 33 | from qonnx.core.modelwrapper import ModelWrapper 34 | from qonnx.transformation.channels_last import ConvertToChannelsLastAndClean 35 | from qonnx.util.basic import gen_finn_dt_tensor 36 | 37 | 38 | def test_channelslast_residual(): 39 | raw_m = get_data("qonnx.data", "onnx/residual_block_clean.onnx") 40 | model = ModelWrapper(raw_m) 41 | iname = model.graph.input[0].name 42 | idt = model.get_tensor_datatype(iname) 43 | ishape = model.get_tensor_shape(iname) 44 | idict = {iname: gen_finn_dt_tensor(idt, ishape)} 45 | oname = model.graph.output[0].name 46 | expected_out = oxe.execute_onnx(model, idict)[oname] 47 | model = model.transform(ConvertToChannelsLastAndClean(make_input_channels_last=False)) 48 | expected_ops = ["Transpose", "Conv", "Conv", "Relu", "Conv", "Relu", "Add", "MaxPool", "Transpose"] 49 | ops = [x.op_type for x in model.graph.node] 50 | assert ops == expected_ops, "Did not find expected op sequence after channels-last conversion" 51 | for node in model.graph.node: 52 | if node.op_type in ["Conv", "MaxPool"]: 53 | assert node.domain == "qonnx.custom_op.channels_last" 54 | out = oxe.execute_onnx(model, idict)[oname] 55 | assert np.isclose(expected_out, out, atol=1e-4).all() 56 | -------------------------------------------------------------------------------- /tests/transformation/test_expose_intermediate.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 | 29 | import pytest 30 | 31 | from qonnx.transformation.expose_intermediate import ExposeIntermediateTensorsPatternList 32 | from qonnx.transformation.fold_constants import FoldConstants 33 | from qonnx.util.test import download_model, test_model_details 34 | 35 | model_details_expint = { 36 | "FINN-TFC_W2A2": {"n_quant_outputs": 4}, 37 | "FINN-CNV_W2A2": {"n_quant_outputs": 9}, 38 | "MobileNetv1-w4a4": {"n_quant_outputs": 27}, 39 | } 40 | 41 | # inherit basics for matching testcases from test util 42 | model_details = {k: v for (k, v) in test_model_details.items() if k in model_details_expint.keys()} 43 | model_details = {**model_details, **model_details_expint} 44 | 45 | 46 | @pytest.mark.parametrize("model_name", model_details.keys()) 47 | def test_expose_intermediate(model_name): 48 | model = download_model(model_name, do_cleanup=True, return_modelwrapper=True) 49 | # do folding for weights 50 | model = model.transform(FoldConstants(exclude_op_types=[])) 51 | # break out all dynamic (non-weight) quantizer outputs 52 | pattern_list = ["Quant"] 53 | model = model.transform(ExposeIntermediateTensorsPatternList(pattern_list, dynamic_only=True)) 54 | assert len(model.graph.output) == model_details_expint[model_name]["n_quant_outputs"] + 1 55 | -------------------------------------------------------------------------------- /tests/transformation/test_fold_constants.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020, Xilinx 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of QONNX nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | import numpy as np 30 | import onnx 31 | import onnx.numpy_helper as np_helper 32 | from pkgutil import get_data 33 | 34 | import qonnx.core.onnx_exec as oxe 35 | from qonnx.core.modelwrapper import ModelWrapper 36 | from qonnx.transformation.fold_constants import FoldConstants 37 | from qonnx.transformation.infer_shapes import InferShapes 38 | 39 | 40 | def test_const_folding(): 41 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 42 | model = ModelWrapper(raw_m) 43 | model = model.transform(InferShapes()) 44 | model = model.transform(FoldConstants()) 45 | raw_i = get_data("qonnx.data", "onnx/mnist-conv/test_data_set_0/input_0.pb") 46 | raw_o = get_data("qonnx.data", "onnx/mnist-conv/test_data_set_0/output_0.pb") 47 | input_tensor = onnx.load_tensor_from_string(raw_i) 48 | output_tensor = onnx.load_tensor_from_string(raw_o) 49 | input_dict = {"Input3": np_helper.to_array(input_tensor)} 50 | output_dict = oxe.execute_onnx(model, input_dict) 51 | assert np.isclose(np_helper.to_array(output_tensor), output_dict["Plus214_Output_0"], atol=1e-3).all() 52 | 53 | 54 | def test_const_folding_shapes(): 55 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 56 | model = ModelWrapper(raw_m) 57 | model = model.transform(InferShapes()) 58 | mm_node_w_in = model.get_nodes_by_op_type("MatMul")[0].input[1] 59 | assert model.find_producer(mm_node_w_in) is not None 60 | assert model.find_producer(mm_node_w_in).op_type == "Reshape" 61 | assert model.get_initializer(mm_node_w_in) is None 62 | model = model.transform(FoldConstants()) 63 | assert model.find_producer(mm_node_w_in) is None 64 | assert model.get_initializer(mm_node_w_in) is not None 65 | -------------------------------------------------------------------------------- /tests/transformation/test_infer_data_layouts.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020, Xilinx 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of QONNX nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | from pkgutil import get_data 30 | 31 | import qonnx.core.data_layout as DataLayout 32 | from qonnx.core.modelwrapper import ModelWrapper 33 | from qonnx.transformation.fold_constants import FoldConstants 34 | from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames 35 | from qonnx.transformation.infer_data_layouts import InferDataLayouts 36 | from qonnx.transformation.infer_shapes import InferShapes 37 | from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul 38 | 39 | 40 | def test_infer_data_layouts(): 41 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 42 | model = ModelWrapper(raw_m) 43 | model = model.transform(InferShapes()) 44 | model = model.transform(FoldConstants()) 45 | model = model.transform(GiveUniqueNodeNames()) 46 | model = model.transform(GiveReadableTensorNames()) 47 | model = model.transform(InferDataLayouts()) 48 | 49 | assert model.get_tensor_layout("global_in") == DataLayout.NCHW 50 | assert model.get_tensor_layout("Conv_0_out0") == DataLayout.NCHW 51 | assert model.get_tensor_layout("MaxPool_0_out0") == DataLayout.NCHW 52 | assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC 53 | assert model.get_tensor_layout("MatMul_0_out0") == DataLayout.NC 54 | assert model.get_tensor_layout("global_out") == DataLayout.NC 55 | 56 | model = model.transform(LowerConvsToMatMul()) 57 | model = model.transform(GiveUniqueNodeNames()) 58 | model = model.transform(GiveReadableTensorNames()) 59 | model = model.transform(InferDataLayouts()) 60 | 61 | assert model.get_tensor_layout("global_in") == DataLayout.NCHW 62 | assert model.get_tensor_layout("Transpose_0_out0") == DataLayout.NHWC 63 | assert model.get_tensor_layout("Im2Col_0_out0") == DataLayout.NHWC 64 | assert model.get_tensor_layout("MatMul_0_out0") == DataLayout.NHWC 65 | assert model.get_tensor_layout("MaxPool_0_out0") == DataLayout.NCHW 66 | assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC 67 | assert model.get_tensor_layout("MatMul_2_out0") == DataLayout.NC 68 | assert model.get_tensor_layout("global_out") == DataLayout.NC 69 | -------------------------------------------------------------------------------- /tests/transformation/test_infer_shapes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import numpy as np 30 | from onnx import TensorProto, helper 31 | from pkgutil import get_data 32 | 33 | import qonnx.util.basic as util 34 | from qonnx.core.modelwrapper import ModelWrapper 35 | from qonnx.transformation.infer_shapes import InferShapes 36 | 37 | 38 | def test_infer_shapes(): 39 | # load the onnx model 40 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 41 | model = ModelWrapper(raw_m) 42 | graph = model.graph 43 | 44 | # multi-thresholding node to be inserted between the first Relu and MaxPool node 45 | 46 | # get Relu node to use data 47 | Relu_node = graph.node[3] 48 | assert Relu_node.op_type == "Relu", "The wrong model was chosen for the check" 49 | 50 | # create thresholds tensor as constant 51 | mt_thresh0 = helper.make_tensor_value_info("mt_thresh0", TensorProto.FLOAT, [8, 7]) 52 | 53 | # random numbers for the thresholds 54 | # thresholds for one channel have to be sorted to guarantee the correct behavior 55 | mt_thresh0_values = np.empty([8, 7], dtype=np.float32) 56 | for i in range(len(mt_thresh0_values)): 57 | mt_thresh0_values[i] = np.sort(np.random.random_sample(7) * 10) 58 | 59 | model.set_initializer(mt_thresh0.name, mt_thresh0_values) 60 | 61 | # add multi-thresholding node and change Relu node 62 | mt_node = helper.make_node( 63 | "MultiThreshold", 64 | ["mt_v0", "mt_thresh0"], 65 | [Relu_node.output[0]], 66 | domain="qonnx.custom_op.general", 67 | ) 68 | Relu_node.output[0] = "mt_v0" 69 | 70 | # explicitly remove any present shape from ReLU and MultiThreshold outputs 71 | util.remove_by_name(model.graph.value_info, Relu_node.output[0]) 72 | util.remove_by_name(model.graph.value_info, mt_node.output[0]) 73 | graph.node.insert(4, mt_node) 74 | 75 | # first check routine 76 | # check if at least one shape is not specified 77 | assert not ( 78 | model.check_all_tensor_shapes_specified() 79 | ), "All tensors are already specified before the shape inference execution" 80 | 81 | # perform shape inference on mixed model 82 | model = model.transform(InferShapes()) 83 | 84 | # second check routine 85 | # now all shapes should be specified and mt_node output shape is (1,8,28,28) 86 | assert model.check_all_tensor_shapes_specified(), "There are still tensors that are not specified" 87 | assert (model.get_tensor_shape(mt_node.output[0])) == ( 88 | [1, 8, 28, 28] 89 | ), "output of multi-thresholding node has wrong shape" 90 | -------------------------------------------------------------------------------- /tests/transformation/test_make_input_chanlast.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 Xilinx, Inc. 2 | # All rights reserved. 
3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | 30 | from pkgutil import get_data 31 | 32 | import qonnx.core.data_layout as data_layout 33 | from qonnx.core.modelwrapper import ModelWrapper 34 | from qonnx.transformation.make_input_chanlast import MakeInputChannelsLast 35 | 36 | 37 | def test_make_input_chanlast(): 38 | # load the onnx model 39 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 40 | model = ModelWrapper(raw_m) 41 | iname = model.graph.input[0].name 42 | assert tuple(model.get_tensor_shape(iname)) == (1, 1, 28, 28) 43 | model = model.transform(MakeInputChannelsLast()) 44 | assert model.graph.node[0].op_type == "Transpose" 45 | assert tuple(model.get_tensor_shape(iname)) == (1, 28, 28, 1) 46 | assert model.get_tensor_layout(iname) == data_layout.NHWC 47 | -------------------------------------------------------------------------------- /tests/transformation/test_nodelocal_transform.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, Xilinx 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of QONNX nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 
17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import numpy as np 30 | import onnx.helper as oh 31 | from pkgutil import get_data 32 | 33 | from qonnx.core.modelwrapper import ModelWrapper 34 | from qonnx.transformation.base import NodeLocalTransformation 35 | from qonnx.util.basic import get_by_name 36 | 37 | 38 | class SumAndAnnotateConvWeights(NodeLocalTransformation): 39 | def applyNodeLocal(self, node): 40 | if node.op_type == "Conv": 41 | # read conv weight tensor from model 42 | W = self.ref_input_model.get_initializer(node.input[1]) 43 | sum = float(np.sum(W)) 44 | # add a dummy attribute for verification 45 | attr_proto = oh.make_attribute("conv_w_sum", sum) 46 | node.attribute.append(attr_proto) 47 | return (node, False) 48 | 49 | 50 | def test_nodelocal_transform(): 51 | # load the onnx model 52 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 53 | model = ModelWrapper(raw_m) 54 | model = model.transform(SumAndAnnotateConvWeights()) 55 | for conv_node in model.get_nodes_by_op_type("Conv"): 56 | wsum = float(np.sum(model.get_initializer(conv_node.input[1]))) 57 | assert get_by_name(conv_node.attribute, "conv_w_sum").f == wsum 58 | -------------------------------------------------------------------------------- /tests/transformation/test_qcdq_to_qonnx.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import pytest 30 | 31 | import numpy as np 32 | import os 33 | import urllib.request 34 | 35 | import qonnx.core.onnx_exec as oxe 36 | from qonnx.core.modelwrapper import ModelWrapper 37 | from qonnx.transformation.qcdq_to_qonnx import QCDQToQuant 38 | from qonnx.util.cleanup import cleanup_model 39 | 40 | model_details = { 41 | "MobileNetv2-w8a8": { 42 | "url": ( 43 | "https://github.com/onnx/models/raw/main/validated/vision/classification/mobilenet/model/mobilenetv2-12-qdq.onnx" 44 | ), 45 | "input_shape": (1, 3, 224, 224), 46 | "input_range": (-1, +1), 47 | "exp_q_nodes": 171, 48 | }, 49 | } 50 | 51 | 52 | def download_model(test_model): 53 | qonnx_url = model_details[test_model]["url"] 54 | # download test data 55 | dl_dir = "/tmp" 56 | dl_file = dl_dir + f"/{test_model}.onnx" 57 | urllib.request.urlretrieve(qonnx_url, dl_file) 58 | return dl_file 59 | 60 | 61 | def get_golden_in_and_output(model, test_model): 62 | rng = np.random.RandomState(42) 63 | input_shape = model_details[test_model]["input_shape"] 64 | (low, high) = model_details[test_model]["input_range"] 65 | size = np.prod(np.asarray(input_shape)) 66 | input_tensor = rng.uniform(low=low, high=high, size=size) 67 | input_tensor = input_tensor.astype(np.float32) 68 | input_tensor = input_tensor.reshape(input_shape) 69 | # use batch dim of 1 where needed (appears as 0-valued dim) 70 | for tensor_name in model.get_all_tensor_names(): 71 | ts = model.get_tensor_shape(tensor_name) 72 | if len(ts) > 0 and ts[0] == 0: 73 | ts = list(ts) 74 | ts[0] = 1 75 | model.set_tensor_shape(tensor_name, ts) 76 | model.set_tensor_shape(model.graph.input[0].name, input_shape) 77 | input_dict = {model.graph.input[0].name: input_tensor} 78 | golden_output_dict = oxe.execute_onnx(model, input_dict) 79 | golden_result = golden_output_dict[model.graph.output[0].name] 80 | return input_tensor, golden_result 81 | 82 | 83 | @pytest.mark.parametrize("test_model", model_details.keys()) 84 | def test_qcdq_to_qonnx(test_model): 85 | test_details = model_details[test_model] 86 | dl_file = download_model(test_model=test_model) 87 | assert os.path.isfile(dl_file) 88 | model = ModelWrapper(dl_file) 89 | model = cleanup_model(model) 90 | input_tensor, golden_result = get_golden_in_and_output(model, test_model) 91 | model = model.transform(QCDQToQuant()) 92 | assert len(model.get_nodes_by_op_type("Quant")) == test_details["exp_q_nodes"] 93 | assert len(model.get_nodes_by_op_type("QuantizeLinear")) == 0 94 | assert len(model.get_nodes_by_op_type("DequantizeLinear")) == 0 95 | if test_model == "MobileNetv2-w8a8": 96 | pytest.xfail("MNv2 known to have off-by-one difference in inputs to Conv_30") 97 | model = cleanup_model(model) 98 | input_dict = {model.graph.input[0].name: input_tensor} 99 | produced_output_dict = oxe.execute_onnx(model, input_dict) 100 | produced_result = produced_output_dict[model.graph.output[0].name] 101 | assert np.isclose(golden_result, produced_result).all() 102 | 
-------------------------------------------------------------------------------- /tests/transformation/test_qonnx_cleanup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import urllib.request 3 | 4 | from qonnx.core.modelwrapper import ModelWrapper 5 | from qonnx.util.cleanup import cleanup 6 | 7 | 8 | def test_cleanup_cnv_w2a2(): 9 | # download test data 10 | dl_dir = "/tmp" 11 | dl_file = dl_dir + "/cnv-w2a2.onnx" 12 | cnv_w2a2_qonnx_url = ( 13 | "https://raw.githubusercontent.com/fastmachinelearning/" 14 | "QONNX_model_zoo/main/models/CIFAR10/Brevitas_FINN_CNV/CNV_2W2A.onnx" 15 | ) 16 | urllib.request.urlretrieve(cnv_w2a2_qonnx_url, dl_file) 17 | assert os.path.isfile(dl_file) 18 | # run cleanup with default settings 19 | out_file = dl_dir + "/cnv-w2a2-clean.onnx" 20 | cleanup(dl_file, out_file=out_file) 21 | assert os.path.isfile(out_file) 22 | model = ModelWrapper(out_file) 23 | # check some names and shapes 24 | assert model.check_all_tensor_shapes_specified() 25 | assert model.graph.output[0].name == "global_out" 26 | assert model.get_tensor_shape(model.graph.output[0].name) == [1, 10] 27 | # constant folding should have replaced Shape -> Gather -> Unsqueeze -> Concat -> Reshape 28 | # with a Reshape w/ shape=(1,-1) 29 | reshape_nodes = model.get_nodes_by_op_type("Reshape") 30 | assert len(reshape_nodes) == 1 31 | reshape_node = reshape_nodes[0] 32 | assert (model.get_initializer(reshape_node.input[1]) == [1, -1]).all() 33 | os.remove(dl_file) 34 | -------------------------------------------------------------------------------- /tests/transformation/test_rebalance_conv.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | import numpy as np 30 | import onnx.parser as oprs 31 | 32 | import qonnx.core.onnx_exec as oxe 33 | from qonnx.core.datatype import DataType 34 | from qonnx.core.modelwrapper import ModelWrapper 35 | from qonnx.transformation.infer_datatypes import InferDataTypes 36 | from qonnx.transformation.infer_shapes import InferShapes 37 | from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul 38 | from qonnx.transformation.rebalance_conv import RebalanceIm2Col 39 | from qonnx.util.basic import gen_finn_dt_tensor 40 | 41 | 42 | def test_rebalance_conv(): 43 | ch_factor = 4 44 | ifmdim = 64 45 | kdim = 8 46 | ofmdim = ifmdim // kdim 47 | ifm = 1 48 | ofm = 16 49 | ishp = (1, ifm, ifmdim, ifmdim) 50 | oshp = (1, ofm, ofmdim, ofmdim) 51 | wshp = (ofm, ifm, kdim, kdim) 52 | dt0 = DataType["UINT8"] 53 | wdt = DataType["INT4"] 54 | np.random.seed(0) 55 | ishp_str = str(list(ishp)) 56 | oshp_str = str(list(oshp)) 57 | wshp_str = str(list(wshp)) 58 | 59 | input = f""" 60 | < 61 | ir_version: 7, 62 | opset_import: ["" : 9] 63 | > 64 | agraph (float{ishp_str} in0) => (float{oshp_str} out0) 65 | < 66 | float{wshp_str} conv_param 67 | > 68 | {{ 69 | out0 = Conv< 70 | dilations=[1,1], group=1, kernel_shape=[{kdim},{kdim}], 71 | strides=[{kdim},{kdim}], pads=[0,0,0,0] 72 | >(in0, conv_param) 73 | }} 74 | """ 75 | model = oprs.parse_model(input) 76 | model = ModelWrapper(model) 77 | model.set_tensor_datatype("in0", dt0) 78 | w = gen_finn_dt_tensor(wdt, wshp) 79 | model.set_initializer("conv_param", w) 80 | model = model.transform(InferShapes()) 81 | inp = gen_finn_dt_tensor(dt0, ishp) 82 | input_dict = {"in0": inp} 83 | model = model.transform(LowerConvsToMatMul()) 84 | model = model.transform(InferShapes()) 85 | model = model.transform(InferDataTypes()) 86 | im2col_node = model.get_nodes_by_op_type("Im2Col")[0] 87 | old_im2col_ishape = model.get_tensor_shape(im2col_node.input[0]) 88 | old_im2col_oshape = model.get_tensor_shape(im2col_node.output[0]) 89 | assert tuple(old_im2col_ishape) == (1, ifmdim, ifmdim, ifm) 90 | out_expected = oxe.execute_onnx(model, input_dict)["out0"] 91 | model = model.transform(RebalanceIm2Col(ch_factor)) 92 | model = model.transform(InferShapes()) 93 | model = model.transform(InferDataTypes()) 94 | im2col_node = model.get_nodes_by_op_type("Im2Col")[0] 95 | new_im2col_ishape = model.get_tensor_shape(im2col_node.input[0]) 96 | new_im2col_oshape = model.get_tensor_shape(im2col_node.output[0]) 97 | out_produced = oxe.execute_onnx(model, input_dict)["out0"] 98 | assert len(model.get_nodes_by_op_type("Reshape")) == 1 99 | assert tuple(new_im2col_ishape) == (1, ifmdim, ifmdim // ch_factor, ch_factor) 100 | assert old_im2col_oshape == new_im2col_oshape 101 | assert (out_expected == out_produced).all() 102 | -------------------------------------------------------------------------------- /tests/transformation/test_remove_unused.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 
9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | 30 | from qonnx.transformation.expose_intermediate import ExposeIntermediateTensorsPatternList 31 | from qonnx.transformation.remove import RemoveUnusedNodes 32 | from qonnx.util.test import download_model 33 | 34 | 35 | def test_remove_unused_nodes(): 36 | model = download_model("FINN-TFC_W2A2", do_cleanup=True, return_modelwrapper=True) 37 | orig_output = model.graph.output[0] 38 | # break out intermediate output 39 | pattern_list = ["MatMul_0_out0"] 40 | model = model.transform(ExposeIntermediateTensorsPatternList(pattern_list, dynamic_only=True)) 41 | # remove original top-level output 42 | model.graph.output.remove(orig_output) 43 | assert len(model.graph.output) == 1 44 | assert model.graph.output[0].name == "MatMul_0_out0" 45 | # call transform to remove the now-dangling tail nodes 46 | model = model.transform(RemoveUnusedNodes()) 47 | assert len(model.graph.node) == 6 48 | -------------------------------------------------------------------------------- /tests/transformation/test_sort_commutative_inputs_initializer_last.py: -------------------------------------------------------------------------------- 1 | # Set pytest parameters 2 | import pytest 3 | 4 | # Numpy for handling simulation of tensor operations 5 | import numpy as np 6 | 7 | # Helper for creating ONNX nodes 8 | from onnx import TensorProto 9 | from onnx import helper as oh 10 | 11 | # QONNX wrapper of ONNX model graphs 12 | from qonnx.core.modelwrapper import ModelWrapper 13 | 14 | # Execute QONNX model graphs 15 | from qonnx.core.onnx_exec import execute_onnx 16 | 17 | # Graph transformation to be tested: Sorts the input list of commutative 18 | # operations to have all dynamic inputs first followed by all initializer inputs 19 | from qonnx.transformation.general import SortCommutativeInputsInitializerLast 20 | 21 | # QONNX utility for creating models from ONNX graphs 22 | from qonnx.util.basic import qonnx_make_model 23 | 24 | 25 | # Specify how many inputs the test should cover 26 | @pytest.mark.parametrize("num_inputs", [4, 5, 6]) 27 | # Specify which inputs should be turned into initializers 28 | @pytest.mark.parametrize( 29 | # fmt: off 30 | "initializers", [[], [0], [1], [0, 1], [0, 3], [0, 1, 2, 3]] 31 | # fmt: on 32 | ) 33 | # Tests the 
SortCommutativeInputsInitializerLast transformation 34 | def test_sort_commutative_inputs_initializer_last(num_inputs, initializers): 35 | # Generate the input tensor names 36 | inputs = [f"in{i}" for i in range(num_inputs)] 37 | # We will use the Sum ONNX operation to test this behavior, as it allows for 38 | # arbitrary many inputs 39 | node = oh.make_node( 40 | # fmt: off 41 | op_type="Sum", inputs=inputs, outputs=["out"], name="Sum" 42 | # fmt: on 43 | ) 44 | # Create value infos for all input and the output tensor 45 | inputs = [ 46 | # fmt: off 47 | oh.make_tensor_value_info(i, TensorProto.FLOAT, (16,)) for i in inputs 48 | # fmt: on 49 | ] 50 | out = oh.make_tensor_value_info("out", TensorProto.FLOAT, (16,)) 51 | # Make a graph comprising the Sum node and value infos for all inputs and 52 | # the output 53 | graph = oh.make_graph([node], inputs=inputs, outputs=[out], name="Sum") 54 | # Wrap the graph in an QONNX model wrapper 55 | model = ModelWrapper(qonnx_make_model(graph, producer_name="qonnx-tests")) 56 | # Prepare the execution context 57 | context = {f"in{i}": np.random.rand(16) for i in range(num_inputs)} 58 | # Make sure all inputs are of type float32 59 | context = {key: value.astype(np.float32) for key, value in context.items()} 60 | # Turn selected inputs into initializers 61 | for i in initializers: 62 | model.set_initializer(f"in{i}", context[f"in{i}"]) 63 | 64 | # Execute the ONNX model before transforming 65 | out_expected = execute_onnx(model, context)["out"] 66 | # Apply the transformation to be tested 67 | # Note: No cleanup, as the tested transformation is part of the cleanup, and 68 | # we want to test this in isolation 69 | model = model.transform( 70 | # fmt: off 71 | SortCommutativeInputsInitializerLast(), cleanup=False 72 | # fmt: on 73 | ) 74 | # Execute the ONNX model after transforming 75 | out_produced = execute_onnx(model, context)["out"] 76 | 77 | # Start with no initializer input seen so far 78 | seen_initializer = False 79 | # Verify that no "dynamic" input follows an initializer input 80 | for i in model.graph.node[0].input: 81 | # Keep track of when an initializer has been seen 82 | if model.get_initializer(i) is not None: 83 | seen_initializer = True 84 | # If there has already been an initializer, this input must be an 85 | # initializer as well 86 | assert ( 87 | not seen_initializer or model.get_initializer(i) is not None 88 | ), "Non-initializer input following initializer after sorting" 89 | 90 | # Outputs before and after must match 91 | assert np.allclose(out_produced, out_expected) 92 | -------------------------------------------------------------------------------- /tests/transformation/test_topk_insert.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 Advanced Micro Devices, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 
13 | # 14 | # * Neither the name of qonnx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import pytest 30 | 31 | import numpy as np 32 | import onnx 33 | import onnx.numpy_helper as nph 34 | from pkgutil import get_data 35 | 36 | import qonnx.core.onnx_exec as oxe 37 | from qonnx.core.modelwrapper import ModelWrapper 38 | from qonnx.transformation.fold_constants import FoldConstants 39 | from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames 40 | from qonnx.transformation.infer_datatypes import InferDataTypes 41 | from qonnx.transformation.infer_shapes import InferShapes 42 | from qonnx.transformation.insert_topk import InsertTopK 43 | 44 | 45 | @pytest.mark.parametrize("k", [1, 2]) 46 | def test_topk_insert(k): 47 | raw_m = get_data("qonnx.data", "onnx/mnist-conv/model.onnx") 48 | model = ModelWrapper(raw_m) 49 | model.model.opset_import[0].version = 11 50 | 51 | # do transformations (no topk) 52 | model = model.transform(InferShapes()) 53 | model = model.transform(FoldConstants()) 54 | model = model.transform(GiveUniqueNodeNames()) 55 | model = model.transform(GiveReadableTensorNames()) 56 | model = model.transform(InferDataTypes()) 57 | 58 | # verification: generate random input, run through net, streamline, 59 | # run again, check that output is top-k 60 | raw_i = get_data("qonnx.data", "onnx/mnist-conv/test_data_set_0/input_0.pb") 61 | input_tensor = onnx.load_tensor_from_string(raw_i) 62 | input_tensor = nph.to_array(input_tensor) 63 | input_dict = {"global_in": input_tensor} 64 | output_golden = oxe.execute_onnx(model, input_dict)["global_out"] 65 | output_golden_topk = np.flip(output_golden.flatten().argsort())[:k] 66 | output_golden_topk = output_golden_topk.flatten() 67 | 68 | # insert top-k 69 | model = model.transform(InsertTopK(k)) 70 | model = model.transform(GiveUniqueNodeNames()) 71 | model = model.transform(GiveReadableTensorNames()) 72 | model = model.transform(InferShapes()) 73 | 74 | # verify output of top-k 75 | output_dict_topk = oxe.execute_onnx(model, input_dict) 76 | output_pysim_topk = output_dict_topk[list(output_dict_topk.keys())[0]] 77 | output_pysim_topk = output_pysim_topk.astype(np.int32).flatten() 78 | 79 | assert np.array_equal(output_golden_topk, output_pysim_topk) 80 | -------------------------------------------------------------------------------- /tests/util/test_matvec_range.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 Xilinx, Inc. 2 | # All rights reserved. 
3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import pytest 30 | 31 | import numpy as np 32 | 33 | import qonnx.util.basic as util 34 | from qonnx.core.datatype import DataType 35 | 36 | """Calculate the minimum and maximum possible result (accumulator) values 37 | for a dot product x * A, given matrix A of dims (MW, MH), and vector (1, MW) 38 | with datatype vec_dt. Returns (acc_min, acc_max). 39 | """ 40 | 41 | np.random.seed(0) 42 | 43 | datatypes = [ 44 | DataType["UINT8"], 45 | DataType["INT8"], 46 | DataType["UINT3"], 47 | DataType["INT3"], 48 | DataType["BIPOLAR"], 49 | DataType["TERNARY"], 50 | ] 51 | 52 | 53 | @pytest.mark.parametrize("wdt", datatypes) 54 | @pytest.mark.parametrize("idt", datatypes) 55 | @pytest.mark.parametrize("size", [1, 10, 100]) 56 | def test_calculate_matvec_accumulator_range(wdt: DataType, idt: DataType, size: int): 57 | weights = util.gen_finn_dt_tensor(wdt, tensor_shape=(size, 1)) # (MW, MH) 58 | acc_min, acc_max = util.calculate_matvec_accumulator_range(weights, idt) 59 | for _ in range(1000): 60 | inputs = util.gen_finn_dt_tensor(idt, tensor_shape=(1, size)) 61 | acc_vals = inputs @ weights 62 | assert acc_vals.min() >= acc_min 63 | assert acc_vals.max() <= acc_max 64 | -------------------------------------------------------------------------------- /tests/util/test_padding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 
9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 17 | # 18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | import numpy as np 30 | 31 | from qonnx.util.basic import pad_tensor_to_multiple_of 32 | 33 | 34 | def test_pad_tensor_to_multiple_of(): 35 | A = np.eye(3) 36 | B = pad_tensor_to_multiple_of(A, [2, 2], val=-1) 37 | assert B.shape == (4, 4) 38 | assert (B[:3, :3] == A).all() 39 | assert (B[3, :] == -1).all() 40 | assert (B[:, 3] == -1).all() 41 | B = pad_tensor_to_multiple_of(A, [5, 5], val=-1, distr_pad=True) 42 | assert B.shape == (5, 5) 43 | assert (B[1:4, 1:4] == A).all() 44 | assert (B[0, :] == -1).all() 45 | assert (B[:, 0] == -1).all() 46 | assert (B[4, :] == -1).all() 47 | assert (B[:, 4] == -1).all() 48 | # using -1 in pad_to parameter should give an unpadded dimension 49 | B = pad_tensor_to_multiple_of(A, [-1, 5], val=-1, distr_pad=True) 50 | assert B.shape == (3, 5) 51 | assert (B[:, 1:4] == A).all() 52 | assert (B[:, 0] == -1).all() 53 | assert (B[:, 4] == -1).all() 54 | # if odd number of padding pixels required, 1 more should go after existing 55 | B = pad_tensor_to_multiple_of(A, [6, 6], val=-1, distr_pad=True) 56 | assert B.shape == (6, 6) 57 | assert (B[1:4, 1:4] == A).all() 58 | -------------------------------------------------------------------------------- /tests/util/test_shape_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 Xilinx, Inc. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright notice, this 8 | # list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright notice, 11 | # this list of conditions and the following disclaimer in the documentation 12 | # and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of Xilinx nor the names of its 15 | # contributors may be used to endorse or promote products derived from 16 | # this software without specific prior written permission. 
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 | import numpy as np
30 |
31 | import qonnx.util.basic as util
32 |
33 |
34 | def test_interleave_matrix_outer_dim_from_partitions():
35 |     A = np.eye(10)
36 |     n_parts = 2
37 |     Ax = util.interleave_matrix_outer_dim_from_partitions(A, n_parts)
38 |     part_size = 10 // n_parts
39 |     assert Ax.shape == (n_parts, part_size, 10)
40 |     for r_ind in range(A.shape[0]):
41 |         assert (A[r_ind] == Ax[r_ind % n_parts][r_ind // n_parts]).all()
42 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # Tox configuration file
2 | # Read more under https://tox.readthedocs.org/
3 | # THIS SCRIPT IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS!
4 |
5 | [tox]
6 | minversion = 3.15
7 | envlist = default
8 |
9 |
10 | [testenv]
11 | description = invoke pytest to run automated tests
12 | isolated_build = True
13 | setenv =
14 |     TOXINIDIR = {toxinidir}
15 | passenv =
16 |     HOME
17 | extras =
18 |     testing
19 | commands =
20 |     pytest {posargs}
21 |
22 |
23 | [testenv:{clean,build}]
24 | description =
25 |     Build (or clean) the package in isolation according to instructions in:
26 |     https://setuptools.readthedocs.io/en/latest/build_meta.html#how-to-use-it
27 |     https://github.com/pypa/pep517/issues/91
28 |     https://github.com/pypa/build
29 | # NOTE: build is still experimental, please refer to the links for updates/issues
30 | skip_install = True
31 | changedir = {toxinidir}
32 | deps =
33 |     build: build[virtualenv]
34 | commands =
35 |     clean: python -c 'from shutil import rmtree; rmtree("build", True); rmtree("dist", True)'
36 |     build: python -m build .
37 | # By default `build` produces wheels, you can also explicitly use the flags `--sdist` and `--wheel`
38 |
39 |
40 | [testenv:{docs,doctests}]
41 | description = invoke sphinx-build to build the docs/run doctests
42 | setenv =
43 |     DOCSDIR = {toxinidir}/docs
44 |     BUILDDIR = {toxinidir}/docs/_build
45 |     docs: BUILD = html
46 |     doctests: BUILD = doctest
47 | deps =
48 |     -r {toxinidir}/docs/requirements.txt
49 |     # ^ requirements.txt shared with Read The Docs
50 | commands =
51 |     sphinx-build -b {env:BUILD} -d "{env:BUILDDIR}/doctrees" "{env:DOCSDIR}" "{env:BUILDDIR}/{env:BUILD}" {posargs}
52 |
53 |
54 | [testenv:publish]
55 | description =
56 |     Publish the package you have been developing to a package index server.
57 |     By default, it uses testpypi. If you really want to publish your package
58 |     to be publicly accessible in PyPI, use the `-- --repository pypi` option.
59 | skip_install = True
60 | changedir = {toxinidir}
61 | passenv =
62 |     TWINE_USERNAME
63 |     TWINE_PASSWORD
64 |     TWINE_REPOSITORY
65 | deps = twine
66 | commands =
67 |     python -m twine check dist/*
68 |     python -m twine upload {posargs:--repository testpypi} dist/*
69 |
--------------------------------------------------------------------------------
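A minimal usage sketch for the tox environments defined above (an illustrative addition, assuming tox >= 3.15 is installed, the commands are run from the repository root, and that `test_pad_tensor_to_multiple_of` is just one example of a pytest `-k` filter):

    # run the default environment: installs the package with the "testing" extra and invokes pytest
    tox
    # forward arguments to pytest via {posargs}, e.g. run only the padding test
    tox -- -k test_pad_tensor_to_multiple_of
    # remove old build artifacts, then build an sdist/wheel into dist/
    tox -e clean,build
    # build the HTML docs or run the doctests with sphinx-build
    tox -e docs
    tox -e doctests
    # publish dist/* with twine; TestPyPI is the default repository,
    # pass "--repository pypi" through posargs to upload to the real PyPI
    tox -e publish
    tox -e publish -- --repository pypi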