├── pyproject.toml ├── _config.yml ├── assets ├── tests │ └── conv.float32.onnx └── conv.svg ├── scripts ├── source-me.sh ├── build-wheel.sh └── upload-pip.sh ├── onnxcli ├── __init__.py ├── check.py ├── infer_shape.py ├── setup.py ├── dispatcher.py ├── common.py ├── optimize.py ├── convert.py ├── draw.py ├── extract.py └── inspect.py ├── requirements.txt ├── .vscode ├── settings.json └── launch.json ├── .github ├── codecov.yml └── workflows │ ├── sanity.yml │ ├── stale.yml │ ├── publish.yml │ ├── build.yml │ └── codeql-analysis.yml ├── Makefile ├── setup.cfg ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── tests └── test_dispatcher.py ├── .gitignore ├── README.md └── LICENSE /pyproject.toml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-cayman -------------------------------------------------------------------------------- /assets/tests/conv.float32.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhenhuaw-me/onnxcli/HEAD/assets/tests/conv.float32.onnx -------------------------------------------------------------------------------- /scripts/source-me.sh: -------------------------------------------------------------------------------- 1 | echo "Tip: must be sourced under root directory of the repo!" 2 | export PYTHONPATH=$(pwd):${PYTHONPATH} 3 | -------------------------------------------------------------------------------- /onnxcli/__init__.py: -------------------------------------------------------------------------------- 1 | """ONNX Command Line Toolbox""" 2 | 3 | # package metadata 4 | __name__ = 'onnxcli' 5 | __version__ = '0.2.1' 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | black 2 | build 3 | coverage 4 | flake8 5 | numpy 6 | onnx 7 | onnxoptimizer==0.2.7 8 | protobuf 9 | pytest 10 | twine 11 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.testing.pytestArgs": [], 3 | "python.testing.unittestEnabled": false, 4 | "python.testing.pytestEnabled": true 5 | } -------------------------------------------------------------------------------- /.github/codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | threshold: 5% 6 | target: auto 7 | base: auto 8 | branches: 9 | - master 10 | only_pulls: true 11 | 12 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "test dispatcher", 6 | "type": "python", 7 | "request": "launch", 8 | "program": "tests/test_dispatcher.py", 9 | "console": "integratedTerminal" 10 | } 11 | ] 12 | } -------------------------------------------------------------------------------- /scripts/build-wheel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$(uname -s)" == "Darwin" ]; then 4 | 
root_dir=$(dirname $(dirname $(greadlink -f $0)))
5 | else
6 | root_dir=$(dirname $(dirname $(readlink -f $0)))
7 | fi
8 | rm -f ${root_dir}/assets/dist/onnxcli-*.whl
9 | 
10 | pip install build
11 | 
12 | python -m build \
13 | --outdir ${root_dir}/assets/dist
14 | rm -rf ${root_dir}/onnxcli.egg-info
15 | rm -rf ${root_dir}/build
16 | -------------------------------------------------------------------------------- /.github/workflows/sanity.yml: -------------------------------------------------------------------------------- 1 | name: Sanity
2 | 
3 | on: [push, pull_request]
4 | 
5 | jobs:
6 | 
7 | pychecker:
8 | name: Check Python Style
9 | runs-on: ubuntu-latest
10 | 
11 | steps:
12 | - uses: actions/checkout@v2
13 | - name: Set up Python 3.8
14 | uses: actions/setup-python@v2
15 | with:
16 | python-version: 3.8
17 | - name: Check Python Style
18 | run: |
19 | make check
20 | -------------------------------------------------------------------------------- /onnxcli/check.py: -------------------------------------------------------------------------------- 1 | import logging
2 | import onnx
3 | from onnxcli.common import SubCmd
4 | 
5 | logger = logging.getLogger('onnxcli')
6 | 
7 | 
8 | class CheckCmd(SubCmd):
9 | """Check if the given ONNX model is semantically correct."""
10 | 
11 | subcmd = 'check'
12 | 
13 | def add_args(self, subparser):
14 | subparser.add_argument('path', type=str, help="The path to the ONNX model")
15 | 
16 | def run(self, args):
17 | logger.info("Running on model {}".format(args.path))
18 | 
19 | onnx.checker.check_model(args.path)
20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | default: test
2 | 
3 | DIST_DIR=./assets/dist
4 | 
5 | build: clean
6 | pip3 install build
7 | python -m build --outdir $(DIST_DIR)
8 | -rm -rf ./onnxcli.egg-info
9 | -rm -rf ./build
10 | 
11 | test: install
12 | onnx setup
13 | python3 ./tests/test_dispatcher.py
14 | 
15 | check:
16 | pip3 install flake8 black
17 | flake8 --max-line-length 120 --max-complexity 20
18 | black --skip-string-normalization --line-length 120 --check .
19 | 
20 | format:
21 | black --skip-string-normalization --line-length 120 .
22 | 
23 | setup:
24 | pip3 install -r ./requirements.txt
25 | 
26 | install: clean build
27 | pip3 install --force-reinstall --no-dependencies $(DIST_DIR)/onnxcli-*-py3-none-any.whl
28 | 
29 | clean:
30 | -rm $(DIST_DIR)/*
31 | 
32 | 
33 | .PHONY: default build test check format setup install clean
34 | -------------------------------------------------------------------------------- /scripts/upload-pip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash
2 | 
3 | if [ "$(uname -s)" == "Darwin" ]; then
4 | root_dir=$(dirname $(dirname $(greadlink -f $0)))
5 | else
6 | root_dir=$(dirname $(dirname $(readlink -f $0)))
7 | fi
8 | 
9 | ${root_dir}/scripts/build-wheel.sh
10 | 
11 | read -p "Will upload to test.pypi.org; to actually publish, type \"Release\": " input_str
12 | if [ -z "${input_str}" -o "${input_str}" != "Release" ]; then
13 | python3 -m twine upload \
14 | --repository-url https://test.pypi.org/legacy/ \
15 | ${root_dir}/assets/dist/onnxcli-*
16 | else
17 | read -p "Will publish the package, are you sure to continue [Y|N] ? " input_str
18 | if [ -n "${input_str}" -a "${input_str}" = "Y" ]; then
19 | echo "Uploading..." 
20 | python3 -m twine upload ${root_dir}/assets/dist/onnxcli-*
21 | fi
22 | fi
23 | -------------------------------------------------------------------------------- /onnxcli/infer_shape.py: -------------------------------------------------------------------------------- 1 | import logging
2 | import onnx
3 | from onnxcli.common import SubCmd
4 | 
5 | logger = logging.getLogger('onnxcli')
6 | 
7 | 
8 | class InferShapeCmd(SubCmd):
9 | """Run shape inference on the given ONNX model."""
10 | 
11 | subcmd = 'infershape'
12 | 
13 | def add_args(self, subparser):
14 | subparser.add_argument('input_path', type=str, help="The path to the input ONNX model")
15 | subparser.add_argument('output_path', type=str, nargs='?', default=None, help="The path to the output ONNX model. Updates the input model in place if omitted")
16 | 
17 | def run(self, args):
18 | logger.info("Running on model {}".format(args.input_path))
19 | 
20 | if args.output_path:
21 | onnx.shape_inference.infer_shapes_path(args.input_path, args.output_path)
22 | else:
23 | onnx.shape_inference.infer_shapes_path(args.input_path)
24 | -------------------------------------------------------------------------------- /onnxcli/setup.py: -------------------------------------------------------------------------------- 1 | import logging
2 | import subprocess
3 | from onnxcli.common import SubCmd
4 | 
5 | logger = logging.getLogger('onnxcli')
6 | 
7 | 
8 | class SetupCmd(SubCmd):
9 | """Set up the dependent Python packages."""
10 | 
11 | subcmd = 'setup'
12 | 
13 | def add_args(self, subparser):
14 | subparser.add_argument('-l', '--list', action='store_true', help="List the packages only.")
15 | 
16 | def run(self, args):
17 | logger.info("Running setup")
18 | 
19 | pkgs = ['onnxoptimizer==0.2.7', 'protobuf']
20 | 
21 | if args.list:
22 | print("Dependent packages:")
23 | for pkg in pkgs:
24 | print(pkg)
25 | return
26 | 
27 | for pkg in pkgs:
28 | logger.info("Installing {}".format(pkg))
29 | subprocess.check_call(["pip", "install", pkg])
30 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: "Close stale issues"
2 | on:
3 | schedule:
4 | - cron: "30 1 * * *"
5 | 
6 | jobs:
7 | stale:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/stale@v3
11 | with:
12 | repo-token: ${{ secrets.GITHUB_TOKEN }}
13 | days-before-stale: 30
14 | days-before-close: 14
15 | stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 14 days.'
16 | stale-issue-label: 'stale'
17 | exempt-issue-labels: 'Story,help wanted,Enhancement'
18 | stale-pr-message: 'This PR is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 14 days.' 
19 | stale-pr-label: 'stale' 20 | exempt-pr-labels: 'Story,help wanted,Enhancement' 21 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | # This workflows will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package 5 | 6 | on: 7 | release: 8 | types: [created] 9 | 10 | jobs: 11 | deploy: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Set up Python 3.8 18 | uses: actions/setup-python@v2 19 | with: 20 | python-version: 3.8 21 | 22 | - name: Update pip 23 | run: python -m pip install --upgrade pip 24 | 25 | - name: Build package 26 | run: scripts/build-wheel.sh 27 | shell: bash 28 | 29 | - name: Publish 30 | env: 31 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 32 | TWINE_PASSWORD: ${{ secrets.PYPI_RELEASE_TOKEN }} 33 | run: | 34 | pip install twine 35 | twine upload --verbose assets/dist/* 36 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | 7 | test: 8 | name: Build and Test 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v2 13 | 14 | - name: Set up Python 3.8 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: 3.8 18 | 19 | - name: Update pip 20 | run: python -m pip install --upgrade pip 21 | 22 | - name: Build package 23 | run: make build 24 | 25 | - name: Install package 26 | run: pip install -U assets/dist/onnxcli-*.whl 27 | 28 | - name: Install dependencies 29 | run: | 30 | sudo apt update 31 | sudo apt install -y graphviz 32 | pip install coverage pytest 33 | onnx setup 34 | 35 | - name: Testing (collecting coverage data) 36 | run: coverage run --source=./onnxcli -m pytest 37 | 38 | - name: Upload coverage report 39 | continue-on-error: true 40 | env: 41 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 42 | run: | 43 | coverage xml 44 | bash <(curl -s https://codecov.io/bash) 45 | -------------------------------------------------------------------------------- /onnxcli/dispatcher.py: -------------------------------------------------------------------------------- 1 | """The main command dispatcher""" 2 | 3 | import argparse 4 | import logging 5 | import sys 6 | 7 | from onnxcli import __doc__ as DESCRIPTION 8 | from onnxcli.check import CheckCmd 9 | from onnxcli.convert import ConvertCmd 10 | from onnxcli.draw import DrawCmd 11 | from onnxcli.extract import ExtractCmd 12 | from onnxcli.infer_shape import InferShapeCmd 13 | from onnxcli.inspect import InspectCmd 14 | from onnxcli.optimize import OptimizeCmd 15 | from onnxcli.setup import SetupCmd 16 | 17 | logger = logging.getLogger('onnxcli') 18 | 19 | 20 | def dispatch(): 21 | dispatch_core(sys.argv[1:]) 22 | 23 | 24 | def dispatch_core(*raw_args): 25 | logger.debug("Running {}".format(*raw_args)) 26 | 27 | parser = argparse.ArgumentParser(description=DESCRIPTION) 28 | subparsers = parser.add_subparsers(title='subcommands') 29 | 30 | CheckCmd(subparsers) 31 | ConvertCmd(subparsers) 32 | DrawCmd(subparsers) 33 | ExtractCmd(subparsers) 34 | InferShapeCmd(subparsers) 35 | 
InspectCmd(subparsers) 36 | OptimizeCmd(subparsers) 37 | SetupCmd(subparsers) 38 | 39 | args = parser.parse_args(*raw_args) 40 | args.func(args) 41 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name=onnxcli 3 | version = attr: onnxcli.__version__ 4 | description = Lightweight Command Line Toolbox for ONNX 5 | 6 | author = 王振华(Zhenhua WANG) 7 | author_email = hi@zhenhuaw.me 8 | url = https://zhenhuaw.me/onnxcli 9 | 10 | long_description = file: README.md 11 | long_description_content_type = text/markdown 12 | license = Apache License 2.0 13 | license_file = LICENSE 14 | keywords = onnx, deep-learning 15 | 16 | project_urls = 17 | Bug Reports = https://github.com/zhenhuaw-me/onnxcli/issues 18 | Source = https://github.com/zhenhuaw-me/onnxcli 19 | 20 | classifiers = 21 | Development Status :: 3 - Alpha 22 | Programming Language :: Python :: 3 23 | Programming Language :: Python :: 3.6 24 | Programming Language :: Python :: 3.7 25 | Programming Language :: Python :: 3.8 26 | Environment :: Console 27 | Intended Audience :: Developers 28 | License :: OSI Approved :: Apache Software License 29 | Natural Language :: English 30 | Operating System :: OS Independent 31 | Topic :: Scientific/Engineering :: Artificial Intelligence 32 | 33 | 34 | [options] 35 | install_requires = onnx 36 | python_requires = >=3.6 37 | packages = find: 38 | 39 | [options.entry_points] 40 | console_scripts = 41 | onnx = onnxcli.dispatcher:dispatch 42 | onnxcli = onnxcli.dispatcher:dispatch 43 | 44 | 45 | [tool:pytest] 46 | log_level = DEBUG 47 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.208.0/containers/python-3/.devcontainer/base.Dockerfile 2 | 3 | # [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster 4 | ARG VARIANT="3.10-bullseye" 5 | FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} 6 | 7 | # [Choice] Node.js version: none, lts/*, 16, 14, 12, 10 8 | ARG NODE_VERSION="none" 9 | RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi 10 | 11 | # [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. 12 | # COPY requirements.txt /tmp/pip-tmp/ 13 | # RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ 14 | # && rm -rf /tmp/pip-tmp 15 | 16 | # [Optional] Uncomment this section to install additional OS packages. 17 | # RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 18 | # && apt-get -y install --no-install-recommends 19 | 20 | # [Optional] Uncomment this line to install global node packages. 
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1 -------------------------------------------------------------------------------- /onnxcli/common.py: -------------------------------------------------------------------------------- 1 | class SubCmd:
2 | subcmd = None
3 | 
4 | def __init__(self, subparsers):
5 | if self.subcmd is None:
6 | raise RuntimeError("subcmd must be provided!")
7 | 
8 | subparser = subparsers.add_parser(self.subcmd, help=self.__doc__)
9 | self.add_args(subparser)
10 | subparser.set_defaults(func=self.run)
11 | 
12 | def add_args(self, subparser):
13 | """Add dedicated arguments to the subcommand.
14 | 
15 | Needs to be overridden, and is only called by `__init__()`.
16 | """
17 | raise RuntimeError("{}.add_args() needs to be overridden!".format(type(self).__name__))
18 | 
19 | def run(self, args):
20 | """Run the utility with args.
21 | 
22 | Won't be called explicitly.
23 | """
24 | raise RuntimeError("{}.run() needs to be overridden!".format(type(self).__name__))
25 | 
26 | 
27 | def dtype(Key):
28 | # sync with TensorProto.DataType of https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
29 | RawMap = [
30 | # fmt: off
31 | 'UNDEFINED',
32 | 'FLOAT',
33 | 'UINT8',
34 | 'INT8',
35 | 'UINT16',
36 | 'INT16',
37 | 'INT32',
38 | 'INT64',
39 | 'STRING',
40 | 'BOOL',
41 | 'FLOAT16',
42 | 'DOUBLE',
43 | 'UINT32',
44 | 'UINT64',
45 | 'COMPLEX64',
46 | 'COMPLEX128',
47 | 'BFLOAT16',
48 | # fmt: on
49 | ]
50 | return RawMap[Key]
51 | 
52 | 
53 | def shape(ShapeProto):
54 | def not_empty_str(s):
55 | return s is not None and len(s) != 0
56 | 
57 | return [d.dim_param if not_empty_str(d.dim_param) else d.dim_value for d in ShapeProto.dim]
58 |
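A quick sketch exercising the `dtype()` and `shape()` helpers above (an assumption-laden example: it assumes the repo root is on `PYTHONPATH`, `onnx` is installed, and the bundled test model is present):
```
import onnx
from onnxcli.common import dtype, shape

# Describe the first graph input of the test model shipped in assets/.
m = onnx.load('./assets/tests/conv.float32.onnx')
t = m.graph.input[0].type.tensor_type
print(dtype(t.elem_type), shape(t.shape))  # expected: FLOAT [1, 8, 32, 32]
```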
-------------------------------------------------------------------------------- /onnxcli/optimize.py: -------------------------------------------------------------------------------- 1 | import logging
2 | import os
3 | import onnx
4 | from onnxcli.common import SubCmd
5 | 
6 | logger = logging.getLogger('onnxcli')
7 | 
8 | 
9 | class OptimizeCmd(SubCmd):
10 | """Optimize the given ONNX model."""
11 | 
12 | subcmd = 'optimize'
13 | 
14 | def add_args(self, subparser):
15 | subparser.add_argument('input_path', type=str, help="The path to the input ONNX model")
16 | subparser.add_argument('output_path', type=str, help="The path to the output ONNX model")
17 | subparser.add_argument(
18 | '-p',
19 | '--passes',
20 | type=str,
21 | nargs="+",
22 | default=[],
23 | help="The passes to run with the optimizer. Runs all available passes if not specified.",
24 | )
25 | 
26 | def run(self, args):
27 | logger.info("Running on model {}".format(args.input_path))
28 | try:
29 | import onnxoptimizer
30 | except ImportError as err:
31 | logger.error("Failed to import onnxoptimizer. Try to fix with `onnx setup`.")
32 | raise err
33 | 
34 | if not os.path.exists(args.input_path):
35 | raise ValueError("Invalid input model path: {}".format(args.input_path))
36 | if len(args.passes) == 0:
37 | passes = onnxoptimizer.get_available_passes()
38 | logger.warning("No optimization passes specified, running all available passes: {}".format(passes))
39 | else:
40 | passes = args.passes
41 | logger.info("Running with passes: {}".format(passes))
42 | 
43 | model = onnx.load(args.input_path)
44 | optimized = onnxoptimizer.optimize(model, passes)
45 | onnx.save(optimized, args.output_path)
46 | -------------------------------------------------------------------------------- /onnxcli/convert.py: -------------------------------------------------------------------------------- 1 | import logging
2 | import onnx
3 | import os
4 | from onnxcli.common import SubCmd
5 | 
6 | logger = logging.getLogger('onnxcli')
7 | 
8 | 
9 | class ConvertCmd(SubCmd):
10 | """Convert the given model to or from ONNX."""
11 | 
12 | subcmd = 'convert'
13 | 
14 | def add_args(self, subparser):
15 | subparser.add_argument('input_path', type=str, help="The path to the input model")
16 | subparser.add_argument('output_path', type=str, help="The path to the output model")
17 | subparser.add_argument(
18 | '-i', '--input_type', type=str, default='onnx', choices=['onnx'], help="The type of the input model"
19 | )
20 | subparser.add_argument(
21 | '-o', '--output_type', type=str, default='json', choices=['json'], help="The type of the output model"
22 | )
23 | 
24 | def run(self, args):
25 | logger.info("Running on model {}".format(args.input_path))
26 | 
27 | if not os.path.exists(args.input_path):
28 | raise ValueError("Input model file does not exist: {}".format(args.input_path))
29 | if args.input_type == 'onnx' and args.output_type == 'json':
30 | onnx2json(args.input_path, args.output_path)
31 | else:
32 | raise NotImplementedError(
33 | "Conversion from {} to {} is not supported yet.".format(args.input_type, args.output_type)
34 | )
35 | 
36 | 
37 | def onnx2json(input_path, output_path):
38 | """Convert the given ONNX model to JSON."""
39 | logger.info("Converting model {}".format(input_path))
40 | import json
41 | 
42 | try:
43 | from google.protobuf.json_format import MessageToJson
44 | except ImportError as err:
45 | logger.error("Failed to import protobuf. 
Try to fix with `onnx setup`.") 46 | raise err 47 | 48 | m = onnx.load(input_path) 49 | msg = MessageToJson(m) 50 | j = json.loads(msg) 51 | with open(output_path, 'w') as f: 52 | json.dump(j, f, indent=4) 53 | logger.info("JSON model saved as {}".format(output_path)) 54 | -------------------------------------------------------------------------------- /tests/test_dispatcher.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import shlex 3 | import subprocess as sp 4 | 5 | from onnxcli.dispatcher import dispatch_core 6 | 7 | fmt = '%(asctime)s %(levelname).1s [%(name)s][%(filename)s:%(lineno)d] %(message)s' 8 | logging.basicConfig(format=fmt, level=logging.DEBUG) 9 | 10 | logger = logging.getLogger('testing') 11 | 12 | cmds = [ 13 | 'check ./assets/tests/conv.float32.onnx', 14 | 'convert ./assets/tests/conv.float32.onnx converted.json --output_type json', 15 | 'draw ./assets/tests/conv.float32.onnx draw.dot --type dot', 16 | 'draw ./assets/tests/conv.float32.onnx draw.svg', 17 | 'extract ./assets/tests/conv.float32.onnx extract.onnx -i input -o output', 18 | 'infershape ./assets/tests/conv.float32.onnx shape.onnx', 19 | 'inspect ./assets/tests/conv.float32.onnx --meta --node --tensor', 20 | 'inspect ./assets/tests/conv.float32.onnx --node --indices 0 --detail', 21 | 'inspect ./assets/tests/conv.float32.onnx --node --names output --detail', 22 | 'inspect ./assets/tests/conv.float32.onnx --tensor --indices 0 --detail', 23 | 'inspect ./assets/tests/conv.float32.onnx --tensor --names output --detail', 24 | 'inspect ./assets/tests/conv.float32.onnx --io', 25 | 'inspect ./assets/tests/conv.float32.onnx', 26 | 'optimize ./assets/tests/conv.float32.onnx optimized.onnx', 27 | 'setup', 28 | 'setup --list', 29 | ] 30 | 31 | 32 | def test_dispatch_core(): 33 | for cmd in cmds: 34 | logger.debug("Running {}".format(cmd)) 35 | 36 | dispatch_core(shlex.split(cmd)) 37 | 38 | 39 | def test_dispatch_cmd(): 40 | for cmd in cmds: 41 | cmd = 'onnx ' + cmd 42 | logger.debug("Running {}".format(cmd)) 43 | p = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE, encoding='utf-8') 44 | while True: 45 | output = p.stdout.readline() 46 | if p.poll() is not None: 47 | break 48 | if output: 49 | print(output.strip()) 50 | retval = p.poll() 51 | assert retval == 0 52 | 53 | 54 | if __name__ == '__main__': 55 | test_dispatch_core() 56 | test_dispatch_cmd() 57 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.208.0/containers/python-3 3 | { 4 | "name": "Python 3", 5 | "build": { 6 | "dockerfile": "Dockerfile", 7 | "context": "..", 8 | "args": { 9 | // Update 'VARIANT' to pick a Python version: 3, 3.10, 3.9, 3.8, 3.7, 3.6 10 | // Append -bullseye or -buster to pin to an OS version. 11 | // Use -bullseye variants on local on arm64/Apple Silicon. 12 | "VARIANT": "3.8", 13 | // Options 14 | "NODE_VERSION": "none" 15 | } 16 | }, 17 | 18 | // Set *default* container specific settings.json values on container create. 
19 | "settings": { 20 | "python.defaultInterpreterPath": "/usr/local/bin/python", 21 | "python.linting.enabled": true, 22 | "python.linting.pylintEnabled": true, 23 | "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8", 24 | "python.formatting.blackPath": "/usr/local/py-utils/bin/black", 25 | "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf", 26 | "python.linting.banditPath": "/usr/local/py-utils/bin/bandit", 27 | "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8", 28 | "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy", 29 | "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle", 30 | "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle", 31 | "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint" 32 | }, 33 | 34 | // Add the IDs of extensions you want installed when the container is created. 35 | "extensions": [ 36 | "ms-python.python", 37 | "ms-python.vscode-pylance" 38 | ], 39 | 40 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 41 | // "forwardPorts": [], 42 | 43 | // Use 'postCreateCommand' to run commands after the container is created. 44 | "postCreateCommand": "pip3 install --user -r requirements.txt", 45 | 46 | // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 47 | "remoteUser": "vscode", 48 | "features": { 49 | "git": "latest", 50 | "git-lfs": "latest", 51 | "github-cli": "latest", 52 | "sshd": "latest" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Project specific 132 | *.onnx 133 | *.dot 134 | *.svg 135 | converted.json 136 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ master ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ master ] 20 | schedule: 21 | - cron: '15 15 * * 4' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 37 | # Learn more about CodeQL language support at https://git.io/codeql-language-support 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v2 42 | 43 | # Initializes the CodeQL tools for scanning. 44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v1 46 | with: 47 | languages: ${{ matrix.language }} 48 | # If you wish to specify custom queries, you can do so here or in a config file. 49 | # By default, queries listed here will override any specified in a config file. 50 | # Prefix the list here with "+" to use these queries and those in the config file. 51 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 52 | 53 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 54 | # If this step fails, then you should remove it and run the build manually (see below) 55 | - name: Autobuild 56 | uses: github/codeql-action/autobuild@v1 57 | 58 | # ℹ️ Command-line programs to run using the OS shell. 
59 | # 📚 https://git.io/JvXDl
60 | 
61 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
62 | # and modify them (or add more) to build your code if your project
63 | # uses a compiled language
64 | 
65 | #- run: |
66 | # make bootstrap
67 | # make release
68 | 
69 | - name: Perform CodeQL Analysis
70 | uses: github/codeql-action/analyze@v1
71 | -------------------------------------------------------------------------------- /assets/conv.svg: -------------------------------------------------------------------------------- [SVG figure, markup omitted: the example drawing of a Conv node rendered by `onnx draw`. A <Conv> node named "output" consumes the tensors "input" (FLOAT, [1, 8, 32, 32]), "Variable/read" (FLOAT, [16, 8, 3, 3]) and "Conv2D_bias" (FLOAT, [16]), and produces the tensor "output" (FLOAT, [1, 16, 32, 32]).]
-------------------------------------------------------------------------------- /onnxcli/draw.py: -------------------------------------------------------------------------------- 1 | import logging
2 | import tempfile
3 | import subprocess
4 | import shlex
5 | import onnx
6 | from onnxcli.common import SubCmd, dtype, shape
7 | 
8 | logger = logging.getLogger('onnxcli')
9 | 
10 | 
11 | class DrawCmd(SubCmd):
12 | """Draw the graph topology of the given ONNX model in [svg, dot, png] formats.
13 | 
14 | Gives you a quick view of the attributes of the tensors and nodes in addition.
15 | In the figure, nodes are ellipses and tensors are rectangles (the rounded ones are initializers).
16 | The generated figures can be viewed in a browser or image viewer without waiting for the model to load.
17 | It's really helpful when investigating large models - saves you from Netron.
18 | """
19 | 
20 | subcmd = 'draw'
21 | 
22 | def add_args(self, subparser):
23 | subparser.add_argument('input_path', type=str, help="The path to the input ONNX model")
24 | subparser.add_argument('output_path', type=str, help="The path to the output drawing graph")
25 | subparser.add_argument(
26 | '-t',
27 | '--type',
28 | type=str,
29 | default='svg',
30 | choices=['svg', 'dot', 'png', 'jpg', 'ps'],
31 | help="The type of the drawing. Default is svg. Requires dot (graphviz) to be installed.",
32 | )
33 | 
34 | def run(self, args):
35 | logger.info("Running on model {}".format(args.input_path))
36 | 
37 | dot_str = self.gen_graphviz_str(args.input_path)
38 | if args.type == 'dot':
39 | with open(args.output_path, 'w') as f:
40 | f.write(dot_str)
41 | else:
42 | with tempfile.NamedTemporaryFile(mode='w', suffix='.dot') as tmpfp:
43 | tmpfp.write(dot_str)
44 | tmpfp.flush()
45 | tmpfp.seek(0)
46 | dot_cmd = 'dot -T{} {} -o {}'.format(args.type, tmpfp.name, args.output_path)
47 | logger.debug("Invoking CMD {}".format(dot_cmd))
48 | try:
49 | subprocess.call(shlex.split(dot_cmd))
50 | except OSError:
51 | raise OSError("dot is not installed. Please install graphviz and try again.")
52 | logger.info("Drawing graph is saved to {}".format(args.output_path))
53 | 
54 | def gen_graphviz_str(self, input_path):
55 | logger.debug("Generating graphviz string from {}".format(input_path))
56 | 
57 | # escape chars that are special in graphviz
58 | def fixname(name):
59 | return name.replace('\\', '\\\\').replace(':', '\\:')
60 | 
61 | # sometimes a tensor has the same name as a node, which will cause issues when rendering the graph
62 | # key used as graphviz node key to build graph, name used as graphviz node label
63 | def tensor_key(name):
64 | return 'tensor_' + fixname(name)
65 | 
66 | def node_key(name):
67 | return 'node_' + fixname(name)
68 | 
69 | m = onnx.load_model(input_path)
70 | dot_str = "digraph onnxcli {\n"
71 | 
72 | # Keep track of the original tensor names.
73 | # Many tensors are not recorded in graph.value_info, thus we need to generate edges for them specially.
74 | tensor_names = set()
75 | 
76 | # nodes
77 | for node in m.graph.node:
78 | nname = fixname(node.name)
79 | nkey = node_key(node.name)
80 | dot_str += '"{}" [label="{}\\n<{}>" fontsize=16 shape=oval];\n'.format(nkey, nname, node.op_type)
81 | for iname in node.input:
82 | tensor_names.add(iname)
83 | dot_str += ' "{}" -> "{}";\n'.format(tensor_key(iname), nkey)
84 | for oname in node.output:
85 | tensor_names.add(oname)
86 | dot_str += ' "{}" -> "{}";\n'.format(nkey, tensor_key(oname))
87 | 
88 | # tensors
89 | for tensor in m.graph.initializer:
90 | tensor_names.discard(tensor.name)
91 | dot_str += '"{}" [label="{}\\n{}, {}" fontsize=10 style=rounded shape=rectangle];\n'.format(
92 | tensor_key(tensor.name), fixname(tensor.name), dtype(tensor.data_type), tensor.dims
93 | )
94 | all_value_info = list(m.graph.value_info) + list(m.graph.input) + list(m.graph.output)
95 | for tensor in all_value_info:
96 | if tensor.name in tensor_names:
97 | tensor_names.remove(tensor.name)
98 | dot_str += '"{}" [label="{}\\n{}, {}" fontsize=10 shape=rectangle];\n'.format(
99 | tensor_key(tensor.name),
100 | fixname(tensor.name),
101 | dtype(tensor.type.tensor_type.elem_type),
102 | shape(tensor.type.tensor_type.shape),
103 | )
104 | 
105 | if len(tensor_names) != 0:
106 | # the tensors that are not in graph.initializer nor graph.value_info
107 | logger.warning("There are tensors that only have a name (no data type or shape) in the graph.")
108 | for n in tensor_names:
109 | dot_str += '"{}" [label="{}" fontsize=10 shape=rectangle];\n'.format(tensor_key(n), fixname(n))
110 | 
111 | dot_str += "}\n"
112 | return dot_str
113 | -------------------------------------------------------------------------------- /onnxcli/extract.py: -------------------------------------------------------------------------------- 1 | import logging
2 | import onnx
3 | import os
4 | 
5 | from onnxcli.common import SubCmd
6 | 
7 | logger = logging.getLogger('onnxcli')
8 | 
9 | 
10 | class ExtractCmd(SubCmd):
11 | """Extract the sub-model that is determined by the given input and output tensor names.
12 | 
13 | The sub-model is defined by the names of the input and output tensors *exactly*.
14 | Note: For control-flow operators, e.g. If and Loop, the _boundary of sub-model_,
15 | which is defined by the input and output tensors, should not _cut through_ the
16 | subgraph that is connected to the _main graph_ as attributes of these operators. 
17 | """ 18 | 19 | subcmd = 'extract' 20 | 21 | def add_args(self, subparser): 22 | subparser.add_argument('input_path', type=str, help="The path to original ONNX model") 23 | subparser.add_argument('output_path', type=str, default=None, help="The path to save the extracted ONNX model") 24 | subparser.add_argument( 25 | '-i', 26 | '--input_names', 27 | nargs='+', 28 | help="The names of the input tensors that to be extracted.", 29 | ) 30 | subparser.add_argument( 31 | '-o', 32 | '--output_names', 33 | nargs='+', 34 | help="The names of the output tensors that to be extracted.", 35 | ) 36 | 37 | def run(self, args): 38 | logger.info("Running on model {}".format(args.input_path)) 39 | if not os.path.exists(args.input_path): 40 | raise ValueError("Invalid input model path: {}".format(args.input_path)) 41 | if args.output_path is None: 42 | raise ValueError("Output model path shall not be empty!") 43 | if len(args.output_names) == 0: 44 | raise ValueError("Output tensor names shall not be empty!") 45 | try: 46 | onnx.checker.check_model(args.input_path) 47 | except Exception as e: 48 | logger.warning("Input model invalid, the resulted model can be invalid too!\n {}".format(e)) 49 | 50 | model = onnx.load(args.input_path) 51 | e = Extractor(model) 52 | extracted = e.extract(args.input_names, args.output_names) 53 | onnx.save(extracted, args.output_path) 54 | 55 | 56 | class Extractor: 57 | def __init__(self, model): 58 | self.model = onnx.shape_inference.infer_shapes(model) 59 | self.graph = self.model.graph 60 | self.wmap = self._build_name2obj_dict(self.graph.initializer) 61 | self.vimap = self._build_name2obj_dict(self.graph.value_info) 62 | 63 | @staticmethod 64 | def _build_name2obj_dict(objs): 65 | return {obj.name: obj for obj in objs} 66 | 67 | def _collect_new_io_core(self, original_io, io_names_to_extract): 68 | original_io_map = self._build_name2obj_dict(original_io) 69 | original_io_names = set(original_io_map.keys()) 70 | s_io_names_to_extract = set(io_names_to_extract) 71 | io_names_to_keep = s_io_names_to_extract & original_io_names 72 | new_io_names_to_add = s_io_names_to_extract - original_io_names 73 | 74 | new_io_tensors = [] 75 | for name in io_names_to_keep: 76 | new_io_tensors.append(original_io_map[name]) 77 | for name in new_io_names_to_add: 78 | # activation become input or output 79 | new_io_tensors.append(self.vimap[name]) 80 | 81 | # adjust sequence 82 | new_io_tensors_map = self._build_name2obj_dict(new_io_tensors) 83 | return [new_io_tensors_map[name] for name in io_names_to_extract] 84 | 85 | def _collect_new_inputs(self, names): 86 | return self._collect_new_io_core(self.graph.input, names) 87 | 88 | def _collect_new_outputs(self, names): 89 | return self._collect_new_io_core(self.graph.output, names) 90 | 91 | def _dfs_search_reachable_nodes(self, node_output_name, graph_input_names, reachable_nodes): 92 | if node_output_name in graph_input_names: 93 | return 94 | for node in self.graph.node: 95 | if node in reachable_nodes: 96 | continue 97 | if node_output_name not in node.output: 98 | continue 99 | reachable_nodes.append(node) 100 | for name in node.input: 101 | self._dfs_search_reachable_nodes(name, graph_input_names, reachable_nodes) 102 | 103 | def _collect_reachable_nodes(self, input_names, output_names): 104 | reachable_nodes = list() 105 | for name in output_names: 106 | self._dfs_search_reachable_nodes(name, input_names, reachable_nodes) 107 | # needs to be topology sorted. 
108 | nodes = [n for n in self.graph.node if n in reachable_nodes]
109 | return nodes
110 | 
111 | def _collect_reachable_tensors(self, nodes):
112 | all_tensors_name = set()
113 | for node in nodes:
114 | for name in node.input:
115 | all_tensors_name.add(name)
116 | for name in node.output:
117 | all_tensors_name.add(name)
118 | 
119 | initializer = [self.wmap[t] for t in self.wmap.keys() if t in all_tensors_name]
120 | value_info = [self.vimap[t] for t in self.vimap.keys() if t in all_tensors_name]
121 | assert len(self.graph.sparse_initializer) == 0
122 | assert len(self.graph.quantization_annotation) == 0
123 | return (initializer, value_info)
124 | 
125 | def _make_model(self, nodes, inputs, outputs, initializer, value_info):
126 | name = 'Extracted from {' + self.graph.name + '}'
127 | graph = onnx.helper.make_graph(nodes, name, inputs, outputs, initializer=initializer, value_info=value_info)
128 | 
129 | meta = {
130 | 'ir_version': self.model.ir_version,
131 | 'opset_imports': self.model.opset_import,
132 | 'producer_name': 'onnx.utils.extract_model',
133 | }
134 | return onnx.helper.make_model(graph, **meta)
135 | 
136 | def extract(self, input_names, output_names):
137 | inputs = self._collect_new_inputs(input_names)
138 | outputs = self._collect_new_outputs(output_names)
139 | nodes = self._collect_reachable_nodes(input_names, output_names)
140 | initializer, value_info = self._collect_reachable_tensors(nodes)
141 | model = self._make_model(nodes, inputs, outputs, initializer, value_info)
142 | return model
143 |
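For context, a minimal sketch of driving the `Extractor` class above directly; the model path and tensor names here are placeholders for illustration, and the `extract` subcommand wraps this same flow:
```
import onnx

# Load a source model, cut out the sub-graph between the named tensors
# (names must match the graph exactly), and save the result.
model = onnx.load('model.onnx')
sub_model = Extractor(model).extract(['input'], ['output'])
onnx.save(sub_model, 'sub.onnx')
```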
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ONNX Command Line Toolbox
2 | =========================
3 | 
4 | [![Build and Test](https://github.com/zhenhuaw-me/onnxcli/workflows/Build%20and%20Test/badge.svg)](https://github.com/zhenhuaw-me/onnxcli/actions/workflows/build.yml)
5 | [![CodeQL](https://github.com/zhenhuaw-me/onnxcli/workflows/CodeQL/badge.svg)](https://github.com/zhenhuaw-me/onnxcli/actions/workflows/codeql-analysis.yml)
6 | [![Sanity](https://github.com/zhenhuaw-me/onnxcli/workflows/Sanity/badge.svg)](https://github.com/zhenhuaw-me/onnxcli/actions/workflows/sanity.yml)
7 | [![Coverage](https://codecov.io/gh/zhenhuaw-me/onnxcli/branch/master/graph/badge.svg)](https://codecov.io/gh/zhenhuaw-me/onnxcli)
8 | [![Download](https://img.shields.io/pypi/dm/onnxcli)](https://img.shields.io/pypi/dm/onnxcli)
9 | 
10 | 
11 | * Aims to improve your experience of investigating ONNX models.
12 | * Use it like `onnx infershape /path/to/input/model.onnx /path/to/output/model.onnx`. (See the [usage section](#usage).)
13 | 
14 | 
15 | ## Installation
16 | 
17 | We recommend installing via the [GitHub repo][github] for the latest functionality.
18 | ```
19 | pip install git+https://github.com/zhenhuaw-me/onnxcli.git
20 | ```
21 | 
22 | _Two alternative ways are:_
23 | 1. Install via the [pypi package][pypi]: `pip install onnxcli`
24 | 2. Download and add the code tree to your `$PYTHONPATH`. (For development purposes; the command line invocation is different.)
25 | ```
26 | git clone https://github.com/zhenhuaw-me/onnxcli.git
27 | export PYTHONPATH=$(pwd)/onnxcli:${PYTHONPATH}
28 | python onnxcli/dispatcher.py
29 | ```
30 | 
31 | **Requirements**
32 | 
33 | `onnxcli` depends on different packages for different functionality, and the list may grow.
34 | However, we include only a few basic ones (`onnx`, for example) since you may use only a small portion of the functionality, or you may prefer a different version.
35 | 
36 | Depending on the subcommand, an error will be raised if the requirements are not met.
37 | Follow the error message to install the requirements.
38 | 
39 | 
40 | ## Usage
41 | 
42 | Once installed, the `onnx` and `onnxcli` commands are available.
43 | You can play with commands such as `onnx infershape /path/to/input/model.onnx /path/to/output/model.onnx`.
44 | 
45 | The general format is `onnx <subcommand> <arguments>`.
46 | The subcommands are described in the sections below.
47 | 
48 | _Check the online help with `onnx --help` and `onnx <subcommand> --help` for the latest usage._
49 | 
50 | ### infershape
51 | 
52 | `onnx infershape` performs [shape inference](https://github.com/onnx/onnx/blob/master/docs/ShapeInference.md) of the ONNX model.
53 | It's a CLI wrapper of [`onnx.shape_inference`](https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md#running-shape-inference-on-an-onnx-model).
54 | You will find it useful to generate shape information for the models that are extracted by [`onnx extract`](#extract).
55 | 
56 | ### extract
57 | 
58 | `onnx extract` extracts the sub-model that is determined by the names of the input and output tensors of the subgraph from the original model.
59 | It's a CLI wrapper of [`onnx.utils.extract_model`](https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md#extracting-sub-model-with-inputs-outputs-tensor-names) (which I authored in the ONNX repo).
60 | 
61 | ### inspect
62 | 
63 | `onnx inspect` gives you a quick view of the information of the given model.
64 | It's inspired by [the tf-onnx tool](https://github.com/onnx/tensorflow-onnx/blob/master/tools/dump-onnx.py).
65 | 
66 | When working on deep learning, you may like to take a look at what's inside the model.
67 | [Netron](https://github.com/lutzroeder/netron) is powerful but doesn't provide a fine-grained view.
68 | 
69 | With `onnx inspect`, you no longer need to scroll the Netron window to look for nodes or tensors.
70 | Instead, you can dump the node attributes and tensor values with a single command.
71 | 
72 |
<details><summary>Click here to see a node example</summary> 73 | <pre>
 74 | $ onnx inspect ./assets/tests/conv.float32.onnx --node --indices 0 --detail
 75 | 
 76 | Inpect of model ./assets/tests/conv.float32.onnx
 77 |   Graph name: 9
 78 |   Graph inputs: 1
 79 |   Graph outputs: 1
 80 |   Nodes in total: 1
 81 |   ValueInfo in total: 2
 82 |   Initializers in total: 2
 83 |   Sparse Initializers in total: 0
 84 |   Quantization in total: 0
 85 | 
 86 | Node information:
 87 |   Node "output": type "Conv", inputs "['input', 'Variable/read', 'Conv2D_bias']", outputs "['output']"
 88 |     attributes: [name: "dilations"
 89 | ints: 1
 90 | ints: 1
 91 | type: INTS
 92 | , name: "group"
 93 | i: 1
 94 | type: INT
 95 | , name: "kernel_shape"
 96 | ints: 3
 97 | ints: 3
 98 | type: INTS
 99 | , name: "pads"
100 | ints: 1
101 | ints: 1
102 | ints: 1
103 | ints: 1
104 | type: INTS
105 | , name: "strides"
106 | ints: 1
107 | ints: 1
108 | type: INTS
109 | ]
110 | 
 111 | </pre>
 112 | </details>
 113 | <details><summary>Click here to see a tensor example</summary>
 114 | <pre>

115 | $ onnx inspect ./assets/tests/conv.float32.onnx --tensor --names Conv2D_bias --detail
116 | 
117 | Inpect of model ./assets/tests/conv.float32.onnx
118 |   Graph name: 9
119 |   Graph inputs: 1
120 |   Graph outputs: 1
121 |   Nodes in total: 1
122 |   ValueInfo in total: 2
123 |   Initializers in total: 2
124 |   Sparse Initializers in total: 0
125 |   Quantization in total: 0
126 | 
127 | Tensor information:
128 |   Initializer "Conv2D_bias": type FLOAT, shape [16],
129 |     float data: [0.4517577290534973, -0.014192663133144379, 0.2946248948574066, -0.9742919206619263, -1.2975586652755737, 0.7223454117774963, 0.7835700511932373, 1.7674627304077148, 1.7242872714996338, 1.1230682134628296, -0.2902531623840332, 0.2627834975719452, 1.0175092220306396, 0.5643373131752014, -0.8244842290878296, 1.2169424295425415]
130 | 
131 |
132 | 133 | ### draw 134 | 135 | `onnx draw` draws the graph in `dot`, `svg`, `png` formats. 136 | It gives you quick view of the type and shape of the tensors that are fed to a specific node. 137 | You can view the model topology in image viewer of browser without waiting for the model to load, 138 | which I found is really helpful for large models. 139 | 140 | If you are viewing `svg` in browser, you can even quick search for the nodes and tensors. 141 | Together with [`onnx inspect`](#inspect), it will be very efficient to understand the issue you are looking into. 142 | 143 | The node are in ellipses and tensors are in rectangles where the rounded ones are initializers. 144 | The node type of the node and the data type and shape of the tenors are also rendered. 145 | Here is a Convolution node example. 146 | 147 | ![conv](assets/conv.svg) 148 | 149 | **Note**: The [`onnx draw`](#draw) requires [`dot` command (graphviz)](https://graphviz.org/) to be avaiable on your machine - which can be installed by command as below on Ubuntu/Debian. 150 | ``` 151 | sudo apt install -y graphviz 152 | ``` 153 | 154 | ### optimize 155 | 156 | `onnx optimize` optimizes the input model with [ONNX Optimizer](https://github.com/onnx/optimizer). 157 | 158 | 159 | ## Contributing 160 | 161 | Welcome to contribute new commands or enhance them. 162 | Let's make our life easier together. 163 | 164 | The workflow is pretty simple: 165 | 166 | 1. Starting with GitHub Codespace or clone locally. 167 | 1. `make setup` to config the dependencies (or `pip install -r ./requirements.txt` if you prefer). 168 | 169 | 2. Create a new subcommand 170 | 1. Starting by copying and modifying [infershape](./onnxcli/infer_shape.py). 171 | 2. Register the command in the [dispatcher](./onnxcli/dispatcher.py) 172 | 3. Create a new command line [test](./tests/test_dispatcher.py) 173 | 4. `make test` to build and test. 174 | 5. `make check` and `make format` to fix any code style issues. 175 | 176 | 3. Try out, debug, commit, push, and open pull request. 177 | 1. The code has been protected by CI. You need to get a pass before merging. 178 | 2. Ask if any questions. 179 | 180 | 181 | ## License 182 | 183 | Apache License Version 2.0. 184 | 185 | 186 | [pypi]: https://pypi.org/project/onnxcli 187 | [github]: https://github.com/zhenhuaw-me/onnxcli 188 | -------------------------------------------------------------------------------- /onnxcli/inspect.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import onnx 3 | from onnxcli.common import SubCmd, dtype, shape 4 | 5 | logger = logging.getLogger('onnxcli') 6 | 7 | 8 | class InspectCmd(SubCmd): 9 | """Prints the information of nodes tensors of the given model. 10 | 11 | When working on deep learning, you may like to take a look at what's inside the model. 12 | You no longer need to scroll the Netron window to look for nodes or tensors. 13 | Instead, you can dump the node attributes and tensor values with a single command. 
14 | """ 15 | 16 | subcmd = 'inspect' 17 | 18 | def add_args(self, subparser): 19 | subparser.add_argument('input_path', type=str, help="The path to the input ONNX model") 20 | subparser.add_argument( 21 | '-m', 22 | '--meta', 23 | action='store_true', 24 | help="Print the meta information of the model", 25 | ) 26 | subparser.add_argument( 27 | '-n', 28 | '--node', 29 | action='store_true', 30 | help="Print the node information of the model", 31 | ) 32 | subparser.add_argument( 33 | '-t', 34 | '--tensor', 35 | action='store_true', 36 | help="Print the tensor information of the model", 37 | ) 38 | subparser.add_argument( 39 | '-io', 40 | '--io', 41 | action='store_true', 42 | help="Print the input and output tensor information of the model", 43 | ) 44 | subparser.add_argument( 45 | '-i', 46 | '--indices', 47 | type=int, 48 | nargs="+", 49 | default=[], 50 | help="Specify the indices of the node(s) or tensor(s) to inspect. Can NOT set together with --names", 51 | ) 52 | subparser.add_argument( 53 | '-N', 54 | '--names', 55 | type=str, 56 | nargs="+", 57 | default=[], 58 | help="Specify the names of the node(s) or tensor(s) to inspect. Can NOT set together with --indices", 59 | ) 60 | subparser.add_argument( 61 | '-d', 62 | '--detail', 63 | action='store_true', 64 | help="Print detailed information of the nodes or tensors that specified by --indices or --names." 65 | " Warning: will print the data of tensors.", 66 | ) 67 | 68 | def run(self, args): 69 | logger.info("Running on model {}".format(args.input_path)) 70 | has_indices = len(args.indices) != 0 71 | has_names = len(args.names) != 0 72 | no_tensor_or_node = args.node is None and args.tensor is None 73 | if has_indices and has_names: 74 | raise ValueError("Can NOT set both --indices and --names") 75 | if (has_indices or has_indices) and no_tensor_or_node: 76 | raise ValueError("Can NOT set --indices or --names without --node or --tensor") 77 | if (not has_indices and not has_names) and args.detail: 78 | raise ValueError("Can NOT set --detail without --indices or --names") 79 | 80 | try: 81 | onnx.checker.check_model(args.input_path) 82 | except Exception: 83 | logger.warning("Failed to check model {}, statistic could be inaccurate!".format(args.input_path)) 84 | m = onnx.load_model(args.input_path) 85 | g = m.graph 86 | printed_any = False 87 | 88 | if args.meta: 89 | print_meta(m) 90 | printed_any = True 91 | 92 | if args.node: 93 | print_nodes(g, args.indices, args.names, args.detail) 94 | printed_any = True 95 | 96 | if args.tensor: 97 | print_tensor(g, args.indices, args.names, args.detail) 98 | printed_any = True 99 | 100 | if args.io: 101 | print_io(g) 102 | printed_any = True 103 | 104 | if not printed_any: 105 | print_basic(g) 106 | 107 | 108 | def print_meta(m): 109 | print("Meta information") 110 | print("-" * 80) 111 | print(" IR Version: {}".format(m.ir_version)) 112 | print(" Opset Import: {}".format(m.opset_import)) 113 | print(" Producer name: {}".format(m.producer_name)) 114 | print(" Producer version: {}".format(m.producer_version)) 115 | print(" Domain: {}".format(m.domain)) 116 | print(" Doc string: {}".format(m.doc_string)) 117 | for i in m.metadata_props: 118 | print(" meta.{} = {}", i.key, i.value) 119 | 120 | 121 | def print_basic(g): 122 | print(" Graph name: {}".format(len(g.name))) 123 | print(" Graph inputs: {}".format(len(g.input))) 124 | print(" Graph outputs: {}".format(len(g.output))) 125 | print(" Nodes in total: {}".format(len(g.node))) 126 | print(" ValueInfo in total: {}".format(len(g.value_info))) 127 
| print(" Initializers in total: {}".format(len(g.initializer))) 128 | print(" Sparse Initializers in total: {}".format(len(g.sparse_initializer))) 129 | print(" Quantization in total: {}".format(len(g.quantization_annotation))) 130 | 131 | 132 | def print_tensor(g, indices, names, detail): 133 | print("Tensor information") 134 | print("-" * 80) 135 | 136 | if len(indices) > 0: 137 | for idx in indices: 138 | printed = print_tensor_with_indice(g, idx, detail) 139 | if not printed: 140 | raise ValueError("indice {} out of range".format(idx)) 141 | return 142 | 143 | # print with names 144 | if len(names) > 0: 145 | for name in names: 146 | printed = print_tensor_with_name(g, detail, name) 147 | if not printed: 148 | raise ValueError("No tensor found with name {}".format(name)) 149 | return 150 | 151 | # print all tensors 152 | for t in g.value_info: 153 | print_value_info(t) 154 | for t in g.initializer: 155 | print_initializer(t, False) 156 | 157 | 158 | def print_io(g): 159 | print("Input information") 160 | print("-" * 80) 161 | for t in g.input: 162 | print_value_info(t) 163 | print("Output information") 164 | print("-" * 80) 165 | for t in g.output: 166 | print_value_info(t) 167 | 168 | 169 | def print_value_info(t): 170 | txt = " ValueInfo \"{}\":".format(t.name) 171 | txt += " type {},".format(dtype(t.type.tensor_type.elem_type)) 172 | txt += " shape {},".format(shape(t.type.tensor_type.shape)) 173 | print(txt) 174 | 175 | 176 | def print_initializer(t, detail): 177 | txt = " Initializer \"{}\":".format(t.name) 178 | txt += " type {},".format(dtype(t.data_type)) 179 | txt += " shape {},".format(t.dims) 180 | txt += " data_location {},".format(t.data_location) 181 | txt += " external_data {},".format(t.external_data) 182 | print(txt) 183 | if detail: 184 | import numpy as np 185 | import onnx.numpy_helper as nphelper 186 | 187 | data = np.array(nphelper.to_array(t)).reshape(t.dims) 188 | print(" data in numpy format: \n{}".format(data)) 189 | 190 | 191 | def print_tensor_with_indice(g, idx, detail): 192 | tensor_name = None 193 | if idx < len(g.initializer): 194 | print_initializer(g.initializer[idx], detail) 195 | tensor_name = g.initializer[idx].name 196 | if idx < len(g.value_info): 197 | print_value_info(g.value_info[idx]) 198 | tensor_name = g.value_info[idx].name 199 | return tensor_name is not None 200 | 201 | 202 | def print_tensor_with_name(g, detail, name): 203 | for i in g.value_info: 204 | if i.name == name: 205 | print_value_info(i) 206 | return True 207 | for i in g.initializer: 208 | if i.name == name: 209 | print_initializer(i, detail) 210 | return True 211 | for i in g.input: 212 | if i.name == name: 213 | print_value_info(i) 214 | return True 215 | for i in g.output: 216 | if i.name == name: 217 | print_value_info(i) 218 | return True 219 | 220 | 221 | def print_nodes(g, indices, names, detail): 222 | print("Node information") 223 | print("-" * 80) 224 | 225 | def print_node(n, detail): 226 | txt = " Node \"{}\":".format(n.name) 227 | txt += " type \"{}\",".format(n.op_type) 228 | txt += " inputs \"{}\",".format(n.input) 229 | txt += " outputs \"{}\"".format(n.output) 230 | print(txt) 231 | if detail and len(n.attribute) > 0: 232 | print(" attributes: {}".format(n.attribute)) 233 | 234 | # print with indices 235 | if len(indices) > 0: 236 | for idx in indices: 237 | if idx >= len(g.node): 238 | raise ValueError("indices {} out of range, node in total {}".format(idx, len(g.node))) 239 | print_node(g.node[idx], detail) 240 | return 241 | 242 | # print with names 243 | 
245 |     if len(names) > 0:
246 |         for name in names:
247 |             found = False
248 |             for n in g.node:
249 |                 if n.name == name:
250 |                     print_node(n, detail)
251 |                     found = True
252 |                     break
253 |             if not found:
254 |                 raise ValueError("No node found with name {}".format(name))
255 |         return
256 | 
257 |     # print all nodes: a per-op-type summary, then every node
258 |     import collections
259 | 
260 |     ops = collections.Counter([node.op_type for node in g.node])
261 |     for op, count in ops.most_common():
262 |         print(" Node type \"{}\" has: {}".format(op, count))
263 | 
264 |     print("-" * 80)
265 |     for node in g.node:
266 |         print_node(node, False)
267 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 | 
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 | 
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 | 
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 | 
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner.
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------
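Usage sketch: the inspect helpers in onnxcli/inspect.py are normally reached through the command-line dispatcher (onnxcli/dispatcher.py), but they can also be driven directly. The following is a minimal, illustrative sketch rather than a file in the repository; it assumes the repo root is on PYTHONPATH (scripts/source-me.sh arranges that) and uses the test model bundled under assets/tests. Empty index and name lists select everything.

    import onnx
    from onnxcli.inspect import print_io, print_nodes, print_tensor

    # load the bundled test model and grab its graph
    model = onnx.load('assets/tests/conv.float32.onnx')
    g = model.graph

    print_io(g)                     # graph inputs and outputs
    print_tensor(g, [], [], False)  # all value_info entries and initializers, no data dump
    print_nodes(g, [], [], False)   # per-op-type counts, then every node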