├── .flake8 ├── .gitignore ├── .gitlab-ci.yml ├── .pre-commit-config.yaml ├── .pylintrc ├── .vscode └── settings.json ├── LICENSE ├── Makefile ├── README.md ├── asset └── img │ ├── class-design.png │ └── top-design.png ├── dockers ├── Dockerfile ├── env_vars ├── requirements-dev.txt ├── requirements.txt └── trt_8_5_1_7.sh ├── md_doc ├── design_manual.md ├── developer_manual.md └── user_manual.md ├── onn ├── __init__.py ├── atomic │ ├── __init__.py │ ├── add.py │ └── backend │ │ ├── __init__.py │ │ └── triton │ │ ├── __init__.py │ │ └── add.py ├── block │ └── __init__.py ├── cv │ └── __init__.py ├── nlp │ └── __init__.py ├── operator.py ├── slot.py └── task │ ├── __init__.py │ └── auto_driving │ └── __init__.py ├── pyproject.toml ├── requirements-dev.txt ├── requirements.txt ├── setup.py └── tests ├── atomic └── add_test.py └── pkg_test.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | # Exclude some common places to speed up. 3 | # Reference: https://flake8.pycqa.org/en/latest/user/configuration.html#project-configuration 4 | extend-exclude = 5 | # No need to traverse our git directory. 6 | .git, 7 | # No need to traverse our virtual env directory. 8 | .venv, 9 | # There's no value in checking cache directories. 10 | __pycache__, 11 | # This contains our built documentation. 12 | build, 13 | # This contains builds of flake8 that we don't want to check 14 | dist 15 | 16 | # exclude = # torch dist code, ignore it., 17 | 18 | # Ignore some specific errors. 19 | extend-ignore = 20 | # Back-compatibility for flake8-blind-except. Should remove it in the future. 21 | B902, 22 | # Since dict() is commonly used for configuration. 23 | C408, 24 | # black compatibility. 25 | # Reference: https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#flake8 26 | E203, 27 | # Pickle and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue. 
28 | S301, 29 | # Standard pseudo-random generators are not suitable for security/cryptographic purposes. 30 | S311, 31 | # possible security implications associated with pickle module. 32 | S403, 33 | I001, 34 | I005, 35 | BLK100, 36 | ABS101, 37 | F403, 38 | F401, 39 | # Make `flake8-quotes` compatible with black. 40 | # Reference: https://github.com/zheller/flake8-quotes#configuration 41 | # Reference: https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#strings 42 | inline-quotes = double 43 | # black compatibility. 44 | # Reference: https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#flake8 45 | max-line-length = 88 46 | # For E501, Ignore line too long in setup.py and experiments folder. 47 | # For I900, Ignore requirement error in the tests folder. 48 | # For S101, Ignore assert used error in the tests folder. 49 | # Reference: https://github.com/tylerwince/flake8-bandit#configuration 50 | per-file-ignores = 51 | setup.py:E501 52 | tests/*:I900,S101 53 | # Require code for noqa. 
Avoid rough `# noqa` 54 | noqa-require-code = true 55 | 56 | # known-modules = 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Custom 2 | *.swp 3 | *.un~ 4 | .DS_Store 5 | Pipfile* 6 | launch.json 7 | output 8 | public 9 | 10 | # Byte-compiled / optimized / DLL files 11 | __pycache__/ 12 | *.py[cod] 13 | *$py.class 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Distribution / packaging 19 | .Python 20 | build/ 21 | develop-eggs/ 22 | dist/ 23 | downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | share/python-wheels/ 33 | *.egg-info/ 34 | .installed.cfg 35 | *.egg 36 | MANIFEST 37 | 38 | # PyInstaller 39 | # Usually these files are written by a python script from a template 40 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 41 | *.manifest 42 | *.spec 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit test / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .nox/ 52 | .coverage 53 | .coverage.* 54 | .cache 55 | nosetests.xml 56 | coverage.xml 57 | *.cover 58 | *.py,cover 59 | .hypothesis/ 60 | .pytest_cache/ 61 | cover/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | db.sqlite3-journal 72 | 73 | # Flask stuff: 74 | instance/ 75 | .webassets-cache 76 | 77 | # Scrapy stuff: 78 | .scrapy 79 | 80 | # Sphinx documentation 81 | docs/_build/ 82 | 83 | # PyBuilder 84 | .pybuilder/ 85 | target/ 86 | 87 | # Jupyter Notebook 88 | .ipynb_checkpoints 89 | 90 | # IPython 91 | profile_default/ 92 | ipython_config.py 93 | 94 | # pyenv 95 | # For a library or package, you might want to ignore these files since the code is 96 | # intended to run in multiple environments; otherwise, check them in: 97 | # .python-version 98 | 99 | # pipenv 100 
| # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 103 | # install all needed dependencies. 104 | #Pipfile.lock 105 | 106 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 107 | __pypackages__/ 108 | 109 | # Celery stuff 110 | celerybeat-schedule 111 | celerybeat.pid 112 | 113 | # SageMath parsed files 114 | *.sage.py 115 | 116 | # Environments 117 | .env 118 | .venv 119 | env/ 120 | venv/ 121 | ENV/ 122 | env.bak/ 123 | venv.bak/ 124 | 125 | # Spyder project settings 126 | .spyderproject 127 | .spyproject 128 | 129 | # Rope project settings 130 | .ropeproject 131 | 132 | # mkdocs documentation 133 | /site 134 | 135 | # mypy 136 | .mypy_cache/ 137 | .dmypy.json 138 | dmypy.json 139 | 140 | # Pyre type checker 141 | .pyre/ 142 | 143 | # pytype static type analyzer 144 | .pytype/ 145 | 146 | # Cython debug symbols 147 | cython_debug/ 148 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - format_checker 3 | - tester 4 | - release 5 | - build_pages 6 | 7 | lint: 8 | stage: format_checker 9 | image: your_docker_image 10 | script: 11 | - env | sort 12 | - nvidia-smi 13 | - make dev 14 | - make lint 15 | only: 16 | - merge_requests 17 | - /^v(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)$/ 18 | tags: 19 | - your git lab ci-runner-tag 20 | interruptible: true 21 | 22 | test: 23 | stage: tester 24 | image: your_docker_image 25 | script: 26 | - env | sort 27 | - nvidia-smi 28 | - make dev 29 | - make test 30 | only: 31 | - merge_requests 32 | - /^v(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)$/ 33 | tags: 34 | - your git lab ci-runner-tag 35 | interruptible: true 36 | 37 | package: 
38 | stage: release 39 | image: your_docker_image 40 | script: 41 | - env | sort 42 | - make build 43 | - make upload 44 | only: 45 | - /^v(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)$/ 46 | tags: 47 | - your git lab ci-runner-tag 48 | interruptible: true 49 | 50 | pages: 51 | stage: build_pages 52 | image: your_docker_image 53 | script: 54 | - make dev 55 | - make docs 56 | artifacts: 57 | paths: 58 | - public 59 | only: 60 | refs: 61 | - master 62 | - /^v(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)$/ 63 | changes: 64 | - docs/**/* 65 | except: 66 | refs: 67 | - schedules 68 | tags: 69 | - your git lab ci-runner-tag 70 | interruptible: true 71 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.1.0 4 | hooks: 5 | - id: check-added-large-files 6 | - id: check-docstring-first 7 | - id: check-json 8 | - id: check-merge-conflict 9 | - id: check-toml 10 | - id: check-xml 11 | - id: check-yaml 12 | - id: end-of-file-fixer 13 | - id: fix-encoding-pragma 14 | - id: mixed-line-ending 15 | - id: name-tests-test 16 | - id: no-commit-to-branch 17 | stages: [push] 18 | - id: pretty-format-json 19 | args: [--autofix] 20 | - id: sort-simple-yaml 21 | files: .pre-commit-config.yaml 22 | - id: trailing-whitespace 23 | - repo: local 24 | hooks: 25 | - id: black 26 | name: black 27 | language: system 28 | entry: pipenv run python -m black 29 | types: [python] 30 | - id: mypy 31 | name: mypy 32 | language: system 33 | entry: make mypy 34 | types: [python] 35 | require_serial: true 36 | pass_filenames: false 37 | - id: flake8 38 | name: flake8 39 | language: system 40 | entry: pipenv run python -m flake8 41 | types: [python] 42 | - id: pylint 43 | name: pylint 44 | language: system 45 | entry: pipenv run python -m pylint 46 | types: [python] 47 | require_serial: 
true 48 | - id: toml-sort 49 | name: toml-sort 50 | language: system 51 | entry: pipenv run toml-sort -a -i 52 | types: [toml] 53 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code. 6 | extension-pkg-allow-list=mpi4py.MPI, 7 | numpy, 8 | 9 | # Add files or directories matching the regex patterns to the ignore-list. The 10 | # regex matches against paths and can be in Posix or Windows format. 11 | ignore-paths=(./)?.venv/, 12 | 13 | # List of plugins (as comma separated values of python module names) to load, 14 | # usually to register additional checkers. 15 | # TODO(xuan.hu): Disabled due to: https://github.com/reverbc/pylint-pytest/pull/22 16 | load-plugins=pylint.extensions.no_self_use, 17 | # pylint_pytest, 18 | 19 | # Discover python modules and packages in the file system subtree. 20 | recursive=yes 21 | 22 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 23 | # number of processors available to use. 24 | jobs=0 25 | 26 | [MESSAGES CONTROL] 27 | 28 | # Disable the message, report, category or checker with the given id(s). You 29 | # can either give multiple identifiers separated by comma (,) or put this 30 | # option multiple times (only on the command line, not in the configuration 31 | # file where it should appear only once). You can also use "--disable=all" to 32 | # disable everything first and then reenable specific checks. For example, if 33 | # you want to run only the similarities checker, you can use "--disable=all 34 | # --enable=similarities". 
If you want to run only the classes checker, but have 35 | # no Warning level messages displayed, use "--disable=all --enable=classes 36 | # --disable=W". 37 | # 38 | # Reference: https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#pylint 39 | # c-extension-no-member: I1101 40 | # not-callable: For `torch.tensor`, should remove when upgrade to torch 1.11. 41 | # Reference: https://github.com/pytorch/pytorch/issues/24807 42 | # duplicate-code: For temporarily fix. 43 | # Reference: https://git.nevint.com/nio-pilot/torchpilot/issues/143 44 | # too-many-lines: Too many lines in module, C0302 45 | disable=c-extension-no-member, 46 | duplicate-code, 47 | not-callable, 48 | too-many-lines, 49 | use-dict-literal, 50 | arguments-differ, 51 | 52 | [SIMILARITIES] 53 | 54 | # Imports are removed from the similarity computation 55 | ignore-imports=yes 56 | 57 | # Minimum lines number of a similarity. 58 | min-similarity-lines=20 59 | 60 | [FORMAT] 61 | 62 | # Maximum number of characters on a single line. 63 | # 64 | # Make pylint compatible with black. 65 | # Reference: https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#pylint 66 | max-line-length=88 67 | 68 | [BASIC] 69 | 70 | # Good variable names which should always be accepted, separated by a comma. 71 | 72 | # good-names= 73 | 74 | [TYPECHECK] 75 | 76 | # List of members which are set dynamically and missed by pylint inference 77 | # system, and so shouldn't trigger E1101 when accessed. Python regular 78 | # expressions are accepted. 79 | # generated-members= 80 | 81 | [DESIGN] 82 | 83 | # Maximum number of arguments for function / method. 84 | max-args=16 85 | 86 | # Maximum number of attributes for a class (see R0902). 87 | max-attributes=16 88 | 89 | # Maximum number of locals for function / method body. 
90 | max-locals=32 91 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "[json]": { 3 | "editor.defaultFormatter": "esbenp.prettier-vscode" 4 | }, 5 | "[jsonc]": { 6 | "editor.defaultFormatter": "esbenp.prettier-vscode" 7 | }, 8 | "[markdown]": { 9 | "editor.defaultFormatter": "esbenp.prettier-vscode" 10 | }, 11 | "[python]": { 12 | "editor.codeActionsOnSave": { 13 | "source.organizeImports": true 14 | } 15 | }, 16 | "[restructuredtext]": { 17 | "editor.defaultFormatter": "esbenp.prettier-vscode" 18 | }, 19 | "cSpell.words": [ 20 | "ASGD", 21 | "biiou", 22 | "Caffe", 23 | "ceph", 24 | "cicd", 25 | "ckpt", 26 | "clss", 27 | "conv", 28 | "convs", 29 | "cuda", 30 | "deconv", 31 | "dets", 32 | "discretization", 33 | "dtype", 34 | "dylib", 35 | "gloo", 36 | "inplace", 37 | "intrinsics", 38 | "isclass", 39 | "Keras", 40 | "kpts", 41 | "LBFGS", 42 | "libcuda", 43 | "multinomial", 44 | "nccl", 45 | "NCHW", 46 | "ndarray", 47 | "nums", 48 | "optim", 49 | "Popen", 50 | "preds", 51 | "psutil", 52 | "pydantic", 53 | "pylintrc", 54 | "pyproject", 55 | "pythonic", 56 | "randn", 57 | "regs", 58 | "relu", 59 | "rois", 60 | "stds", 61 | "STMD", 62 | "symm", 63 | "tolist", 64 | "topk", 65 | "toplevel", 66 | "xywh", 67 | "xyxy" 68 | ], 69 | "editor.formatOnSave": true, 70 | "editor.rulers": [ 71 | 88 72 | ], 73 | "files.exclude": { 74 | "**/*.egg-info": true, 75 | "**/.coverage": true, 76 | "**/.mypy_cache": true, 77 | "**/.pytest_cache": true, 78 | "**/.venv": true, 79 | "**/Pipfile*": true, 80 | "**/__pycache__": true, 81 | "**/build": true, 82 | "**/coverage.xml": true, 83 | "**/htmlcov": true 84 | }, 85 | "files.insertFinalNewline": true, 86 | "files.trimFinalNewlines": true, 87 | "files.trimTrailingWhitespace": true, 88 | "python.formatting.provider": "black", 89 | "python.linting.flake8Enabled": true, 90 | 
"python.linting.mypyEnabled": true, 91 | "python.linting.pylintEnabled": true, 92 | "remote.downloadExtensionsLocally": true 93 | } 94 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean deepclean dev pre-commit lint black mypy flake8 pylint pip-check-reqs toml-sort test build upload docs devdocs 2 | 3 | CI=false 4 | PIPRUN := $(shell [ "${CI}" != "true" ] && command -v pipenv > /dev/null 2>&1 && echo pipenv run) 5 | PKGDIR := onn 6 | 7 | 8 | # Remove common intermediate files. 9 | clean: 10 | find . -name '*.pyc' -print0 | xargs -0 rm -f 11 | find . -name '*.swp' -print0 | xargs -0 rm -f 12 | find . -name '.DS_Store' -print0 | xargs -0 rm -rf 13 | find . -name '__pycache__' -print0 | xargs -0 rm -rf 14 | -rm -rf \ 15 | *.egg-info \ 16 | .coverage \ 17 | .eggs \ 18 | .mypy_cache \ 19 | .pytest_cache \ 20 | Pipfile* \ 21 | build \ 22 | dist \ 23 | output \ 24 | public 25 | 26 | # Remove common intermediate files alongside with `pre-commit` hook and virtualenv created by `pipenv`. 
27 | deepclean: clean 28 | -pre-commit uninstall --hook-type pre-push 29 | -pipenv --venv >/dev/null 2>&1 && pipenv --rm 30 | 31 | # Prepare virtualenv. 32 | # - Create virtual environment with pipenv and conda python when 33 | # - Not in CI environment. 34 | # - No existing venv. 35 | venv: 36 | -[ "${CI}" != "true" ] && ! pipenv --venv >/dev/null 2>&1 && pipenv --site-packages 37 | 38 | # Install package in editable mode without dev packages. 39 | install: venv 40 | ${PIPRUN} pip install --no-build-isolation -e . 41 | 42 | # Prepare dev environments: 43 | # - Install package in editable mode along with dev requirements. 44 | # - Install pre-commit hook when not in CI environment. 45 | dev: venv 46 | ${PIPRUN} pip install --no-build-isolation -e . -r requirements-dev.txt 47 | -[ "${CI}" != "true" ] && pre-commit install --hook-type pre-push 48 | 49 | # Run pre-commit for all files. 50 | pre-commit: 51 | pre-commit run --all-files 52 | 53 | # Lint with flake8, pylint and toml-sort. 54 | lint: flake8 pylint toml-sort 55 | 56 | # Code formatter. 57 | black: 58 | ${PIPRUN} python -m black setup.py tests ${PKGDIR} 59 | 60 | # Static typing checker. 61 | mypy: 62 | ${PIPRUN} python -m mypy setup.py tests ${PKGDIR} 63 | 64 | # Style checker with various plugins. 65 | flake8: 66 | ${PIPRUN} python -m flake8 67 | 68 | # Static code analysis. 69 | pylint: 70 | ${PIPRUN} python -m pylint . 71 | 72 | # [Experimental] Check missing/redundant requirements. 73 | pip-check-reqs: 74 | ${PIPRUN} pip-missing-reqs ${PKGDIR} 75 | ${PIPRUN} pip-extra-reqs ${PKGDIR} 76 | 77 | # Sort and format toml files (especially for pyproject.toml). 78 | toml-sort: 79 | ${PIPRUN} toml-sort -a -i pyproject.toml 80 | 81 | # Trigger tests. 82 | test: 83 | ${PIPRUN} python -m pytest --cov=${PKGDIR} . 84 | 85 | # Build package. 86 | build: 87 | ${PIPRUN} python -m build 88 | 89 | # Upload package. 
90 | upload: 91 | ${PIPRUN} python -m twine upload dist/* 92 | 93 | # Generate docs. 94 | docs: 95 | ${PIPRUN} python -m sphinx.cmd.build docs public 96 | 97 | # Auto build docs. 98 | devdocs: 99 | ${PIPRUN} python -m sphinx_autobuild docs public 100 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # One Neural Network (ONN) 2 | This is a cross-chip platform collection of operators and a unified neural network library. 3 | We hope to use this lib to run [OpenMMLab/Detectron/DeepSpeed/Megatron-LM/...] on different chip platforms. 4 | To achieve this goal, we utilized Deep Learning Compiler or DSL (such as TVM, Triton, etc.) as the backend. 5 | At the same time, we abstracted the operators from some high-level applications. 6 | 7 | ![TOP-Design](./asset/img/top-design.png "TOP-Design") 8 | 9 | 10 | ### Developer Quick Start 11 | 12 | If you want to be a developer, you can read this [developer_manual](./md_doc/developer_manual.md) 13 | 14 | 15 | ### User Quick Start 16 | 17 | As a user, you can complete cross-chip-platform training and inference based on ONN. You can read this [user_manual](./md_doc/user_manual.md) 18 | 19 | ### Design Manual 20 | 21 | You can read this [Design Manual](./md_doc/design_manual.md) for more details. 
22 | 23 | ### Acknowledgements 24 | 25 | - Project Scaffold,From:https://github.com/serious-scaffold/serious-scaffold-python 26 | -------------------------------------------------------------------------------- /asset/img/class-design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/matrix97317/OneNeuralNetwork/f61fa64be607c39550215f320c1278cd99ba160e/asset/img/class-design.png -------------------------------------------------------------------------------- /asset/img/top-design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/matrix97317/OneNeuralNetwork/f61fa64be607c39550215f320c1278cd99ba160e/asset/img/top-design.png -------------------------------------------------------------------------------- /dockers/Dockerfile: -------------------------------------------------------------------------------- 1 | # This dockerfile is used to Training/Evaluation/Deploy. 2 | 3 | # Specific Source Image FROM: 4 | # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/ 5 | ARG NVIDIA_PYTORCH_VERSION=22.12 6 | FROM nvcr.io/nvidia/pytorch:${NVIDIA_PYTORCH_VERSION}-py3 7 | 8 | # General settings. 9 | ARG DEBIAN_FRONTEND=noninteractive 10 | 11 | # Config Ubuntu Mirror. 12 | ARG UBUNTU_MIRROR=mirrors.ustc.edu.cn 13 | RUN sed -i -e "s/security.ubuntu.com/${UBUNTU_MIRROR}/g" -e "s/archive.ubuntu.com/${UBUNTU_MIRROR}/g" /etc/apt/sources.list 14 | 15 | # System-level package installation. 16 | # - curl, openssh-server, sudo: Necessary dependency for NIO cluster. 17 | # - ffmpeg: For visualization. 18 | # - libboost-all-dev: Boost dev C++ lib for read_cache_op_py. 19 | # - libgl1: For OpenCV. 20 | # - libturbojpeg: TurboJPEG's lib 21 | # - lsb-release: Gather release information for docker installation. 
22 | RUN apt-get update && apt-get install -y \ 23 | curl \ 24 | ffmpeg \ 25 | libboost-python-dev \ 26 | libgl1 \ 27 | libturbojpeg \ 28 | lsb-release \ 29 | openssh-server \ 30 | python3.8-venv \ 31 | sudo \ 32 | && rm -rf /var/lib/apt/lists/* 33 | 34 | # Config Docker gpg key and repository with custom mirror. 35 | # Reference: https://docs.docker.com/engine/install/ubuntu/#set-up-the-repository 36 | ARG DOCKER_MIRROR=mirrors.ustc.edu.cn/docker-ce 37 | RUN curl -fsSL https://${DOCKER_MIRROR}/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg 38 | RUN echo \ 39 | "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://${DOCKER_MIRROR}/linux/ubuntu \ 40 | $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null 41 | 42 | # Software-level package installation. 43 | # - docker-ce-cli: docker client for image build. 44 | RUN apt-get update && apt-get install -y \ 45 | docker-ce-cli \ 46 | && rm -rf /var/lib/apt/lists/* 47 | 48 | # Hack for dirty partial installation of opencv python package. 49 | RUN rm -rf /opt/conda/lib/python3.8/site-packages/cv2 50 | 51 | # Upgrade pip to the latest version. 
52 | RUN python3 -m pip install --no-cache-dir --upgrade pip 53 | 54 | # Install NVIDIA apex 55 | RUN git clone --recursive https://github.com/NVIDIA/apex.git /opt/apex 56 | RUN cd /opt/apex && git checkout 2386a912164b0c5cfcd8be7a2b890fbac5607c82 57 | RUN cd /opt/apex && MAX_JOBS=32 pip3 install --no-build-isolation --no-cache-dir -v --config-settings="--global-option=--cpp_ext" --config-settings="--global-option=--cuda_ext" --config-settings="--global-option=--permutation_search" --config-settings="--global-option=--peer_memory" --config-settings="--global-option=--cudnn_gbn" ./ 58 | 59 | # Install pypcd 60 | RUN git clone --recursive https://github.com/dimatura/pypcd /opt/pypcd 61 | RUN cd /opt/pypcd && git fetch origin pull/9/head:python3 && git checkout python3 \ 62 | && python3 setup.py install && rm -rf /opt/pypcd 63 | 64 | # Config PyPI Mirror. 65 | ARG PIP_INDEX_URL=https://mirror.sjtu.edu.cn/pypi/web/simple 66 | ENV PIP_INDEX_URL=${PIP_INDEX_URL} 67 | 68 | # Install and config pipx (with conda python). 69 | RUN python3 -m pip install --no-cache-dir pipx 70 | ENV PIPX_HOME=/usr/local/pipx 71 | ENV PIPX_BIN_DIR=/usr/local/bin 72 | 73 | # Install requirements and dev requirements in conda python. 74 | COPY requirements.txt requirements-dev.txt ./ 75 | RUN python3 -m pip install --no-cache-dir -r requirements.txt -r requirements-dev.txt 76 | 77 | # Hack for the inconsistent behavior of tensorboard in base image. 78 | # This should be deleted after the strange behavior reverted in base image. 
79 | # https://github.com/tensorflow/tensorboard/issues/5648 80 | 81 | # RUN [[ -f /opt/conda/lib/python3.8/site-packages/tensorboard/plugins/core/core_plugin.py ]] \ 82 | # && sed -i "s/\"--bind_all\", default=True,/\"--bind_all\",/g" /opt/conda/lib/python3.8/site-packages/tensorboard/plugins/core/core_plugin.py 83 | 84 | # /usr/local/lib/python3.8/dist-packages/tensorboard/plugins/core/core_plugin.py 85 | 86 | RUN [[ -f /usr/local/lib/python3.8/dist-packages/tensorboard/plugins/core/core_plugin.py ]] \ 87 | && sed -i "s/\"--bind_all\", default=True,/\"--bind_all\",/g" /usr/local/lib/python3.8/dist-packages/tensorboard/plugins/core/core_plugin.py 88 | 89 | # Install requirements to satisfy niofs installation. 90 | # This should be deleted after upstream bug fix. 91 | RUN python3 -m pip install --no-cache-dir boto3 requests 92 | 93 | # Install and config pipenv via pipx. 94 | RUN pipx install pipenv --pip-args="--no-cache-dir" 95 | ENV PIPENV_VENV_IN_PROJECT=true 96 | ENV PIPENV_MAX_DEPTH=0 97 | 98 | # Install pre-commit via pipx. 99 | RUN pipx install pre-commit --pip-args="--no-cache-dir" 100 | 101 | # Install onnxruntime lib 102 | RUN cd /opt && wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz 103 | RUN cd /opt && tar -xzvf onnxruntime-linux-x64-1.8.1.tgz 104 | RUN chmod a+rwx /opt/onnxruntime-linux-x64-1.8.1 105 | RUN export ONNXRUNTIME_DIR=/opt/onnxruntime-linux-x64-1.8.1 106 | RUN export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH 107 | RUN rm -f /opt/onnxruntime-linux-x64-1.8.1.tgz 108 | 109 | # Install tensorrt lib 110 | # Please download TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz to local. 
111 | COPY TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz /opt 112 | RUN cd /opt && tar -xzvf TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz 113 | RUN chmod a+rwx /opt/TensorRT-8.5.1.7 114 | RUN export TENSORRT_DIR=/opt/TensorRT-8.5.1.7 115 | RUN export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$TENSORRT_DIR/lib 116 | RUN export PATH=$PATH:$TENSORRT_DIR/bin 117 | RUN rm -f /opt/TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz 118 | 119 | # Dev pacakge installation. 120 | RUN apt-get update && apt-get install -y \ 121 | bash-completion \ 122 | htop \ 123 | locales \ 124 | python3.8-tk \ 125 | sshpass \ 126 | tmux \ 127 | zsh \ 128 | zsh-autosuggestions \ 129 | zsh-syntax-highlighting \ 130 | && rm -rf /var/lib/apt/lists/* 131 | RUN locale-gen en_US.UTF-8 132 | 133 | # Various settings mostly for dev. 134 | # - Disable pip without virtual env for dev. 135 | ENV PIP_REQUIRE_VIRTUALENV=true 136 | # - Disable local python packages for dev. 137 | ENV PYTHONNOUSERSITE=1 138 | # - Set default editor to vim mostly for git. 139 | ENV EDITOR=vim 140 | # - Set locale related env. 
141 | ENV LANG=en_US.UTF-8 142 | ENV LC_ALL=en_US.UTF-8 143 | ENV LC_CTYPE=en_US.UTF-8 144 | 145 | # build docker images all environment vars 146 | COPY env_vars /opt 147 | RUN chmod a+rwx /opt/env_vars 148 | RUN cat /opt/env_vars | while read line; do echo $line; done >> ~/.bashrc 149 | 150 | # build 2 versions of trt environment vars 151 | COPY trt_8_5_1_7.sh /opt 152 | -------------------------------------------------------------------------------- /dockers/env_vars: -------------------------------------------------------------------------------- 1 | export _=/usr/bin/env 2 | export UBUNTU_MIRROR=mirrors.ustc.edu.cn 3 | export TORCH_ALLOW_TF32_CUBLAS_OVERRIDE=1 4 | export DEBIAN_FRONTEND=noninteractive 5 | export TRTOSS_VERSION=22.12 6 | export NVIDIA_PYTORCH_VERSION=22.12 7 | export MOFED_VERSION=5.4-rdmacore36.0 8 | export PATH=/usr/local/lib/python3.8/dist-packages/torch_tensorrt/bin:/usr/local/mpi/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/ucx/bin:/opt/TensorRT-8.5.1.7/bin 9 | export NVIDIA_REQUIRE_JETPACK_HOST_MOUNTS= 10 | export CUDA_MODULE_LOADING=LAZY 11 | export CUDA_HOME=/usr/local/cuda 12 | export _CUDA_COMPAT_PATH=/usr/local/cuda/compat 13 | export PYTORCH_BUILD_VERSION=1.14.0a0+410ce96 14 | export TRANSFORMER_ENGINE_VERSION=0.3 15 | export LC_ALL=en_US.UTF-8 16 | export CUDA_DRIVER_VERSION=520.61.05 17 | export OPAL_PREFIX=/opt/hpcx/ompi 18 | export OMPI_MCA_coll_hcoll_enable=0 19 | export NVIDIA_BUILD_ID=49968248 20 | export LC_CTYPE=en_US.UTF-8 21 | export LD_LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/torch/lib:/usr/local/lib/python3.8/dist-packages/torch_tensorrt/lib:/usr/local/cuda/compat/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/opt/TensorRT-8.5.1.7/lib:/opt/onnxruntime-linux-x64-1.8.1/lib 22 | export PYTORCH_HOME=/opt/pytorch/pytorch 23 | export TENSORRT_DIR=/opt/TensorRT-8.5.1.7 24 | export ONNXRUNTIME_DIR=/opt/onnxruntime-linux-x64-1.8.1 25 | export 
JUPYTER_PORT=8888 26 | export DALI_VERSION=1.20.0 27 | export NSIGHT_COMPUTE_VERSION=2022.3.0.22 28 | export CUDNN_VERSION=8.7.0.84 29 | export DOCKER_MIRROR=mirrors.ustc.edu.cn/docker-ce 30 | export BASH_ENV=/etc/bash.bashrc 31 | export SHLVL=1 32 | export PIP_INDEX_URL=https://mirror.sjtu.edu.cn/pypi/web/simple 33 | export PIPX_HOME=/usr/local/pipx 34 | export PYTHONIOENCODING=utf-8 35 | export LIBRARY_PATH=/usr/local/cuda/lib64/stubs: 36 | export NVJPEG_VERSION=11.9.0.86 37 | export OPENMPI_VERSION=4.1.4 38 | export GDRCOPY_VERSION=2.3 39 | export PIPENV_MAX_DEPTH=0 40 | export NVM_DIR=/usr/local/nvm 41 | export TORCH_CUDNN_V8_API_ENABLED=1 42 | export PIP_REQUIRE_VIRTUALENV=true 43 | export HPCX_VERSION=2.13 44 | export PIP_DEFAULT_TIMEOUT=100 45 | export CUTENSOR_VERSION=1.6.1.5 46 | export USE_EXPERIMENTAL_CUDNN_V8_API=1 47 | export PYTORCH_BUILD_NUMBER=0 48 | export CURAND_VERSION=10.3.0.86 49 | export PYTORCH_VERSION=1.14.0a0+410ce96 50 | export CUDA_VERSION=11.8.0.065 51 | export COCOAPI_VERSION=2.0+nv0.7.1 52 | export LANG=en_US.UTF-8 53 | export HOME=/root 54 | export RDMACORE_VERSION=36.0 55 | export NVIDIA_PRODUCT_NAME=PyTorch 56 | export TRT_VERSION=8.5.10.4 57 | export POLYGRAPHY_VERSION=0.43.1 58 | export NVIDIA_DRIVER_CAPABILITIES=compute,utility,video 59 | export NSIGHT_SYSTEMS_VERSION=2022.4.2.1 60 | export OPENUCX_VERSION=1.14.0 61 | export PIPX_BIN_DIR=/usr/local/bin 62 | export PWD=/workspace 63 | export ENV=/etc/shinit_v2 64 | export CUSPARSE_VERSION=11.7.5.86 65 | export EDITOR=vim 66 | export OPENBLAS_VERSION=0.3.20 67 | export NCCL_VERSION=2.15.5 68 | export TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6 9.0+PTX" 69 | export TENSORBOARD_PORT=6006 70 | export CUDA_CACHE_DISABLE=1 71 | export NVIDIA_REQUIRE_CUDA="cuda>=9.0" 72 | export CUFFT_VERSION=10.9.0.58 73 | export PYVER=3.8 74 | export PYTHONNOUSERSITE=1 75 | export CUBLAS_VERSION=11.11.3.6 76 | export CUSOLVER_VERSION=11.4.1.48 77 | export DALI_BUILD=6562491 78 | export 
PIPENV_VENV_IN_PROJECT=true 79 | export NVIDIA_VISIBLE_DEVICES=all 80 | export SHELL=/bin/bash 81 | export NPP_VERSION=11.8.0.86 82 | -------------------------------------------------------------------------------- /dockers/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # Requirements for development only. 2 | # Generally it is not supposed to be changed. 3 | black 4 | build 5 | # New version of flake8 broken many plugins. 6 | flake8 < 5 7 | flake8-absolute-import 8 | flake8-bandit 9 | flake8-black 10 | flake8-comprehensions 11 | flake8-docstrings 12 | flake8-isort 13 | flake8-logging-format 14 | flake8-noqa 15 | flake8-print 16 | flake8-pytest 17 | flake8-pytest-style 18 | flake8-quotes 19 | flake8-requirements 20 | flake8-use-fstring 21 | furo 22 | mypy 23 | pep8-naming 24 | pip-check-reqs 25 | pylint 26 | pylint-pytest 27 | pytest 28 | pytest-cov 29 | Sphinx 30 | sphinx-autobuild 31 | sphinx-click 32 | toml-sort 33 | twine 34 | types-setuptools 35 | types-tabulate 36 | -------------------------------------------------------------------------------- /dockers/requirements.txt: -------------------------------------------------------------------------------- 1 | # Requirements for runtime only. 
2 | -i https://mirror.sjtu.edu.cn/pypi/web/simple 3 | --extra-index-url https://pypi.ngc.nvidia.com 4 | addict 5 | click 6 | coloredlogs 7 | easydict 8 | importlib-metadata 9 | mpi4py 10 | numpy 11 | Pillow 12 | prettytable 13 | pydantic 14 | pynvml 15 | setuptools-scm 16 | tabulate 17 | tensorboard 18 | torch 19 | torch_tb_profiler 20 | torchvision 21 | tqdm 22 | tvm 23 | terminaltables 24 | pycuda 25 | pycocotools 26 | onnxsim 27 | onnx_graphsurgeon 28 | onnxruntime==1.8.1 29 | open3d 30 | jupyterlab==3.0.16 31 | opencv-python==4.8.0.74 32 | -------------------------------------------------------------------------------- /dockers/trt_8_5_1_7.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PATH=$(echo $PATH | sed 's|/opt/TensorRT-8.5.10.4/bin|/opt/TensorRT-8.5.1.7/bin|g') 4 | export LD_LIBRARY_PATH=$(echo $LD_LIBRARY_PATH | sed 's|/opt/TensorRT-8.5.10.4/lib|/opt/TensorRT-8.5.1.7/lib|g') 5 | export TENSORRT_DIR=/opt/TensorRT-8.5.1.7 6 | export TRT_VERSION=8.5.1.7 7 | 8 | # switch python version 9 | sudo pip install /opt/TensorRT-8.5.1.7/python/tensorrt-8.5.1.7-cp38-none-linux_x86_64.whl 10 | -------------------------------------------------------------------------------- /md_doc/design_manual.md: -------------------------------------------------------------------------------- 1 | # Design Manual 2 | 3 | ### project directory structure 4 | 5 | The following will introduce the functions of the project directory. 6 | ``` 7 | - onn 8 | |-- atomic // Mainly storing atomic level operators, including add, conv,etc 9 | |-- backend 10 | | -- triton 11 | | -- tvm 12 | |-- block // Mainly storing block level operators, including conv-bn-relu, decoder,etc 13 | |-- cv // Mainly storing domain operators about computer vision, including NMS, etc 14 | |-- nlp // Mainly storing domain operators about nature language processing. 
15 | |-- task // Mainly storing task operators,including BEVPool, etc 16 | |--auto-driving 17 | ``` 18 | 19 | ### Design of Class 20 | 21 | ![Class-Design](../asset/img/class-design.png "Class-Design") 22 | -------------------------------------------------------------------------------- /md_doc/developer_manual.md: -------------------------------------------------------------------------------- 1 | 2 | # Developer Manual 3 | 4 | Hi, I'm glad you want to become a developer. The following tutorial will quickly guide you on how to develop. 5 | 6 | 7 | ### STEP 0. Build Docker 8 | 9 | The following operations are based on your proficiency in using Docker. If you don't know how to do so, you can take a look here. 10 | https://docs.docker.com/get-started/ 11 | 12 | If you have a docker image, you can skip this step. If you not, you can do the following commands one by one. 13 | 14 | ``` 15 | $ git clone https://github.com/matrix97317/OneNeuralNetwork.git 16 | $ cd OneNeuralNetwork 17 | $ cd dockers 18 | $ docker buildx build --platform=linux/amd64 -t ONN:v1.0.0 -f . 19 | $ docker images //you can look `ONN:v1.0.0` 20 | ``` 21 | 22 | ### STEP 1. Clone Repo 23 | If you have clone repo,you can skip it. 24 | 25 | ``` 26 | $ git clone https://github.com/matrix97317/OneNeuralNetwork.git 27 | $ cd OneNeuralNetwork 28 | $ git checkout -b / 29 | ``` 30 | 31 | ### STEP 2. Build Development Environment 32 | 33 | ``` 34 | $ cd OneNeuralNetwork 35 | $ make dev 36 | $ make pre-commit 37 | ``` 38 | 39 | ### STEP 3. Develop Project 40 | 41 | Now,you can develop this project. 42 | 43 | ### STEP 4. Add your unit test. 44 | 45 | To ensure the robustness of the project, you must write unit tests. 46 | 47 | ### STEP 5. Upload your commit to your branch 48 | 49 | Congratulations! 50 | 51 | ``` 52 | $ make test // Verify Unit Test 53 | $ make pre-commit // Verify Code Style & auto format your code. 54 | 55 | $ git checkout -b / 56 | $ git add # Add the intended files to commit. 
57 | $ git commit -m "commit message" 58 | $ git checkout main 59 | $ git pull 60 | $ git checkout -b / 61 | $ git merge main 62 | $ git push 63 | ``` 64 | -------------------------------------------------------------------------------- /md_doc/user_manual.md: -------------------------------------------------------------------------------- 1 | # User Manual 2 | -------------------------------------------------------------------------------- /onn/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """One Neural Network.""" 3 | from importlib.metadata import version 4 | 5 | from .atomic import * 6 | 7 | 8 | __version__ = version(__package__) 9 | -------------------------------------------------------------------------------- /onn/atomic/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ONN atomic operator.""" 3 | from .backend import * 4 | from .add import Add 5 | 6 | 7 | __all__ = [ 8 | "Add", 9 | ] 10 | -------------------------------------------------------------------------------- /onn/atomic/add.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ONN atomic operator-Add.""" 3 | import torch 4 | 5 | from onn.slot import TRITON 6 | 7 | 8 | class Add(torch.autograd.Function): # pylint: disable=abstract-method 9 | """Add Op.""" 10 | 11 | backend = "triton" 12 | arch = "sm80" 13 | device = "cuda" 14 | op = None 15 | 16 | def __init__(self, backend="triton", arch="sm80", device="cuda"): 17 | """init.""" 18 | super().__init__() 19 | Add.backend = backend 20 | Add.arch = arch 21 | Add.device = device 22 | Add.op = TRITON.pop("onn.atomic.backend.triton.add.TritonAdd")() 23 | 24 | @staticmethod 25 | def forward(ctx, a, b): 26 | """Forward.""" 27 | ctx.op = Add.op 28 | ctx.save_for_backward(a, b) 29 | return ctx.op.forward(a, b) 30 | 31 | @staticmethod 32 
| def backward(ctx, grad_output): 33 | """Backward.""" 34 | ( 35 | _, # a 36 | _, # b 37 | ) = ctx.saved_tensors 38 | grad_a = ctx.op.backward(grad_output) * grad_output 39 | grad_b = ctx.op.backward(grad_output) * grad_output 40 | return grad_a, grad_b 41 | -------------------------------------------------------------------------------- /onn/atomic/backend/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ONN atomic operator-backend.""" 3 | from .triton import * 4 | -------------------------------------------------------------------------------- /onn/atomic/backend/triton/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ONN atomic operator-backend.""" 3 | from .add import * 4 | -------------------------------------------------------------------------------- /onn/atomic/backend/triton/add.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ONN atomic operator-add.""" 3 | import math 4 | 5 | import torch 6 | from torch import Tensor 7 | import triton # pylint: disable=import-error 8 | import triton.language as tl # pylint: disable=import-error 9 | 10 | from onn.slot import TRITON 11 | from onn.operator import Operator 12 | 13 | 14 | @triton.jit 15 | def add_func(a_ptr, b_ptr, c_ptr, element_size, block_size: tl.constexpr): 16 | """Triton Func.""" 17 | block_id = tl.program_id(axis=0) 18 | thread_id = block_id * block_size + tl.arange(0, block_size) 19 | mask = thread_id < element_size 20 | a = tl.load(a_ptr + thread_id, mask=mask) 21 | b = tl.load(b_ptr + thread_id, mask=mask) 22 | tl.store(c_ptr + thread_id, a + b, mask=mask) 23 | 24 | 25 | @TRITON.push() 26 | class TritonAdd(Operator): 27 | """Add Operator.""" 28 | 29 | BLOCK_SIZE = 512 30 | 31 | def parse_inputs_data(self, a_tensor: Tensor, b_tensor: Tensor): 32 | """Parse inputs data.""" 
def backward(self, input_grad: Tensor):
    """Local gradient of elementwise add w.r.t. either input.

    Since d(a + b)/da = d(a + b)/db = 1, the local gradient is simply
    an all-ones tensor shaped, typed and placed like ``input_grad``.
    """
    grad_mask = torch.ones_like(  # pylint: disable=no-member
        input_grad, dtype=input_grad.dtype, device=input_grad.device
    )
    return grad_mask
class Slot:
    """A registry mapping fully-qualified class names to classes.

    Classes register themselves with the ``push`` decorator and are
    retrieved by name with ``pop`` (a non-destructive lookup, despite
    the name).
    """

    def __init__(self):
        """Create an empty registry."""
        self._class_table = {}

    def _load(self, cls_name, cls):
        """Record *cls* under *cls_name* in the registry."""
        self._class_table[cls_name] = cls

    def push(self):
        """Return a class decorator that registers the decorated class.

        The key is ``<module>.<name>`` of the decorated class; the class
        itself is returned unchanged, so the decorator is transparent.
        """

        def _register(cls):
            self._load(f"{cls.__module__}.{cls.__name__}", cls)
            return cls

        return _register

    def pop(self, cls_name):
        """Look up a registered class by name (raises KeyError if absent)."""
        return self._class_table[cls_name]

    def __str__(self):
        """Render registered names, inserting a newline every 20 entries."""
        parts = []
        for index, name in enumerate(self._class_table):
            parts.append(f"[{name}] ")
            if index % 20 == 0:
                parts.append("\n")
        return "".join(parts)


TRITON = Slot()
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # Use leading comments since `toml-sort` will remove all other comments. 2 | # Reference: https://github.com/pappasam/toml-sort/issues/11 3 | # - build-system: 4 | # - build-backend: 5 | # Use the new standard way (PEP 517) to build package. 6 | # Reference: https://setuptools.pypa.io/en/latest/build_meta.html#how-to-use-it 7 | # - requires: 8 | # Build requirements for setuptools_scm. 9 | # Reference: https://github.com/pypa/setuptools_scm/#pyprojecttoml-usage 10 | # - tool.isort: 11 | # - force_single_line: 12 | # Forces all from imports to appear on their own line. 13 | # Reference: https://www.python.org/dev/peps/pep-0008/#imports 14 | # Reference: E401 on https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes 15 | # - lines_after_imports: 16 | # Force two blank lines after import for consistency. 17 | # - profile: 18 | # Make isort compatible with black. 19 | # Reference: https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#isort 20 | # - tools.mypy.overrides: 21 | # MyPy Settings for the third-party libraries that is not compatible with mypy. 22 | # - tool.setuptools_scm: 23 | # Enable scm based version provided by setuptools_scm. 
24 | # Reference: https://github.com/pypa/setuptools_scm/#pyprojecttoml-usage 25 | 26 | [build-system] 27 | build-backend = "setuptools.build_meta" 28 | requires = [ 29 | "setuptools>=45", 30 | "setuptools_scm>=6.2", 31 | "wheel" 32 | ] 33 | 34 | [tool.isort] 35 | force_single_line = true 36 | lines_after_imports = 2 37 | profile = "black" 38 | 39 | [tool.mypy] 40 | disable_error_code = [ 41 | "union-attr" 42 | ] 43 | enable_error_code = [ 44 | "ignore-without-code" 45 | ] 46 | # exclude = [] 47 | ignore_missing_imports = true 48 | show_error_codes = true 49 | warn_unused_ignores = true 50 | 51 | [tool.pytest.ini_options] 52 | addopts = "-l -s -v --color=yes --durations=0 --cov-report xml --cov-report term" 53 | log_cli = true 54 | log_cli_level = "info" 55 | log_date_format = "%Y-%m-%d %H:%M:%S" 56 | log_format = "%(asctime)s %(levelname)s %(message)s" 57 | minversion = "6.0" 58 | 59 | [tool.setuptools_scm] 60 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # Requirements for development only. 2 | # Generally it is not supposed to be changed. 3 | black 4 | build 5 | # New version of flake8 broken many plugins. 
6 | flake8 < 5 7 | flake8-absolute-import 8 | flake8-bandit 9 | flake8-black 10 | flake8-comprehensions 11 | flake8-docstrings 12 | flake8-isort 13 | flake8-logging-format 14 | flake8-noqa 15 | flake8-print 16 | flake8-pytest 17 | flake8-pytest-style 18 | flake8-quotes 19 | flake8-requirements 20 | flake8-use-fstring 21 | furo 22 | mypy 23 | pep8-naming 24 | pip-check-reqs 25 | pylint 26 | pylint-pytest 27 | pytest 28 | pytest-cov 29 | Sphinx 30 | sphinx-autobuild 31 | sphinx-click 32 | toml-sort 33 | twine 34 | types-setuptools 35 | types-tabulate 36 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Requirements for runtime only. 2 | addict 3 | click 4 | coloredlogs 5 | importlib-metadata 6 | jupyterlab==3.0.16 7 | kafka-python 8 | mpi4py 9 | numpy 10 | opencv-python-headless 11 | Pillow 12 | prettytable 13 | pydantic 14 | pynvml 15 | setuptools-scm 16 | tabulate 17 | tensorboard 18 | torch 19 | torch_tb_profiler 20 | torchvision 21 | tqdm 22 | tvm 23 | triton 24 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """The setup.py for One Neural Network.""" 3 | from pkg_resources import parse_requirements 4 | from setuptools import find_packages 5 | from setuptools import setup 6 | 7 | 8 | # Parse content from `README.md` as long description. 9 | with open("README.md", encoding="utf-8") as fh: 10 | long_description = fh.read() 11 | 12 | # Parse content from `requirements.txt` as install requires. 
def test_add():
    """End-to-end check of onn.Add forward/backward on CUDA.

    Gradients must flow back to the CPU leaf tensors: d out/d a = 1,
    and d out/d b = 3 because b participates scaled by 3.
    """
    shape = (2, 4)
    lhs = torch.ones(shape, requires_grad=True)
    rhs = torch.ones(shape, requires_grad=True)
    lhs_gpu = lhs.cuda()
    rhs_gpu_scaled = rhs.cuda() * 3

    total = torch.sum(onn.Add().apply(lhs_gpu, rhs_gpu_scaled))
    total.backward()

    assert torch.allclose(lhs.grad.data, torch.ones(shape))
    assert torch.allclose(rhs.grad.data, torch.ones(shape) * 3)


if __name__ == "__main__":
    test_add()
def test_pkg() -> None:
    """Verify the distribution exposes the expected package name."""
    expected = "onn"
    assert onn.__package__ == expected