├── .circleci └── config.yml ├── .gitignore ├── .local.jenkins.lin.yml ├── .travis.yml ├── HISTORY.rst ├── LICENSE.txt ├── MANIFEST.in ├── README.rst ├── _doc ├── bench │ ├── bench_ortmodule_nn_gpu.py │ ├── bench_orttraining_nn_gpu.py │ └── bench_orttraining_nn_gpu_fwbw.py ├── examples │ ├── README.txt │ ├── data │ │ ├── ort_cpu_gpu.csv │ │ ├── ort_gpus.csv │ │ ├── ort_gpus_gpt2.csv │ │ ├── ort_gpus_piece.csv │ │ ├── ort_gpus_piece_resnet18.csv │ │ └── plot_benchmark_eager_mode.csv │ ├── images │ │ ├── onnxfwbw1.png │ │ ├── onnxfwbw2.png │ │ └── onnxfwbwtorch.png │ ├── plot_abegin_convert_pipeline.py │ ├── plot_bbegin_measure_time.py │ ├── plot_benchmark_eager_mode.py │ ├── plot_benchmark_graph_opt.py │ ├── plot_benchmark_inference.py │ ├── plot_benchmark_inference_standard.py │ ├── plot_benchmark_onnx_function.py │ ├── plot_benchmark_onnx_serialize.py │ ├── plot_benchmark_op.py │ ├── plot_benchmark_op_leakyrelu.py │ ├── plot_benchmark_op_short.py │ ├── plot_benchmark_ort_api.py │ ├── plot_catwoe_transformer.py │ ├── plot_cbegin_opset.py │ ├── plot_dbegin_options.py │ ├── plot_dbegin_options_list.py │ ├── plot_dbegin_options_zipmap.py │ ├── plot_ebegin_float_double.py │ ├── plot_f8.py │ ├── plot_fbegin_investigate.py │ ├── plot_funny_sigmoid.py │ ├── plot_gbegin_cst.py │ ├── plot_gbegin_dataframe.py │ ├── plot_gbegin_transfer_learning.py │ ├── plot_gconverting.py │ ├── plot_gexternal_lightgbm.py │ ├── plot_gexternal_lightgbm_reg.py │ ├── plot_gexternal_lightgbm_reg_mono.py │ ├── plot_gexternal_lightgbm_reg_per.py │ ├── plot_gexternal_xgboost.py │ ├── plot_icustom_converter.py │ ├── plot_jcustom_syntax.py │ ├── plot_kcustom_converter_wrapper.py │ ├── plot_lcustom_options.py │ ├── plot_mcustom_parser.py │ ├── plot_mcustom_parser_dataframe.py │ ├── plot_orttraining_benchmark.py │ ├── plot_orttraining_benchmark_fwbw.py │ ├── plot_orttraining_benchmark_fwbw_cls.py │ ├── plot_orttraining_benchmark_torch.py │ ├── plot_orttraining_linear_regression.py │ ├── plot_orttraining_linear_regression_cpu.py │ ├── plot_orttraining_linear_regression_fwbw.py │ ├── plot_orttraining_linear_regression_gpu.py │ ├── plot_orttraining_nn_gpu.py │ ├── plot_orttraining_nn_gpu_fwbw.py │ ├── plot_orttraining_nn_gpu_fwbw_nesterov.py │ ├── plot_parallel_execution.py │ ├── plot_parallel_execution_big_model.py │ ├── plot_pextend_python_runtime.py │ ├── plot_profile_ort.py │ ├── plot_profile_ort_onnx.py │ ├── plot_qextend_onnxruntime.py │ ├── plot_quantization.py │ ├── plot_transformer_discrepancy.py │ ├── plot_usparse_xgboost.py │ └── plot_woe_transformer.py ├── notebooks │ ├── README.txt │ ├── convolutation_matmul.ipynb │ ├── onnxruntime_training_nb.ipynb │ └── tree_to_onnx.ipynb └── sphinxdoc │ └── source │ ├── _static │ └── project_ico.png │ ├── api │ ├── apis.rst │ ├── data.rst │ ├── experiment.rst │ ├── index.rst │ ├── onnx_python │ │ ├── backend.rst │ │ ├── checker.rst │ │ ├── classes.rst │ │ ├── compose.rst │ │ ├── defs.rst │ │ ├── external_data_helper.rst │ │ ├── helper.rst │ │ ├── hub.rst │ │ ├── index.rst │ │ ├── mapping.rst │ │ ├── numpy_helper.rst │ │ ├── parser.rst │ │ ├── printer.rst │ │ ├── reference.rst │ │ ├── serialization.rst │ │ ├── shape_inference.rst │ │ ├── tools.rst │ │ ├── utils.rst │ │ └── version_converter.rst │ ├── onnxruntime_python │ │ ├── exceptions.rst │ │ ├── grad.rst │ │ ├── helpers.rst │ │ ├── index.rst │ │ ├── inference.rst │ │ ├── ortvalue.rst │ │ ├── sparse.rst │ │ ├── tools.rst │ │ ├── training.rst │ │ ├── training_partial.rst │ │ └── training_torch.rst │ ├── plotting.rst │ ├── 
training.rst │ ├── training_utils.rst │ └── utils.rst │ ├── blog │ ├── 2021 │ │ ├── 2021-10-13_first.rst │ │ └── 2021-12-16_wsl.rst │ └── 2022 │ │ └── 2022-12-02_wsl.rst │ ├── conf.py │ ├── doc.rst │ ├── github_link.py │ ├── i_cmd.rst │ ├── i_ex.rst │ ├── i_faq.rst │ ├── index.rst │ ├── installation.rst │ ├── license.rst │ ├── onnxmd │ ├── index.rst │ ├── index_onnx.rst │ ├── index_onnxruntime.rst │ ├── index_onnxruntime_gh_pages.rst │ ├── onnx_add_new_op.rst │ ├── onnx_changelog.rst │ ├── onnx_changelog_ml.rst │ ├── onnx_contributing.rst │ ├── onnx_docs │ │ ├── AddNewOp.md │ │ ├── Broadcasting.md │ │ ├── CIPipelines.md │ │ ├── CONTRIBUTING.md │ │ ├── Changelog-ml.md │ │ ├── Changelog.md │ │ ├── DefineDifferentiability.md │ │ ├── DimensionDenotation.md │ │ ├── ExternalData.md │ │ ├── Hub.md │ │ ├── IR.md │ │ ├── ImplementingAnOnnxBackend.md │ │ ├── ManagingExperimentalOps.md │ │ ├── MetadataProps.md │ │ ├── ONNXIFI.md │ │ ├── ONNXTypes.md │ │ ├── ONNX_logo_main.png │ │ ├── OnnxBackendTest.md │ │ ├── OnnxReleases.md │ │ ├── OpConventions.md │ │ ├── Operators-ml.md │ │ ├── Operators.md │ │ ├── Overview.md │ │ ├── PythonAPIOverview.md │ │ ├── README.txt │ │ ├── Relicensing.md │ │ ├── ShapeInference.md │ │ ├── Syntax.md │ │ ├── TestCoverage-ml.md │ │ ├── TestCoverage.md │ │ ├── TypeAnnotations.md │ │ ├── TypeDenotation.md │ │ ├── VersionConverter.md │ │ ├── Versioning.md │ │ ├── images │ │ │ └── onnx_hub_arch.svg │ │ ├── onnx-horizontal-color.png │ │ └── proposals │ │ │ ├── ArchiveFileFormatProposal.md │ │ │ ├── FunctionsProposal.md │ │ │ ├── NLPinONNXproposal.md │ │ │ ├── ONNXIFIproposal.md │ │ │ └── SymbolicShapeInfProposal.md │ ├── onnx_managing.rst │ ├── onnx_metadata.rst │ ├── onnx_operators.rst │ ├── onnx_operators_ml.rst │ ├── onnx_releases.rst │ ├── onnx_test_coverage.rst │ ├── onnx_test_coverage_ml.rst │ └── onnxruntime_docs │ │ ├── ABI_Dev_Notes.md │ │ ├── Android_testing.md │ │ ├── C_API_Guidelines.md │ │ ├── Coding_Conventions_and_Standards.md │ │ ├── ContribOperators.md │ │ ├── FAQ.md │ │ ├── How_To_Update_ONNX_Dev_Notes.md │ │ ├── Memory_Optimizer.md │ │ ├── Model_Test.md │ │ ├── NotesOnThreading.md │ │ ├── ONNX_Runtime_Server_Usage.md │ │ ├── ORTMobilePackageOperatorTypeSupport.md │ │ ├── ORTModule_Training_Guidelines.md │ │ ├── ORT_Format_Update_in_1.13.md │ │ ├── OperatorKernels.md │ │ ├── PR_Guidelines.md │ │ ├── Privacy.md │ │ ├── Python_Dev_Notes.md │ │ ├── README.txt │ │ ├── Reduced_Operator_Kernel_build.md │ │ ├── ReleaseManagement.md │ │ ├── Roadmap.md │ │ ├── Server.md │ │ ├── TVM_EP.md │ │ ├── Versioning.md │ │ ├── WinML_principles.md │ │ ├── cmake_guideline.md │ │ ├── execution_providers │ │ └── images │ │ │ ├── ONNX_Runtime_EP1.png │ │ │ ├── ONNX_Runtime_EP2.png │ │ │ ├── ONNX_Runtime_EP3.png │ │ │ ├── Vitis-AI.png │ │ │ ├── mkl-dnn_node.png │ │ │ └── mkl-dnn_subgraph.png │ │ ├── images │ │ ├── Mobile.png │ │ ├── ONNX_Runtime_icon.png │ │ ├── ONNX_Runtime_logo - Docs.png │ │ ├── ONNX_Runtime_logo.png │ │ ├── ONNX_Runtime_logo_dark.png │ │ ├── layered-architecture.png │ │ ├── mnist_optimization.png │ │ ├── mnist_optimization_with_nnapi.png │ │ ├── ngraph-logo.png │ │ └── nnapi_aware_ort_format_model.png │ │ ├── onnxruntime_dependencies.dot │ │ ├── onnxruntime_dependencies.png │ │ └── onnxruntime_extensions.md │ ├── other_pages.rst │ ├── requirements.txt │ └── tutorials │ ├── index.rst │ ├── tutorial_bench │ ├── index.rst │ ├── tutorial_benchmark.rst │ ├── tutorial_op.rst │ ├── tutorial_profile.rst │ └── tutorial_training.rst │ ├── tutorial_onnx │ ├── challenges.rst │ 
├── concepts.rst │ ├── images │ │ ├── iff.png │ │ ├── linreg1.png │ │ ├── linreg2.png │ │ └── scanop.png │ ├── index.rst │ ├── onnxops.rst │ └── python.rst │ ├── tutorial_onnxruntime │ ├── extensions.rst │ ├── index.rst │ ├── inference.rst │ ├── ortvalue_doc.rst │ ├── quantization.rst │ └── training_ort_api.rst │ ├── tutorial_parallel │ └── index.rst │ ├── tutorial_skl │ ├── index.rst │ ├── tutorial_1-5_external.rst │ ├── tutorial_1_simple.rst │ ├── tutorial_2_new_converter.rst │ ├── tutorial_3_new_operator.rst │ └── tutorial_4_complex.rst │ └── tutorial_training │ ├── images │ └── onnxfwbwloss.png │ ├── index.rst │ ├── tutorial_6_training.rst │ ├── tutorial_6_training_partial.rst │ └── tutorial_7_related_topics.rst ├── _unittests ├── ut_cli │ └── test_cli_profile.py ├── ut_documentation │ ├── test_documentation_check_coverage.py │ ├── test_documentation_examples_benchmark.py │ ├── test_documentation_examples_benchmark_op.py │ ├── test_documentation_examples_benchmark_orttraining.py │ ├── test_documentation_examples_benchmark_serialize.py │ ├── test_documentation_examples_lightgbm.py │ ├── test_documentation_examples_m.py │ ├── test_documentation_examples_m_u.py │ ├── test_documentation_examples_parallel.py │ ├── test_documentation_examples_show.py │ ├── test_documentation_examples_training.py │ ├── test_documentation_examples_training_fwbw.py │ ├── test_documentation_examples_training_torch.py │ ├── test_documentation_examples_u_.py │ └── test_documentation_notebooks.py ├── ut_experiment │ └── test_f8.py ├── ut_module │ ├── test_check.py │ ├── test_code_style.py │ ├── test_onnx_runtimes.py │ └── test_readme.py ├── ut_plotting │ └── test_plotting_onnx.py ├── ut_training │ ├── test_data_loader.py │ ├── test_grad_helper.py │ ├── test_gradient_mlp.py │ ├── test_learning_rate.py │ ├── test_optimizers.py │ ├── test_optimizers_classification.py │ ├── test_optimizers_forward_backward.py │ ├── test_optimizers_grid_score.py │ ├── test_optimizers_nan.py │ ├── test_orttraining_forward_backward.py │ └── test_training_qdq.py └── ut_utils │ ├── data │ ├── bench_ortmodule_nn_gpu.nvvp │ └── bench_ortmodule_nn_gpu.sql.zip │ ├── test_doc_helper.py │ ├── test_onnx_function.py │ ├── test_onnx_helper.py │ ├── test_onnx_writer.py │ ├── test_onnxruntime_helper.py │ ├── test_orttraining_helper.py │ ├── test_print_helper.py │ ├── test_split_onnx.py │ ├── test_utils_benchmark.py │ ├── test_utils_classes.py │ └── test_utils_nvjson.py ├── appveyor.yml ├── azure-pipelines.yml ├── build_script.bat ├── onnxcustom ├── __init__.py ├── __main__.py ├── cli │ ├── __init__.py │ └── profiling.py ├── experiment │ ├── __init__.py │ └── f8.py ├── plotting │ ├── __init__.py │ └── plotting_onnx.py ├── training │ ├── __init__.py │ ├── _base.py │ ├── _base_estimator.py │ ├── _base_onnx_function.py │ ├── data_loader.py │ ├── excs.py │ ├── grad_helper.py │ ├── optimizers.py │ ├── optimizers_partial.py │ ├── ortgradient.py │ ├── sgd_learning_loss.py │ ├── sgd_learning_penalty.py │ └── sgd_learning_rate.py └── utils │ ├── __init__.py │ ├── benchmark.py │ ├── doc_helper.py │ ├── imagenet_classes.py │ ├── nvprof2json.py │ ├── onnx_function.py │ ├── onnx_helper.py │ ├── onnx_rewriter.py │ ├── onnx_split.py │ ├── onnxruntime_helper.py │ ├── orttraining_helper.py │ └── print_helper.py ├── requirements-dev.txt ├── requirements.txt └── setup.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | docker: 5 | - image: cimg/python:3.9.5 6 | 7 | 
working_directory: ~/repo 8 | 9 | steps: 10 | - checkout 11 | 12 | - restore_cache: 13 | keys: 14 | - v3-dependencies-{{ checksum "requirements.txt" }} 15 | - v3-dependencies- 16 | 17 | - run: 18 | name: install dependencies 19 | command: | 20 | pip install -r requirements.txt 21 | pip install onnxruntime-training 22 | pip install -r requirements-dev.txt 23 | 24 | # - run: 25 | # name: install onnxruntime-training 26 | # command: | 27 | # pip install onnxruntime-training --extra-index-url https://download.onnxruntime.ai/onnxruntime_nightly_cpu.html 28 | 29 | - save_cache: 30 | paths: 31 | - ./venv 32 | key: v3-dependencies-{{ checksum "requirements.txt" }} 33 | 34 | - run: 35 | name: compile and build 36 | command: | 37 | python setup.py build_ext --inplace 38 | 39 | - run: 40 | name: flake8 41 | command: | 42 | python -m flake8 onnxcustom --max-line-length=100 --ignore=E731,W504 43 | python -m flake8 _doc/examples --max-line-length=100 --ignore=E731,W504 44 | 45 | - run: 46 | name: run tests 47 | command: | 48 | python setup.py unittests -d 50 49 | 50 | - run: 51 | name: wheel 52 | command: | 53 | python setup.py bdist_wheel 54 | cp dist/*.whl test-reports 55 | 56 | - run: 57 | name: check speed 58 | command: | 59 | python -m onnxcustom check 60 | 61 | - store_artifacts: 62 | path: test-reports 63 | destination: test-reports 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | coverage.html/* 3 | .coverage 4 | bin/* 5 | dist/* 6 | build/* 7 | data/* 8 | .eggs/* 9 | *.jpg 10 | *.onnx 11 | *.pt 12 | *.pkl 13 | *.err 14 | *.out 15 | *.prof 16 | *.pyproj 17 | *.bat 18 | *egg-info/* 19 | temp_* 20 | version.txt 21 | _doc/bench/*.svg 22 | _doc/bench/*.html 23 | _doc/examples/*.svg 24 | _doc/examples/*.html 25 | _doc/examples/*.json 26 | _doc/examples/*.dot* 27 | _doc/notebooks/.ipynb_checkpoints/* 28 | _doc/sphinxdoc/source/coverage/* 29 | _doc/sphinxdoc/source/_static/*.js 30 | _doc/sphinxdoc/source/_static/reveal.js/* 31 | _doc/sphinxdoc/source/_static/style_notebook_snippet.css 32 | _doc/sphinxdoc/source/_temp_* 33 | _doc/sphinxdoc/source/all_*.rst 34 | _doc/sphinxdoc/source/blog/* 35 | _doc/sphinxdoc/source/filechanges.rst 36 | _doc/sphinxdoc/source/index_*.rst 37 | _doc/sphinxdoc/source/onnxcustom/* 38 | _doc/sphinxdoc/source/phdoc_templates/*.html 39 | _doc/sphinxdoc/source/README.rst 40 | _doc/sphinxdoc/source/notebooks/reveal.js/* 41 | _doc/sphinxdoc/source/gyexamples/* 42 | _doc/sphinxdoc/build/* 43 | _doc/sphinxdoc/source/HISTORY.rst 44 | _doc/sphinxdoc/source/LICENSE.txt 45 | _doc/sphinxdoc/source/nbcov*.png 46 | _doc/sphinxdoc/source/notebooks/* 47 | _doc/sphinxdoc/source/gallery/* 48 | _unittests/ut_cli/something 49 | _unittests/ut_documentation/onnxruntime_profile*.json 50 | _unittests/ut_utils/t.* 51 | _doc/examples/bench.png 52 | _doc/examples/data.csv 53 | _doc/examples/plot_*.png 54 | _doc/examples/plot_l*.xlsx 55 | _doc/examples/plot_*.csv 56 | _doc/examples/summary.csv 57 | _unittests/ut_documentation/bench.png 58 | _unittests/ut_documentation/data.csv 59 | _unittests/ut_documentation/_test_example.txt 60 | _unittests/ut_documentation/plot_linear_regression.png 61 | _unittests/ut_documentation/summary.csv 62 | _unittests/ut_documentation/_test_example.txt 63 | _unittests/ut_documentation/_test_example.txt 64 | something 65 | _doc/examples/ort_cpu_bind.csv 66 | _doc/examples/ort_cpu_gpu.csv 67 | _doc/examples/ort_cpu.csv 68 | 
_doc/examples/ort_gpus.csv 69 | _doc/examples/ort_cpu_ortvalue.csv 70 | _unittests/ut_documentation/data 71 | _unittests/ut_documentation/ort_*.csv 72 | _doc/examples/*splits*.png 73 | _doc/examples/eager*.png 74 | _doc/examples/plot_*.onnx.txt 75 | -------------------------------------------------------------------------------- /.local.jenkins.lin.yml: -------------------------------------------------------------------------------- 1 | 2 | language: python 3 | 4 | python: 5 | - { PATH: "{{Python39}}", VERSION: 3.9, DIST: std, PYINT: python3.9 } 6 | 7 | virtualenv: 8 | - { system_site_packages: 0, path: {{ospathjoin(root_path, pickname("$NAME_JENKINS", project_name + "_$VERSION_$DIST_$NAME"), "_venv")}} } 9 | 10 | install: 11 | - $PYINT -m pip install --upgrade pip 12 | - $PYINT -m pip install --upgrade --no-cache-dir --no-deps --index http://localhost:8067/simple/ jyquickhelper pyquickhelper cpyquickhelper --extra-index-url=https://pypi.python.org/simple/ 13 | - $PYINT -m pip install --upgrade --no-cache-dir --no-deps --index http://localhost:8067/simple/ scikit-learn>=0.22 --extra-index-url=https://pypi.python.org/simple/ 14 | - $PYINT -m pip install --upgrade --no-cache-dir --no-deps --index http://localhost:8067/simple/ onnx --extra-index-url=https://pypi.python.org/simple/ 15 | - $PYINT -m pip install --upgrade --no-cache-dir --no-deps --index http://localhost:8067/simple/ onnxruntime-training --extra-index-url=https://pypi.python.org/simple/ 16 | - $PYINT -m pip install --upgrade --no-cache-dir --no-deps --index http://localhost:8067/simple/ onnxconverter-common skl2onnx onnx mlprodict mlinsights --extra-index-url=https://pypi.python.org/simple/ 17 | - $PYINT -m pip install --upgrade --no-cache-dir --no-deps --index http://localhost:8067/simple/ torch_interop_utils aten_op_executor --extra-index-url=https://pypi.python.org/simple/ 18 | - $PYINT -m pip install -r requirements.txt 19 | - $PYINT -m pip install -r requirements-dev.txt 20 | - $PYINT -m pip install funcparserlib==1.0.0a0 21 | - $PYINT --version 22 | - $PYINT -m pip freeze 23 | 24 | before_script: 25 | - $PYINT -u setup.py build_ext --inplace 26 | 27 | script: 28 | - { CMD: "$PYINT -u setup.py unittests --covtoken=3d3d394f-f562-40da-ae73-52cc66c95a37", NAME: "UT" } 29 | 30 | after_script: 31 | - $PYINT -u ./setup.py bdist_wheel 32 | - if [ ${NAME} == "UT" ] then cp dist/*.whl {{root_path}}/../local_pypi/local_pypi_server fi 33 | 34 | documentation: 35 | - if [ ${NAME} == "UT" ] then $PYINT -u setup.py build_sphinx --layout=html fi 36 | - if [ ${NAME} == "UT" ] then cp -R -f _doc/sphinxdoc/build/html dist/html fi 37 | - if [ ${NAME} == "UT" and ${VERSION} == "3.9" ] then zip doc.zip -r dist/html fi 38 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: focal 2 | sudo: true 3 | language: python 4 | python: 5 | - "3.9" 6 | addons: 7 | apt: 8 | packages: 9 | - graphviz 10 | install: 11 | - pip install -r requirements.txt 12 | - pip install scikit-learn 13 | - pip install -r requirements-dev.txt 14 | - pip install onnxruntime-training --extra-index-url https://download.onnxruntime.ai/onnxruntime_nightly_cpu.html 15 | before_script: 16 | - gcc --version 17 | - python setup.py build_ext --inplace 18 | script: 19 | - python setup.py unittests 20 | - python -m flake8 onnxcustom 21 | - python -m flake8 _doc/examples 22 | - python setup.py bdist_wheel 23 | - python -m onnxcustom check 24 | 
after_script: 25 | - export 26 | - python -m codecov 27 | 28 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2020-2023, Xavier Dupré 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | prune _doc 2 | prune _unittests 3 | prune bin 4 | prune .circleci 5 | exclude *.bat 6 | exclude *.yml 7 | exclude *.git* 8 | -------------------------------------------------------------------------------- /_doc/examples/README.txt: -------------------------------------------------------------------------------- 1 | .. 
_examples-gallery: 2 | 3 | Examples Gallery 4 | ================ 5 | 6 | 7 | -------------------------------------------------------------------------------- /_doc/examples/data/ort_cpu_gpu.csv: -------------------------------------------------------------------------------- 1 | index,n_imgs,maxN,stepN,repN,batch_size,n_threads,n_imgs_seq_cpu,time_seq_cpu,n_imgs_seq_gpu,time_seq_gpu,n_imgs_parallel,time_parallel 2 | 0,42,21,2,4,1,2,1,0.0027487186695604273,1,0.0019176103329906862,1,0.003333035002773007 3 | 1,42,21,2,4,2,2,2,0.010106401663506404,2,0.0036703916654611626,2,0.006719301997994383 4 | 2,42,21,2,4,6,2,6,0.019608354991457116,6,0.010759482664677003,6,0.014681133664756393 5 | 3,42,21,2,4,10,2,10,0.033823502667170637,10,0.018201829661848024,10,0.03152883699901091 6 | 4,42,21,2,4,14,2,14,0.039911389661331974,14,0.03187365999716955,14,0.03242401066624249 7 | 5,42,21,2,4,18,2,18,0.06327330066900079,18,0.04141061200061813,18,0.04056690366511854 8 | 6,42,21,2,4,22,2,22,0.059805569665816925,22,0.04993862733438922,22,0.0490827189933043 9 | 7,42,21,2,4,26,2,26,0.0755907039953551,26,0.06517837800007935,26,0.06089577066207615 10 | 8,42,21,2,4,30,2,30,0.08374553034082055,30,0.07051802033674903,30,0.07092717500442329 11 | 9,42,21,2,4,34,2,34,0.09733626266825013,34,0.07806666434044018,34,0.08508089099389811 12 | 10,42,21,2,4,38,2,38,0.12441956399319072,38,0.07200747666259606,38,0.07738801766148147 13 | -------------------------------------------------------------------------------- /_doc/examples/data/ort_gpus.csv: -------------------------------------------------------------------------------- 1 | index,n_imgs,maxN,stepN,repN,batch_size,n_threads,n_imgs_sequence,time_sequence,n_imgs_parallel,time_parallel 2 | 0,84,21,2,4,1,4,1,0.0021262706626051417,1,0.002218634666254123 3 | 1,84,21,2,4,4,4,4,0.007492412337645267,4,0.003118074993835762 4 | 2,84,21,2,4,12,4,12,0.02320858366632213,12,0.008246990667733675 5 | 3,84,21,2,4,20,4,20,0.040624970997062824,20,0.012640328000998124 6 | 4,84,21,2,4,28,4,28,0.05428626866584333,28,0.017552250006701797 7 | 5,84,21,2,4,36,4,36,0.06985467700481725,36,0.022314309666398913 8 | 6,84,21,2,4,44,4,44,0.08546845099772327,44,0.02716856700135395 9 | 7,84,21,2,4,52,4,52,0.10023868267307989,52,0.02866038967234393 10 | 8,84,21,2,4,60,4,60,0.11441926533977191,60,0.035442356660496444 11 | 9,84,21,2,4,68,4,68,0.13154826132813469,68,0.03777248799451627 12 | 10,84,21,2,4,76,4,76,0.1494300350022968,76,0.04743560366720582 13 | -------------------------------------------------------------------------------- /_doc/examples/data/ort_gpus_gpt2.csv: -------------------------------------------------------------------------------- 1 | index,n_imgs,maxN,stepN,repN,batch_size,n_threads,n_imgs_sequence,time_sequence,n_imgs_parallel,time_parallel 2 | 0,20,5,1,4,1,4,1,0.01781351266739269,1,0.01835523033514619 3 | 1,20,5,1,4,4,4,4,0.06945946999864343,4,0.031993265001801774 4 | 2,20,5,1,4,8,4,8,0.12952254799893126,8,0.051516027665153764 5 | 3,20,5,1,4,12,4,12,0.18449534167302772,12,0.05998864700086415 6 | 4,20,5,1,4,16,4,16,0.23770540566571677,16,0.07035543967504054 7 | -------------------------------------------------------------------------------- /_doc/examples/images/onnxfwbw1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/examples/images/onnxfwbw1.png -------------------------------------------------------------------------------- 
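The CSV files above store timing measurements comparing sequential and parallel
execution for growing batch sizes. A minimal sketch of how such a file can be
inspected, assuming pandas is installed and the path is resolved from the
repository root (the column names follow the headers shown above):

    import pandas

    df = pandas.read_csv("_doc/examples/data/ort_gpus.csv")
    # speedup of the parallel execution over the sequential one
    df["speedup"] = df["time_sequence"] / df["time_parallel"]
    print(df[["batch_size", "speedup"]])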
/_doc/examples/images/onnxfwbw2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/examples/images/onnxfwbw2.png
--------------------------------------------------------------------------------
/_doc/examples/images/onnxfwbwtorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/examples/images/onnxfwbwtorch.png
--------------------------------------------------------------------------------
/_doc/examples/plot_benchmark_inference_standard.py:
--------------------------------------------------------------------------------
"""
.. _benchmark-inference-sklearn:

Benchmark inference for scikit-learn models
===========================================

This short code compares the execution of a couple of runtimes
for inference including :epkg:`onnxruntime`. It uses the example
`Measure ONNX runtime performances `_. It is an automated process
to compare the performance of a model against :epkg:`scikit-learn`.
The model is a simple one taken from those implemented by
:epkg:`scikit-learn`.

.. contents::
    :local:

Linear Regression
+++++++++++++++++

"""
from pandas import read_csv
from mlprodict.cli import validate_runtime
from mlprodict.plotting.plotting import plot_validate_benchmark

res = validate_runtime(
    verbose=1,
    out_raw="data.csv", out_summary="summary.csv",
    benchmark=True, dump_folder="dump_errors",
    runtime=['python', 'onnxruntime1'],
    models=['LinearRegression'],
    skip_models=['LinearRegression[m-reg]'],
    n_features=[10, 50], dtype="32",
    out_graph="bench.png",
    opset_min=15, opset_max=15,
    time_kwargs={
        1: {"number": 50, "repeat": 50},
        10: {"number": 25, "repeat": 25},
        100: {"number": 20, "repeat": 20},
        1000: {"number": 20, "repeat": 20},
        10000: {"number": 10, "repeat": 10},
    }
)

results = read_csv('summary.csv')
results

###########################################
# Graph.

_, ax = plot_validate_benchmark(results)
ax

# import matplotlib.pyplot as plt
# plt.show()
--------------------------------------------------------------------------------
/_doc/examples/plot_gexternal_lightgbm.py:
--------------------------------------------------------------------------------
"""
.. _example-lightgbm:

Convert a pipeline with a LightGBM classifier
=============================================

.. index:: LightGBM

:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models into *ONNX*
but many libraries implement the :epkg:`scikit-learn` API so that their models
can be included in a :epkg:`scikit-learn` pipeline. This example considers
a pipeline including a :epkg:`LightGBM` model. :epkg:`sklearn-onnx` can convert
the whole pipeline as long as it knows the converter associated with
a *LGBMClassifier*. Let's see how to do it.
.. contents::
    :local:

Train a LightGBM classifier
+++++++++++++++++++++++++++
"""
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
from mlprodict.onnxrt import OnnxInference
import onnxruntime as rt
from skl2onnx import convert_sklearn, update_registered_converter
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes  # noqa
from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm  # noqa
from skl2onnx.common.data_types import FloatTensorType
import numpy
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from lightgbm import LGBMClassifier

data = load_iris()
X = data.data[:, :2]
y = data.target

ind = numpy.arange(X.shape[0])
numpy.random.shuffle(ind)
X = X[ind, :].copy()
y = y[ind].copy()

pipe = Pipeline([('scaler', StandardScaler()),
                 ('lgbm', LGBMClassifier(n_estimators=3))])
pipe.fit(X, y)

######################################
# Register the converter for LGBMClassifier
# +++++++++++++++++++++++++++++++++++++++++
#
# The converter is implemented in :epkg:`onnxmltools`:
# `onnxmltools...LightGbm.py `_.
# and the shape calculator:
# `onnxmltools...Classifier.py `_.

update_registered_converter(
    LGBMClassifier, 'LightGbmLGBMClassifier',
    calculate_linear_classifier_output_shapes, convert_lightgbm,
    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})

##################################
# Convert the pipeline
# ++++++++++++++++++++

model_onnx = convert_sklearn(
    pipe, 'pipeline_lightgbm',
    [('input', FloatTensorType([None, 2]))],
    target_opset={'': 14, 'ai.onnx.ml': 2},
    options={'lgbm__zipmap': False})

# And save.
with open("pipeline_lightgbm.onnx", "wb") as f:
    f.write(model_onnx.SerializeToString())

###########################
# Compare the predictions
# +++++++++++++++++++++++
#
# Predictions with LightGbm.

print("predict", pipe.predict(X[:5]))
print("predict_proba", pipe.predict_proba(X[:1]))

##########################
# Predictions with onnxruntime.

sess = rt.InferenceSession("pipeline_lightgbm.onnx",
                           providers=['CPUExecutionProvider'])

pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
print("predict", pred_onx[0])
print("predict_proba", pred_onx[1][:1])

#############################
# Final graph
# +++++++++++


oinf = OnnxInference(model_onnx)
ax = plot_graphviz(oinf.to_dot())
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
--------------------------------------------------------------------------------
/_doc/examples/plot_qextend_onnxruntime.py:
--------------------------------------------------------------------------------
"""
Fast runtime with onnxruntime
=============================

The list of :epkg:`ONNX operators` does not include every operator
from :epkg:`numpy`. There is no operator for
`solve `_ but this one
is needed to implement the prediction function
of model :epkg:`NMF`. The converter can be written
including a new ONNX operator but then it requires a
runtime for it to be tested.
Example :ref:`l-extend-python-runtime` shows how to do that
with :epkg:`mlprodict`. Doing the same with
:epkg:`onnxruntime` is more ambitious as it requires
C++...

*to be continued*
"""
--------------------------------------------------------------------------------
/_doc/notebooks/README.txt:
--------------------------------------------------------------------------------
.. _notebooks-gallery:

Notebooks Gallery
=================
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/_static/project_ico.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/_static/project_ico.png
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/apis.rst:
--------------------------------------------------------------------------------
.. _l-apis:

===
API
===

.. toctree::
    :maxdepth: 1

    index
    onnx_python/index
    onnxruntime_python/index
    onnxops/index
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/data.rst:
--------------------------------------------------------------------------------
Data
====

.. contents::
    :local:

DataLoader
++++++++++

.. autosignature:: onnxcustom.training.data_loader.OrtDataLoader
    :members:
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/experiment.rst:
--------------------------------------------------------------------------------
Experiment
==========

.. contents::
    :local:

Float 8
+++++++

.. autosignature:: onnxcustom.experiment.f8.display_fe4m3

.. autosignature:: onnxcustom.experiment.f8.display_fe5m2

.. autosignature:: onnxcustom.experiment.f8.display_float16

.. autosignature:: onnxcustom.experiment.f8.display_float32

.. autosignature:: onnxcustom.experiment.f8.fe4m3_to_float32

.. autosignature:: onnxcustom.experiment.f8.fe5m2_to_float32

.. autosignature:: onnxcustom.experiment.f8.float32_to_fe4m3

.. autosignature:: onnxcustom.experiment.f8.float32_to_fe5m2
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/index.rst:
--------------------------------------------------------------------------------
==============
onnxcustom API
==============

.. toctree::

    utils
    plotting
    data
    training
    training_utils
    experiment
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/backend.rst:
--------------------------------------------------------------------------------
onnx.backend
============

.. contents::
    :local:

Backend
+++++++

.. autoclass:: onnx.backend.base.Backend
    :members:

BackendRep
++++++++++

.. autoclass:: onnx.backend.base.BackendRep
    :members:

Device
++++++

.. autoclass:: onnx.backend.base.Device
    :members:

DeviceType
++++++++++
.. autoclass:: onnx.backend.base.DeviceType
    :members:

load_model_tests
++++++++++++++++

.. autofunction:: onnx.backend.test.loader.load_model_tests
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/checker.rst:
--------------------------------------------------------------------------------
onnx.checker
============

.. contents::
    :local:

check_model
+++++++++++

.. autofunction:: onnx.checker.check_model
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/compose.rst:
--------------------------------------------------------------------------------
onnx.compose
============

.. contents::
    :local:

.. currentmodule:: onnx.compose

.. autosummary::

    merge_graphs
    merge_models

merge_graphs
++++++++++++

.. autofunction:: onnx.compose.merge_graphs

merge_models
++++++++++++

.. autofunction:: onnx.compose.merge_models

prefix
++++++

.. autofunction:: onnx.compose.add_prefix_graph

.. autofunction:: onnx.compose.add_prefix

dimension
+++++++++

.. autofunction:: onnx.compose.expand_out_dim

.. autofunction:: onnx.compose.expand_out_dim_graph
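As a quick illustration, a minimal sketch merging two models by connecting
an output of the first one to an input of the second one (the file names
and tensor names are hypothetical):

::

    from onnx import load
    from onnx.compose import merge_models

    m1 = load("first_model.onnx")   # produces an output named "Y"
    m2 = load("second_model.onnx")  # consumes an input named "X"
    # io_map connects output "Y" of m1 to input "X" of m2
    merged = merge_models(m1, m2, io_map=[("Y", "X")])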
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/defs.rst:
--------------------------------------------------------------------------------
.. _l-mod-onnx-defs:

onnx.defs
=========

.. contents::
    :local:

.. _l-api-opset-version:

Opset Version
+++++++++++++

.. autofunction:: onnx.defs.onnx_opset_version

.. autofunction:: onnx.defs.get_all_schemas_with_history

Operators and Functions Schemas
+++++++++++++++++++++++++++++++

.. autofunction:: onnx.defs.get_function_ops

.. autofunction:: onnx.defs.get_schema

class OpSchema
++++++++++++++

.. autoclass:: onnx.defs.OpSchema
    :members:

Exception SchemaError
+++++++++++++++++++++

.. autoclass:: onnx.defs.SchemaError
    :members:

Constants
+++++++++

Domains officially supported in the onnx package.

.. exec_code::

    from onnx.defs import (
        ONNX_DOMAIN,
        ONNX_ML_DOMAIN,
        AI_ONNX_PREVIEW_TRAINING_DOMAIN,
    )
    print(f"ONNX_DOMAIN={ONNX_DOMAIN!r}")
    print(f"ONNX_ML_DOMAIN={ONNX_ML_DOMAIN!r}")
    print(f"AI_ONNX_PREVIEW_TRAINING_DOMAIN={AI_ONNX_PREVIEW_TRAINING_DOMAIN!r}")
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/external_data_helper.rst:
--------------------------------------------------------------------------------
onnx.external_data_helper
=========================

.. contents::
    :local:

convert_model_from_external_data
++++++++++++++++++++++++++++++++

.. autofunction:: onnx.external_data_helper.convert_model_from_external_data

convert_model_to_external_data
++++++++++++++++++++++++++++++

.. autofunction:: onnx.external_data_helper.convert_model_to_external_data

ExternalDataInfo
++++++++++++++++

.. autoclass:: onnx.external_data_helper.ExternalDataInfo

load_external_data_for_model
++++++++++++++++++++++++++++

.. autofunction:: onnx.external_data_helper.load_external_data_for_model

load_external_data_for_tensor
+++++++++++++++++++++++++++++

.. autofunction:: onnx.external_data_helper.load_external_data_for_tensor

remove_external_data_field
++++++++++++++++++++++++++

.. autofunction:: onnx.external_data_helper.remove_external_data_field

save_external_data
++++++++++++++++++

.. autofunction:: onnx.external_data_helper.save_external_data

set_external_data
+++++++++++++++++

.. autofunction:: onnx.external_data_helper.set_external_data

write_external_data_tensors
+++++++++++++++++++++++++++

.. autofunction:: onnx.external_data_helper.write_external_data_tensors
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/helper.rst:
--------------------------------------------------------------------------------
onnx.helper
===========

.. contents::
    :local:

getter
++++++

.. autofunction:: onnx.helper.get_attribute_value

print
+++++

.. autofunction:: onnx.helper.printable_attribute

.. autofunction:: onnx.helper.printable_dim

.. autofunction:: onnx.helper.printable_graph

.. autofunction:: onnx.helper.printable_node

.. autofunction:: onnx.helper.printable_tensor_proto

.. autofunction:: onnx.helper.printable_type

.. autofunction:: onnx.helper.printable_value_info

tools
+++++

.. autofunction:: onnx.helper.find_min_ir_version_for

.. autofunction:: onnx.helper.split_complex_to_pairs

.. _l-onnx-make-function:

make function
+++++++++++++

All the functions used to create an ONNX graph.

.. autofunction:: onnx.helper.make_attribute

.. autofunction:: onnx.helper.make_empty_tensor_value_info

.. autofunction:: onnx.helper.make_function

.. autofunction:: onnx.helper.make_graph

.. autofunction:: onnx.helper.make_map

.. autofunction:: onnx.helper.make_model

.. autofunction:: onnx.helper.make_node

.. autofunction:: onnx.helper.make_operatorsetid

.. autofunction:: onnx.helper.make_opsetid

.. autofunction:: onnx.helper.make_optional

.. autofunction:: onnx.helper.make_optional_type_proto

.. autofunction:: onnx.helper.make_sequence

.. autofunction:: onnx.helper.make_sequence_type_proto

.. autofunction:: onnx.helper.make_sparse_tensor

.. autofunction:: onnx.helper.make_sparse_tensor_type_proto

.. autofunction:: onnx.helper.make_sparse_tensor_value_info

.. autofunction:: onnx.helper.make_tensor

.. autofunction:: onnx.helper.make_tensor_sequence_value_info

.. autofunction:: onnx.helper.make_tensor_type_proto

.. autofunction:: onnx.helper.make_training_info

.. autofunction:: onnx.helper.make_tensor_value_info

.. autofunction:: onnx.helper.make_value_info
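As a quick illustration, a minimal sketch building a one-node model with
these helpers (the names and shapes are arbitrary):

::

    from onnx import TensorProto
    from onnx.helper import (
        make_graph, make_model, make_node, make_opsetid,
        make_tensor_value_info)

    X = make_tensor_value_info("X", TensorProto.FLOAT, [None, 2])
    Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, 2])
    node = make_node("Abs", ["X"], ["Y"])
    graph = make_graph([node], "example", [X], [Y])
    onnx_model = make_model(graph, opset_imports=[make_opsetid("", 15)])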
type mappings
+++++++++++++

.. autofunction:: onnx.helper.get_all_tensor_dtypes

.. autofunction:: onnx.helper.np_dtype_to_tensor_dtype

.. autofunction:: onnx.helper.tensor_dtype_to_field

.. autofunction:: onnx.helper.tensor_dtype_to_np_dtype

.. autofunction:: onnx.helper.tensor_dtype_to_storage_tensor_dtype

.. autofunction:: onnx.helper.tensor_dtype_to_string
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/hub.rst:
--------------------------------------------------------------------------------
onnx.hub
========

.. contents::
    :local:

ModelInfo
+++++++++

.. autoclass:: onnx.hub.ModelInfo
    :members:

list_models
+++++++++++

.. autofunction:: onnx.hub.list_models

get_model_info
++++++++++++++

.. autofunction:: onnx.hub.get_model_info

load
++++

.. autofunction:: onnx.hub.load
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/index.rst:
--------------------------------------------------------------------------------
.. _l-python-onnx-api:

Summary of onnx API
===================

Versioning
++++++++++

The following example shows how to retrieve the onnx version,
the onnx opset and the IR version. Every new major release increments the opset version
(see :ref:`l-api-opset-version`).

.. runpython::
    :showcode:

    from onnx import __version__, IR_VERSION
    from onnx.defs import onnx_opset_version
    print(f"onnx.__version__={__version__!r}, opset={onnx_opset_version()}, IR_VERSION={IR_VERSION}")

The intermediate representation (IR) specification is the abstract model for
graphs and operators and the concrete format that represents them.
Adding a structure or modifying one of them increases the IR version.

The opset version increases when an operator is added, removed or modified.
A higher opset means a longer list of operators and more options to
implement an ONNX function. An operator is usually modified because it
supports more input and output types, or because an attribute becomes an input.

Data Structures
+++++++++++++++

Every ONNX object is defined based on a `protobuf message `_
and has a name ending with the suffix `Proto`. For example, :ref:`l-nodeproto` defines
an operator, :ref:`l-tensorproto` defines a tensor. The next page lists all of them.

.. toctree::
    :maxdepth: 1

    classes
    serialization

Functions
+++++++++

An ONNX model can be built directly from the classes described
in the previous section but it is faster to create and
verify a model with the following helpers.
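For instance, a minimal verification sketch (assuming ``onnx_model`` is a
:ref:`l-modelproto` built with the helpers listed below):

::

    from onnx.checker import check_model
    from onnx.shape_inference import infer_shapes

    check_model(onnx_model)                # raises an exception if the model is invalid
    onnx_model = infer_shapes(onnx_model)  # adds inferred shapes for intermediate results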
.. toctree::
    :maxdepth: 1

    backend
    checker
    compose
    defs
    external_data_helper
    helper
    hub
    mapping
    numpy_helper
    parser
    printer
    reference
    shape_inference
    tools
    utils
    version_converter
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/mapping.rst:
--------------------------------------------------------------------------------
.. _l-mod-onnx-mapping:

onnx.mapping
============

This module defines the correspondence between onnx numerical types
and numpy numerical types. This information can be accessed
through the attribute :ref:`l-onnx-types-mapping` or through the functions
defined in :ref:`l-mod-onnx-helper`.

.. contents::
    :local:

TensorDtypeMap
++++++++++++++

.. autoclass:: onnx.mapping.TensorDtypeMap

.. _l-onnx-types-mapping:

TENSOR_TYPE_MAP
+++++++++++++++

.. exec_code::

    import pprint
    from onnx.mapping import TENSOR_TYPE_MAP

    pprint.pprint(TENSOR_TYPE_MAP)
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/numpy_helper.rst:
--------------------------------------------------------------------------------
onnx.numpy_helper
=================

.. contents::
    :local:

.. _l-numpy-helper-onnx-array:

array
+++++

.. autofunction:: onnx.numpy_helper.from_array

.. autofunction:: onnx.numpy_helper.to_array

sequence
++++++++

.. autofunction:: onnx.numpy_helper.to_list

.. autofunction:: onnx.numpy_helper.from_list

dictionary
++++++++++

.. autofunction:: onnx.numpy_helper.to_dict

.. autofunction:: onnx.numpy_helper.from_dict

optional
++++++++

.. autofunction:: onnx.numpy_helper.to_optional

.. autofunction:: onnx.numpy_helper.from_optional

tools
+++++

.. autofunction:: onnx.numpy_helper.convert_endian

.. autofunction:: onnx.numpy_helper.combine_pairs_to_complex
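A short sketch of the round trip between a numpy array and a
:ref:`l-tensorproto` (the tensor name is arbitrary):

::

    import numpy
    from onnx.numpy_helper import from_array, to_array

    array = numpy.array([[0, 1], [2, 3]], dtype=numpy.float32)
    tensor_proto = from_array(array, name="example")  # numpy -> TensorProto
    restored = to_array(tensor_proto)                 # TensorProto -> numpy
    assert (array == restored).all()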
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/parser.rst:
--------------------------------------------------------------------------------
onnx.parser
===========

.. contents::
    :local:

parse_function
++++++++++++++

.. autofunction:: onnx.parser.parse_function

parse_graph
+++++++++++

.. autofunction:: onnx.parser.parse_graph

parse_model
+++++++++++

.. autofunction:: onnx.parser.parse_model
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/printer.rst:
--------------------------------------------------------------------------------
onnx.printer
============

.. contents::
    :local:

to_text
+++++++

.. autofunction:: onnx.printer.to_text
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/reference.rst:
--------------------------------------------------------------------------------
.. _l-reference-implementation:

onnx.reference
==============

.. contents::
    :local:

DefaultNone
+++++++++++

.. autoclass:: onnx.reference.op_run.DefaultNone
    :members:

Inference
+++++++++

.. autoclass:: onnx.reference.ReferenceEvaluator
    :members: input_names, output_names, opsets, run

OpFunction
++++++++++

.. autoclass:: onnx.reference.op_run.OpFunction
    :members: create, eval, input, output, implicit_inputs, domain, need_context, run, make_node

OpRun
+++++

.. autoclass:: onnx.reference.op_run.OpRun
    :members: create, eval, input, output, implicit_inputs, domain, need_context, run, make_node

RuntimeTypeError
++++++++++++++++

.. autoclass:: onnx.reference.op_run.RuntimeTypeError
    :members:

SparseTensor
++++++++++++

.. autoclass:: onnx.reference.op_run.SparseTensor
    :members:
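A minimal sketch running the reference evaluator on a tiny model built
with the helpers from :ref:`l-onnx-make-function` (names, shapes and the
opset are arbitrary):

::

    import numpy
    from onnx import TensorProto
    from onnx.helper import (
        make_graph, make_model, make_node, make_opsetid,
        make_tensor_value_info)
    from onnx.reference import ReferenceEvaluator

    X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
    Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
    model = make_model(
        make_graph([make_node("Neg", ["X"], ["Y"])], "g", [X], [Y]),
        opset_imports=[make_opsetid("", 18)])

    sess = ReferenceEvaluator(model)
    print(sess.run(None, {"X": numpy.array([1, -2], dtype=numpy.float32)}))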
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/serialization.rst:
--------------------------------------------------------------------------------
.. _l-serialization:

Serialization
=============

.. contents::
    :local:

Save a model and any Proto class
++++++++++++++++++++++++++++++++

An ONNX graph needs to be serialized into one contiguous
memory buffer. Method `SerializeToString` is available
in every ONNX object.

::

    with open("model.onnx", "wb") as f:
        f.write(onnx_model.SerializeToString())

This method has the following signature.

.. autoclass:: onnx.ModelProto
    :members: SerializeToString

Every Proto class implements method `SerializeToString`.
Therefore the following code works with any class described
on page :ref:`l-onnx-classes`.

::

    with open("proto.pb", "wb") as f:
        f.write(proto.SerializeToString())

The next example shows how to save a :ref:`l-nodeproto`.

.. runpython::
    :showcode:

    from onnx import NodeProto

    node = NodeProto()
    node.name = "example-type-proto"
    node.op_type = "Add"
    node.input.extend(["X", "Y"])
    node.output.extend(["Z"])

    with open("node.pb", "wb") as f:
        f.write(node.SerializeToString())

Load a model
++++++++++++

The following function only automates the loading of a class
:ref:`l-modelproto`. The next sections show how to restore
any other proto class.

.. autofunction:: onnx.load

::

    from onnx import load

    onnx_model = load("model.onnx")

Or:

::

    from onnx import load

    with open("model.onnx", "rb") as f:
        onnx_model = load(f)

The next function does the same from a bytes array.

.. autofunction:: onnx.load_model_from_string

.. _l-onnx-load-data:

Load a Proto
++++++++++++

Proto means here any type containing data, including a model, a tensor
or a sparse tensor: any class listed on page :ref:`l-onnx-classes`.
The user must know the type of the data to restore
and then call the method `ParseFromString`.
:epkg:`protobuf` does not store any information about the class
of the saved data. Therefore, this class must be known before
restoring an object.

.. autoclass:: onnx.ModelProto
    :members: ParseFromString

The next example shows how to restore a :ref:`l-nodeproto`.

.. runpython::
    :showcode:

    from onnx import NodeProto

    tp2 = NodeProto()
    with open("node.pb", "rb") as f:
        content = f.read()

    tp2.ParseFromString(content)

    print(tp2)

A shortcut exists for :ref:`l-tensorproto`:

.. autofunction:: onnx.load_tensor_from_string

Performance
+++++++++++

.. toctree::

    ../../gyexamples/plot_benchmark_onnx_serialize
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/shape_inference.rst:
--------------------------------------------------------------------------------
onnx.shape_inference
====================

.. contents::
    :local:

infer_shapes
++++++++++++

.. autofunction:: onnx.shape_inference.infer_shapes

infer_shapes_path
+++++++++++++++++

.. autofunction:: onnx.shape_inference.infer_shapes_path
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/tools.rst:
--------------------------------------------------------------------------------
onnx.tools
==========

.. contents::
    :local:

net_drawer
++++++++++

.. autofunction:: onnx.tools.net_drawer.GetPydotGraph

.. autofunction:: onnx.tools.net_drawer.GetOpNodeProducer

::

    from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer

    pydot_graph = GetPydotGraph(
        model_onnx.graph,  # model_onnx is a ModelProto instance
        name=model_onnx.graph.name,
        rankdir="TB",  # top-to-bottom layout, graphviz also accepts "LR"
        node_producer=GetOpNodeProducer("docstring"))
    pydot_graph.write_dot("graph.dot")

update_inputs_outputs_dims
++++++++++++++++++++++++++

.. autofunction:: onnx.tools.update_model_dims.update_inputs_outputs_dims
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/utils.rst:
--------------------------------------------------------------------------------
onnx.utils
==========

.. contents::
    :local:

Extractor
+++++++++

.. autoclass:: onnx.utils.Extractor
    :members:

extract_model
+++++++++++++

.. autofunction:: onnx.utils.extract_model
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnx_python/version_converter.rst:
--------------------------------------------------------------------------------
onnx.version_converter
======================

.. contents::
    :local:

convert_version
+++++++++++++++

.. autofunction:: onnx.version_converter.convert_version
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnxruntime_python/exceptions.rst:
--------------------------------------------------------------------------------
Exceptions
==========

.. contents::
    :local:

.. autoclass:: onnxruntime.capi._pybind_state.EngineError

.. autoclass:: onnxruntime.capi._pybind_state.EPFail

.. autoclass:: onnxruntime.capi._pybind_state.Fail

.. autoclass:: onnxruntime.capi._pybind_state.InvalidArgument

.. autoclass:: onnxruntime.capi._pybind_state.InvalidGraph

.. autoclass:: onnxruntime.capi._pybind_state.InvalidProtobuf
.. autoclass:: onnxruntime.capi._pybind_state.ModelLoaded

.. autoclass:: onnxruntime.capi._pybind_state.NoModel

.. autoclass:: onnxruntime.capi._pybind_state.NoSuchFile

.. autoclass:: onnxruntime.capi._pybind_state.NotImplemented

.. autoclass:: onnxruntime.capi._pybind_state.RuntimeException
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnxruntime_python/grad.rst:
--------------------------------------------------------------------------------
Gradient
========

.. contents::
    :local:

C++ API
+++++++

.. autoclass:: onnxruntime.capi._pybind_state.GradientGraphBuilder

.. autoclass:: onnxruntime.capi._pybind_state.GradientNodeAttributeDefinition

.. autoclass:: onnxruntime.capi._pybind_state.GradientNodeDefinition

.. autofunction:: onnxruntime.capi._pybind_state.register_gradient_definition

.. autofunction:: onnxruntime.capi._pybind_state.register_aten_op_executor

.. autofunction:: onnxruntime.capi._pybind_state.register_backward_runner

.. autofunction:: onnxruntime.capi._pybind_state.register_forward_runner

Python API
++++++++++

.. autofunction:: onnxruntime.training.experimental.gradient_graph._gradient_graph_tools.export_gradient_graph
--------------------------------------------------------------------------------
/_doc/sphinxdoc/source/api/onnxruntime_python/helpers.rst:
--------------------------------------------------------------------------------
onnxruntime helpers
===================

.. contents::
    :local:

Frequent functions
++++++++++++++++++

.. autofunction:: onnxruntime.get_device

.. runpython::
    :showcode:

    import onnxruntime
    print(onnxruntime.get_device())

.. autofunction:: onnxruntime.get_all_providers

.. runpython::
    :showcode:

    import pprint
    import onnxruntime
    pprint.pprint(onnxruntime.get_all_providers())

.. autofunction:: onnxruntime.get_available_providers

.. runpython::
    :showcode:

    import onnxruntime
    import pprint
    pprint.pprint(onnxruntime.get_available_providers())

.. autofunction:: onnxruntime.set_default_logger_severity

.. autofunction:: onnxruntime.set_seed

Python Wrapper OrtDevice
++++++++++++++++++++++++

.. autoclass:: onnxruntime.OrtDevice
    :members:
    :undoc-members:

C class, OrtDevice or C_OrtDevice
+++++++++++++++++++++++++++++++++

.. autoclass:: onnxruntime.capi._pybind_state.OrtDevice
    :members:
    :undoc-members:

OrtMemoryInfo
+++++++++++++

.. autoclass:: onnxruntime.capi._pybind_state.OrtMemoryInfo
    :members:
    :undoc-members:

C classes, frequent types
+++++++++++++++++++++++++

.. autoclass:: onnxruntime.capi._pybind_state.ModelMetadata
    :members:
    :undoc-members:

.. autoclass:: onnxruntime.capi._pybind_state.OrtMemType
    :members:

Rare functions
++++++++++++++

.. autofunction:: onnxruntime.capi._pybind_state.clear_training_ep_instances

.. autofunction:: onnxruntime.capi._pybind_state.create_and_register_allocator

.. autofunction:: onnxruntime.capi._pybind_state.enable_telemetry_events
autofunction:: onnxruntime.capi._pybind_state.disable_telemetry_events 82 | 83 | .. autofunction:: onnxruntime.capi._pybind_state.get_session_initializer 84 | 85 | .. autofunction:: onnxruntime.capi._pybind_state.is_dlpack_uint8_tensor 86 | 87 | .. autofunction:: onnxruntime.capi._pybind_state.unregister_python_functions 88 | 89 | Rare functions for training 90 | +++++++++++++++++++++++++++ 91 | 92 | .. autofunction:: onnxruntime.tools.have_torch 93 | 94 | .. autofunction:: onnxruntime.tools.infer_input_info 95 | 96 | .. autofunction:: onnxruntime.tools.optimize_onnx_model.optimize_model 97 | 98 | .. autofunction:: onnxruntime.tools.pytorch_export_helpers 99 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/index.rst: -------------------------------------------------------------------------------- 1 | 2 | Summary of onnxruntime and onnxruntime-training API 3 | =================================================== 4 | 5 | Module :epkg:`onnxcustom` leverages :epkg:`onnxruntime-training` to train models. 6 | The next sections expose the functions most frequently used to run inference 7 | and training with :epkg:`onnxruntime` and :epkg:`onnxruntime-training`. 8 | 9 | Most of the code in :epkg:`onnxruntime` is written in C++ and exposed 10 | in Python using :epkg:`pybind11`. For inference, the main class 11 | is :epkg:`InferenceSession`. It wraps the C class :ref:`l-ort-inference-session-c`. 12 | The Python class is easier to use; both have the same name. 13 | The wrapper adds a small overhead, which becomes significant on small models 14 | such as a linear regression. 15 | It is recommended to use the C classes in that case 16 | (inference, ortvalue, device). 17 | 18 | .. toctree:: 19 | :maxdepth: 1 20 | 21 | helpers 22 | ortvalue 23 | sparse 24 | inference 25 | training 26 | training_partial 27 | training_torch 28 | exceptions 29 | grad 30 | tools 31 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/inference.rst: -------------------------------------------------------------------------------- 1 | 2 | Inference 3 | ========= 4 | 5 | .. contents:: 6 | :local: 7 | 8 | .. _l-ort-inference-session: 9 | 10 | Python Wrapper for InferenceSession 11 | +++++++++++++++++++++++++++++++++++ 12 | 13 | .. autoclass:: onnxruntime.InferenceSession 14 | :members: 15 | :inherited-members: 16 | 17 | .. _l-ort-inference-session-c: 18 | 19 | C Class InferenceSession 20 | ++++++++++++++++++++++++ 21 | 22 | .. autoclass:: onnxruntime.capi._pybind_state.InferenceSession 23 | :members: 24 | :undoc-members: 25 | 26 | RunOptions 27 | ++++++++++ 28 | 29 | .. autoclass:: onnxruntime.capi._pybind_state.RunOptions 30 | :members: 31 | :undoc-members: 32 | 33 | SessionOptions 34 | ++++++++++++++ 35 | 36 | .. autoclass:: onnxruntime.capi._pybind_state.SessionOptions 37 | :members: 38 | :undoc-members: 39 | 40 | Python Wrapper for SessionIOBinding 41 | +++++++++++++++++++++++++++++++++++ 42 | 43 | .. autoclass:: onnxruntime.SessionIOBinding 44 | :members: 45 | :undoc-members: 46 | 47 | C Class SessionIOBinding 48 | ++++++++++++++++++++++++ 49 | 50 | .. autoclass:: onnxruntime.capi._pybind_state.SessionIOBinding 51 | :members: 52 | :undoc-members: 53 | 54 | Other classes 55 | +++++++++++++ 56 | 57 | OrtAllocatorType 58 | ~~~~~~~~~~~~~~~~ 59 | 60 | .. autoclass:: onnxruntime.capi._pybind_state.OrtAllocatorType 61 | :members: 62 | 63 | ExecutionOrder 64 | ~~~~~~~~~~~~~~ 65 | 66 | ..
autoclass:: onnxruntime.capi._pybind_state.ExecutionOrder 67 | :members: 68 | 69 | ExecutionMode 70 | ~~~~~~~~~~~~~ 71 | 72 | .. autoclass:: onnxruntime.capi._pybind_state.ExecutionMode 73 | :members: 74 | 75 | GraphInfo 76 | ~~~~~~~~~ 77 | 78 | .. autoclass:: onnxruntime.capi._pybind_state.GraphInfo 79 | :members: 80 | 81 | GraphOptimizationLevel 82 | ~~~~~~~~~~~~~~~~~~~~~~ 83 | 84 | .. autoclass:: onnxruntime.capi._pybind_state.GraphOptimizationLevel 85 | :members: 86 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/ortvalue.rst: -------------------------------------------------------------------------------- 1 | 2 | OrtValue 3 | ======== 4 | 5 | :epkg:`onnxruntime` implements tensors with class :epkg:`OrtValue`. 6 | It has the same properties as a :class:`numpy.ndarray`, shape and type, 7 | but it only represents a contiguous array. The current implementation 8 | is just a container and does not support standard operators such as 9 | addition or subtraction. :epkg:`onnxruntime` has a C implementation 10 | wrapped into a Python class with the same name. 11 | 12 | .. contents:: 13 | :local: 14 | 15 | Python Wrapper for OrtValue 16 | +++++++++++++++++++++++++++ 17 | 18 | .. note:: 19 | Method `ortvalue_from_numpy` does not copy data, it borrows 20 | the data pointer. The numpy array must remain alive while 21 | the instance of OrtValue is in use. 22 | 23 | .. autoclass:: onnxruntime.OrtValue 24 | :members: 25 | :undoc-members: 26 | 27 | C Class OrtValue or C_OrtValue 28 | ++++++++++++++++++++++++++++++ 29 | 30 | .. autoclass:: onnxruntime.capi._pybind_state.OrtValue 31 | :members: 32 | :undoc-members: 33 | 34 | C Class OrtValueVector 35 | ++++++++++++++++++++++ 36 | 37 | .. autoclass:: onnxruntime.capi._pybind_state.OrtValueVector 38 | :members: 39 | :undoc-members: 40 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/sparse.rst: -------------------------------------------------------------------------------- 1 | 2 | ============ 3 | SparseTensor 4 | ============ 5 | 6 | .. contents:: 7 | :local: 8 | 9 | Python Wrapper for C Classes 10 | ============================ 11 | 12 | .. autoclass:: onnxruntime.SparseTensor 13 | :members: 14 | :undoc-members: 15 | 16 | C classes 17 | ========= 18 | 19 | C++ class SparseTensor 20 | ++++++++++++++++++++++ 21 | 22 | .. autoclass:: onnxruntime.capi._pybind_state.SparseTensor 23 | :members: 24 | :undoc-members: 25 | 26 | OrtSparseFormat 27 | +++++++++++++++ 28 | 29 | .. autoclass:: onnxruntime.capi._pybind_state.OrtSparseFormat 30 | :members: 31 | 32 | SparseCooView 33 | +++++++++++++ 34 | 35 | .. autoclass:: onnxruntime.capi._pybind_state.SparseCooView 36 | :members: 37 | :undoc-members: 38 | 39 | SparseCsrView 40 | +++++++++++++ 41 | 42 | .. autoclass:: onnxruntime.capi._pybind_state.SparseCsrView 43 | :members: 44 | :undoc-members: 45 | 46 | SparseBlockSparseView 47 | +++++++++++++++++++++ 48 | 49 | .. autoclass:: onnxruntime.capi._pybind_state.SparseBlockSparseView 50 | :members: 51 | :undoc-members: 52 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/tools.rst: -------------------------------------------------------------------------------- 1 | 2 | ===== 3 | Tools 4 | ===== 5 | 6 | .. contents:: 7 | :local: 8 | 9 | Quantization 10 | ============ 11 | 12 | The main functions: 13 | 14 | ..
autofunction:: onnxruntime.quantization.quantize.quantize_dynamic 15 | 16 | .. autofunction:: onnxruntime.quantization.quantize.quantize_static 17 | 18 | .. autofunction:: onnxruntime.quantization.shape_inference.quant_pre_process 19 | 20 | Calibration: 21 | 22 | .. autoclass:: onnxruntime.quantization.calibrate.CalibrationDataReader 23 | :members: 24 | 25 | The parameters: 26 | 27 | .. autoclass:: onnxruntime.quantization.quant_utils.QuantFormat 28 | :members: 29 | 30 | .. autoclass:: onnxruntime.quantization.quant_utils.QuantizationMode 31 | :members: 32 | 33 | .. autoclass:: onnxruntime.quantization.quant_utils.QuantType 34 | :members: 35 |
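A minimal dynamic-quantization sketch tying these pieces together (the file paths are placeholder names, every other argument keeps its default):

::

    from onnxruntime.quantization import QuantType, quantize_dynamic

    # "model.onnx" and "model-int8.onnx" are placeholder paths.
    quantize_dynamic(
        model_input="model.onnx",
        model_output="model-int8.onnx",
        weight_type=QuantType.QInt8,  # quantize the weights to signed int8
    )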
-------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/training.rst: -------------------------------------------------------------------------------- 1 | 2 | Training 3 | ======== 4 | 5 | .. contents:: 6 | :local: 7 | 8 | .. faqref:: 9 | :title: Differences between onnxruntime and onnxruntime-training 10 | 11 | onnxruntime-training is an extension of onnxruntime 12 | that supports training. Version 1.10, for instance, is obtained by compiling 13 | onnxruntime from source with additional flags. 14 | One example: 15 | 16 | :: 17 | 18 | python ./tools/ci_build/build.py --build_dir ./build/debian \\ 19 | --config Release --build_wheel --numpy_version= \\ 20 | --skip_tests --build_shared_lib --enable_training \\ 21 | --enable_training_ops --enable_training_torch_interop \\ 22 | --parallel 23 | 24 | .. _l-ort-training-session: 25 | 26 | Python Wrapper for TrainingSession 27 | ++++++++++++++++++++++++++++++++++ 28 | 29 | .. autoclass:: onnxruntime.TrainingSession 30 | :members: 31 | :inherited-members: 32 | :undoc-members: 33 | 34 | .. _l-ort-training-session-c: 35 | 36 | C Class TrainingSession 37 | +++++++++++++++++++++++ 38 | 39 | .. autoclass:: onnxruntime.capi._pybind_state.TrainingSession 40 | :members: 41 | :undoc-members: 42 | 43 | TrainingParameters 44 | ++++++++++++++++++ 45 | 46 | .. autoclass:: onnxruntime.capi._pybind_state.TrainingParameters 47 | :members: 48 | :undoc-members: 49 | 50 | GradientGraphBuilder 51 | ++++++++++++++++++++ 52 | 53 | .. autoclass:: onnxruntime.capi._pybind_state.GradientGraphBuilder 54 | :members: 55 | :undoc-members: 56 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/training_partial.rst: -------------------------------------------------------------------------------- 1 | 2 | Partial Training 3 | ================ 4 | 5 | .. contents:: 6 | :local: 7 | 8 | OrtValueCache 9 | +++++++++++++ 10 | 11 | .. autoclass:: onnxruntime.capi._pybind_state.OrtValueCache 12 | :members: 13 | :undoc-members: 14 | 15 | TrainingAgent 16 | +++++++++++++ 17 | 18 | .. autoclass:: onnxruntime.capi._pybind_state.TrainingAgent 19 | :members: 20 | :undoc-members: 21 | 22 | PartialGraphExecutionState 23 | ++++++++++++++++++++++++++ 24 | 25 | .. autoclass:: onnxruntime.capi._pybind_state.PartialGraphExecutionState 26 | :members: 27 | :undoc-members: 28 | 29 | OrtModuleGraphBuilder 30 | +++++++++++++++++++++ 31 | 32 | .. autoclass:: onnxruntime.capi._pybind_state.OrtModuleGraphBuilder 33 | :members: 34 | :undoc-members: 35 | 36 | OrtModuleGraphBuilderConfiguration 37 | ++++++++++++++++++++++++++++++++++ 38 | 39 | .. autoclass:: onnxruntime.capi._pybind_state.OrtModuleGraphBuilderConfiguration 40 | :members: 41 | :undoc-members: 42 | 43 | TrainingGraphTransformerConfiguration 44 | +++++++++++++++++++++++++++++++++++++ 45 | 46 | .. autoclass:: onnxruntime.capi._pybind_state.TrainingGraphTransformerConfiguration 47 | :members: 48 | :undoc-members: 49 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/onnxruntime_python/training_torch.rst: -------------------------------------------------------------------------------- 1 | 2 | Training with onnxruntime and pytorch 3 | ===================================== 4 | 5 | .. contents:: 6 | :local: 7 | 8 | .. _l-ortmodule: 9 | 10 | ORTModule 11 | +++++++++ 12 | 13 | .. autoclass:: onnxruntime.training.ortmodule.ORTModule 14 | :members: 15 | :inherited-members: 16 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/plotting.rst: -------------------------------------------------------------------------------- 1 | 2 | Plotting 3 | ======== 4 | 5 | .. autosignature:: onnxcustom.plotting.plotting_onnx.plot_onnxs 6 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/training_utils.rst: -------------------------------------------------------------------------------- 1 | 2 | Training utilities 3 | ================== 4 | 5 | .. contents:: 6 | :local: 7 | 8 | ONNX 9 | ++++ 10 | 11 | .. autosignature:: onnxcustom.utils.onnx_helper.add_initializer 12 | 13 | .. autosignature:: onnxcustom.utils.onnx_helper.dtype_to_var_type 14 | 15 | .. autosignature:: onnxcustom.utils.onnx_helper.get_onnx_opset 16 | 17 | .. autosignature:: onnxcustom.utils.orttraining_helper.get_train_initializer 18 | 19 | .. autosignature:: onnxcustom.utils.onnx_helper.proto_type_to_dtype 20 | 21 | .. autosignature:: onnxcustom.utils.onnx_helper.onnx_rename_weights 22 | 23 | .. autosignature:: onnxcustom.utils.onnx_rewriter.onnx_rewrite_operator 24 | 25 | .. autosignature:: onnxcustom.utils.onnx_helper.replace_initializers_into_onnx 26 | 27 | onnxruntime 28 | +++++++++++ 29 | 30 | .. autosignature:: onnxcustom.utils.onnxruntime_helper.device_to_providers 31 | 32 | .. autosignature:: onnxcustom.utils.onnxruntime_helper.numpy_to_ort_value 33 | 34 | .. autosignature:: onnxcustom.utils.onnxruntime_helper.get_ort_device 35 | 36 | .. autosignature:: onnxcustom.utils.onnxruntime_helper.get_ort_device_type 37 | 38 | .. autosignature:: onnxcustom.utils.onnxruntime_helper.ort_device_to_string 39 | 40 | .. autosignature:: onnxcustom.utils.onnxruntime_helper.provider_to_device 41 | 42 | functions 43 | +++++++++ 44 | 45 | .. autosignature:: onnxcustom.utils.orttraining_helper.add_loss_output 46 | 47 | .. autosignature:: onnxcustom.utils.onnx_function.get_supported_functions 48 | 49 | .. autosignature:: onnxcustom.utils.onnx_function.function_onnx_graph 50 | 51 | .. autosignature:: onnxcustom.utils.orttraining_helper.penalty_loss_onnx 52 | 53 | gradient 54 | ++++++++ 55 | 56 | .. autosignature:: onnxcustom.training.grad_helper.onnx_derivative 57 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/api/utils.rst: -------------------------------------------------------------------------------- 1 | 2 | Utils 3 | ===== 4 | 5 | .. contents:: 6 | :local: 7 | 8 | Analysis, Parsing 9 | +++++++++++++++++ 10 | 11 | .. autosignature:: onnxcustom.utils.nvprof2json.convert_trace_to_json 12 | 13 | ..
autosignature:: onnxcustom.utils.nvprof2json.json_to_dataframe 14 | 15 | .. autosignature:: onnxcustom.utils.nvprof2json.json_to_dataframe_streaming 16 | 17 | Labelling 18 | +++++++++ 19 | 20 | .. autosignature:: onnxcustom.utils.imagenet_classes.get_class_names 21 | 22 | Splitting 23 | +++++++++ 24 | 25 | .. autosignature:: onnxcustom.utils.onnx_split.split_onnx 26 | 27 | Time 28 | ++++ 29 | 30 | .. autosignature:: onnxcustom.utils.benchmark.measure_time 31 | 32 | Debugging 33 | +++++++++ 34 | 35 | .. autosignature:: onnxcustom.utils.print_helper.str_ortvalue 36 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/blog/2021/2021-10-13_first.rst: -------------------------------------------------------------------------------- 1 | 2 | .. blogpost:: 3 | :title: First blog 4 | :keywords: first blog 5 | :date: 2021-10-13 6 | :categories: blog 7 | 8 | This package provides many examples to discover 9 | `onnxruntime `_ or 10 | `onnxruntime-training `_. 12 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/doc.rst: -------------------------------------------------------------------------------- 1 | 2 | Agility 3 | ======= 4 | 5 | .. contents:: 6 | :local: 7 | :depth: 1 8 | 9 | Examples 10 | ++++++++ 11 | 12 | .. exreflist:: 13 | :contents: 14 | 15 | Command line 16 | ++++++++++++ 17 | 18 | .. contents:: 19 | :local: 20 | 21 | Command ``check`` 22 | ^^^^^^^^^^^^^^^^^ 23 | 24 | .. cmdref:: 25 | :title: check 26 | :cmd: -m onnxcustom check --help 27 | 28 | Checks that the module works as expected. 29 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/github_link.py: -------------------------------------------------------------------------------- 1 | # Source: https://github.com/scikit-learn/scikit-learn/blob/ 2 | # master/doc/sphinxext/github_link.py 3 | from operator import attrgetter 4 | import inspect 5 | import subprocess 6 | import os 7 | import sys 8 | from functools import partial 9 | 10 | REVISION_CMD = 'git rev-parse --short HEAD' 11 | 12 | 13 | def _get_git_revision(): 14 | try: 15 | revision = subprocess.check_output(REVISION_CMD.split()).strip() 16 | except (subprocess.CalledProcessError, OSError): 17 | print('Failed to execute git to get revision') 18 | return None 19 | return revision.decode('utf-8') 20 | 21 | 22 | def _linkcode_resolve(domain, info, package, url_fmt, revision): 23 | """Determine a link to online source for a class/method/function 24 | This is called by sphinx.ext.linkcode 25 | An example with a long-untouched module that everyone has 26 | >>> _linkcode_resolve('py', {'module': 'tty', 27 | ... 'fullname': 'setraw'}, 28 | ... package='tty', 29 | ... url_fmt='http://hg.python.org/cpython/file/' 30 | ... '{revision}/Lib/{package}/{path}#L{lineno}', 31 | ...
revision='xxxx') 32 | 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18' 33 | """ 34 | 35 | if revision is None: 36 | return 37 | if domain not in ('py', 'pyx'): 38 | return 39 | if not info.get('module') or not info.get('fullname'): 40 | return 41 | 42 | class_name = info['fullname'].split('.')[0] 43 | module = __import__(info['module'], fromlist=[class_name]) 44 | obj = attrgetter(info['fullname'])(module) 45 | 46 | # Unwrap the object to get the correct source 47 | # file in case that is wrapped by a decorator 48 | obj = inspect.unwrap(obj) 49 | 50 | try: 51 | fn = inspect.getsourcefile(obj) 52 | except Exception: 53 | fn = None 54 | if not fn: 55 | try: 56 | fn = inspect.getsourcefile(sys.modules[obj.__module__]) 57 | except Exception: 58 | fn = None 59 | if not fn: 60 | return 61 | 62 | fn = os.path.relpath(fn, 63 | start=os.path.dirname(__import__(package).__file__)) 64 | try: 65 | lineno = inspect.getsourcelines(obj)[1] 66 | except Exception: 67 | lineno = '' 68 | return url_fmt.format(revision=revision, package=package, 69 | path=fn, lineno=lineno) 70 | 71 | 72 | def make_linkcode_resolve(package, url_fmt): 73 | """Returns a linkcode_resolve function for the given URL format 74 | revision is a git commit reference (hash or name) 75 | package is the name of the root module of the package 76 | url_fmt is along the lines of ('https://github.com/USER/PROJECT/' 77 | 'blob/{revision}/{package}/' 78 | '{path}#L{lineno}') 79 | """ 80 | revision = _get_git_revision() 81 | return partial(_linkcode_resolve, revision=revision, package=package, 82 | url_fmt=url_fmt) 83 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/i_cmd.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-CMD2: 3 | 4 | Command lines 5 | ============= 6 | 7 | .. cmdreflist:: 8 | :contents: 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/i_ex.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-EX2: 3 | 4 | Short examples 5 | ============== 6 | 7 | .. exreflist:: 8 | :contents: 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/i_faq.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-FAQ2: 3 | 4 | FAQ 5 | === 6 | 7 | .. contents:: 8 | :local: 9 | 10 | .. faqreflist:: 11 | :contents: 12 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/installation.rst: -------------------------------------------------------------------------------- 1 | 2 | Installation 3 | ============ 4 | 5 | The main dependency is :epkg:`onnxruntime-training`. It is only available 6 | on Linux. It is available from pypi for CPU. GPU versions are available at 7 | `download.onnxruntime.ai `_. 8 | Installing it replaces *onnxruntime*: the package includes both *onnxruntime* and 9 | *onnxruntime-training*. 10 | 11 | Installation of onnxruntime-training for GPU 12 | ++++++++++++++++++++++++++++++++++++++++++++ 13 | 14 | onnxruntime-training is only available on Linux. The CPU version 15 | can be installed with the following instruction: 16 | 17 | :: 18 | 19 | pip install onnxruntime-training --extra-index-url https://download.onnxruntime.ai/onnxruntime_nightly_cpu.html 20 | 21 | Versions using GPU with CUDA or ROCm are available. Check 22 | `download.onnxruntime.ai `_ 23 | to find a specific version.
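Once a version is installed, a quick check (a minimal sketch) tells which build is active and which execution providers it can use:

::

    import onnxruntime

    # "GPU" indicates a CUDA or ROCm build, "CPU" otherwise.
    print(onnxruntime.get_device())
    # Execution providers available in this build,
    # e.g. CPUExecutionProvider or CUDAExecutionProvider.
    print(onnxruntime.get_available_providers())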
24 | onnxruntime-training can also be used on Windows 25 | inside WSL (Windows Linux Subsystem), or compiled for CPU: 26 | 27 | :: 28 | 29 | python tools\ci_build\build.py --skip_tests --build_dir .\build\Windows --config Release --build_shared_lib --build_wheel --numpy_version= --cmake_generator="Visual Studio 16 2019" --enable_training --enable_training_ops --enable_training_torch_interop 30 | 31 | GPU versions work better on WSL, see `Build onnxruntime on WSL (Windows Linux Subsystem) 32 | `_. 33 | 34 | Installation of onnxcustom 35 | ++++++++++++++++++++++++++ 36 | 37 | :: 38 | 39 | pip install onnxcustom 40 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/license.rst: -------------------------------------------------------------------------------- 1 | .. _l-license: 2 | 3 | License 4 | ======= 5 | 6 | .. include:: LICENSE.txt 7 | :literal: 8 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/index.rst: -------------------------------------------------------------------------------- 1 | 2 | Markdown documentation for onnx and onnxruntime 3 | =============================================== 4 | 5 | This section only renders markdown documentation 6 | from :epkg:`onnx` or :epkg:`onnxruntime` using :epkg:`Sphinx` 7 | and :epkg:`myst-parser`. 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | 12 | index_onnx 13 | index_onnxruntime 14 | index_onnxruntime_gh_pages 15 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/index_onnx.rst: -------------------------------------------------------------------------------- 1 | 2 | ONNX documentation rendered with Sphinx 3 | ======================================= 4 | 5 | .. contents:: 6 | :local: 7 | 8 | Overview 9 | ++++++++ 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | onnx_docs/Overview.md 15 | onnx_docs/IR.md 16 | onnx_docs/PythonAPIOverview.md 17 | onnx_docs/OpConventions.md 18 | onnx_docs/DimensionDenotation.md 19 | onnx_docs/Broadcasting.md 20 | onnx_docs/ExternalData.md 21 | onnx_docs/Hub.md 22 | onnx_metadata 23 | onnx_docs/ShapeInference.md 24 | onnx_docs/CIPipelines.md 25 | 26 | Syntax 27 | ++++++ 28 | 29 | .. toctree:: 30 | :maxdepth: 1 31 | 32 | onnx_docs/Syntax.md 33 | 34 | Versions 35 | ++++++++ 36 | 37 | .. toctree:: 38 | :maxdepth: 1 39 | 40 | onnx_docs/Versioning.md 41 | onnx_docs/VersionConverter.md 42 | onnx_docs/Relicensing.md 43 | onnx_releases 44 | 45 | Operators 46 | +++++++++ 47 | 48 | .. toctree:: 49 | :maxdepth: 1 50 | 51 | onnx_operators 52 | onnx_operators_ml 53 | onnx_changelog 54 | onnx_changelog_ml 55 | onnx_test_coverage 56 | onnx_test_coverage_ml 57 | 58 | Contribute 59 | ++++++++++ 60 | 61 | .. toctree:: 62 | :maxdepth: 1 63 | 64 | onnx_contributing 65 | onnx_add_new_op 66 | onnx_docs/ImplementingAnOnnxBackend.md 67 | onnx_docs/OnnxBackendTest.md 68 | onnx_managing 69 | onnx_docs/ONNXIFI.md 70 | onnx_docs/ONNXTypes.md 71 | onnx_docs/TypeAnnotations.md 72 | onnx_docs/TypeDenotation.md 73 | 74 | Training 75 | ++++++++ 76 | 77 | ..
toctree:: 78 | :maxdepth: 1 79 | 80 | onnx_docs/DefineDifferentiability.md 81 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/index_onnxruntime.rst: -------------------------------------------------------------------------------- 1 | 2 | onnxruntime markdown documentation rendered with Sphinx 3 | ======================================================= 4 | 5 | The full documentation is available on 6 | `onnxruntime.ai/docs `_, 7 | with the `Python API `_. 8 | The following pages render the `markdown documentation 9 | `_. 10 | 11 | .. contents:: 12 | :local: 13 | 14 | Overview 15 | ++++++++ 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | 20 | onnxruntime_docs/Roadmap.md 21 | onnxruntime_docs/Privacy.md 22 | onnxruntime_docs/Server.md 23 | onnxruntime_docs/ONNX_Runtime_Server_Usage.md 24 | onnxruntime_docs/FAQ.md 25 | onnxruntime_docs/OperatorKernels.md 26 | 27 | Versions 28 | ++++++++ 29 | 30 | .. toctree:: 31 | :maxdepth: 1 32 | 33 | onnxruntime_docs/Versioning.md 34 | onnxruntime_docs/ORT_Format_Update_in_1.13.md 35 | 36 | Contributing 37 | ++++++++++++ 38 | 39 | .. toctree:: 40 | :maxdepth: 1 41 | 42 | onnxruntime_docs/Coding_Conventions_and_Standards.md 43 | onnxruntime_docs/ABI_Dev_Notes.md 44 | onnxruntime_docs/PR_Guidelines.md 45 | onnxruntime_docs/Model_Test.md 46 | onnxruntime_docs/NotesOnThreading.md 47 | onnxruntime_docs/Python_Dev_Notes.md 48 | 49 | C API 50 | +++++ 51 | 52 | .. toctree:: 53 | :maxdepth: 1 54 | 55 | onnxruntime_docs/How_To_Update_ONNX_Dev_Notes.md 56 | onnxruntime_docs/C_API_Guidelines.md 57 | onnxruntime_docs/cmake_guideline.md 58 | onnxruntime_docs/onnxruntime_extensions.md 59 | onnxruntime_docs/ContribOperators.md 60 | 61 | Optimization 62 | ++++++++++++ 63 | 64 | .. toctree:: 65 | :maxdepth: 1 66 | 67 | onnxruntime_docs/Memory_Optimizer.md 68 | 69 | Training 70 | ++++++++ 71 | 72 | .. toctree:: 73 | :maxdepth: 1 74 | 75 | onnxruntime_docs/ORTModule_Training_Guidelines.md 76 | 77 | Others 78 | ++++++ 79 | 80 | .. toctree:: 81 | :maxdepth: 1 82 | 83 | onnxruntime_docs/Android_testing.md 84 | onnxruntime_docs/ORTMobilePackageOperatorTypeSupport.md 85 | onnxruntime_docs/WinML_principles.md 86 | onnxruntime_docs/Reduced_Operator_Kernel_build.md 87 | onnxruntime_docs/ReleaseManagement.md 88 | onnxruntime_docs/TVM_EP.md 89 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/index_onnxruntime_gh_pages.rst: -------------------------------------------------------------------------------- 1 | 2 | onnxruntime (branch gh-pages) markdown documentation rendered with Sphinx 3 | ========================================================================= 4 | 5 | The full documentation is available on 6 | `onnxruntime.ai/docs `_, 7 | with the `Python API `_. 8 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_add_new_op.rst: -------------------------------------------------------------------------------- 1 | 2 | Adding a new operator 3 | ===================== 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/AddNewOp.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_changelog.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-md-change-logs: 3 | 4 | Change Logs 5 | =========== 6 | 7 | ..
toctree:: 8 | :maxdepth: 1 9 | 10 | onnx_docs/Changelog.md 11 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_changelog_ml.rst: -------------------------------------------------------------------------------- 1 | 2 | ML Change Logs 3 | ============== 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/Changelog-ml.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_contributing.rst: -------------------------------------------------------------------------------- 1 | 2 | Contributing 3 | ============ 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/CONTRIBUTING.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/Broadcasting.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Broadcasting in ONNX 4 | 5 | In ONNX, element-wise operators can take inputs with different shapes, 6 | as long as the input tensors are broadcastable to the same shape. 7 | ONNX supports two types of broadcasting: multidirectional broadcasting and 8 | unidirectional broadcasting. We will introduce these two types of broadcasting 9 | in the following sections. 10 | 11 | ## Multidirectional Broadcasting 12 | 13 | In ONNX, a set of tensors are multidirectional broadcastable to the same shape 14 | if one of the following is true: 15 | - The tensors all have exactly the same shape. 16 | - The tensors all have the same number of dimensions and the length of 17 | each dimension is either a common length or 1. 18 | - The tensors that have too few dimensions can have their shapes prepended 19 | with a dimension of length 1 to satisfy property 2. 20 | 21 | For example, the following tensor shapes are supported by multidirectional broadcasting: 22 | 23 | - shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar ==> shape(result) = (2, 3, 4, 5) 24 | - shape(A) = (2, 3, 4, 5), shape(B) = (5,), ==> shape(result) = (2, 3, 4, 5) 25 | - shape(A) = (4, 5), shape(B) = (2, 3, 4, 5), ==> shape(result) = (2, 3, 4, 5) 26 | - shape(A) = (1, 4, 5), shape(B) = (2, 3, 1, 1), ==> shape(result) = (2, 3, 4, 5) 27 | - shape(A) = (3, 4, 5), shape(B) = (2, 1, 1, 1), ==> shape(result) = (2, 3, 4, 5) 28 | 29 | Multidirectional broadcasting is the same as [Numpy's broadcasting](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html#general-broadcasting-rules). 30 | 31 | Multidirectional broadcasting is supported by the following operators in ONNX: 32 | - [Add](Operators.md#Add) 33 | - [And](Operators.md#And) 34 | - [Div](Operators.md#Div) 35 | - [Equal](Operators.md#Equal) 36 | - [Greater](Operators.md#Greater) 37 | - [Less](Operators.md#Less) 38 | - [Max](Operators.md#Max) 39 | - [Mean](Operators.md#Mean) 40 | - [Min](Operators.md#Min) 41 | - [Mul](Operators.md#Mul) 42 | - [Or](Operators.md#Or) 43 | - [Pow](Operators.md#Pow) 44 | - [Sub](Operators.md#Sub) 45 | - [Sum](Operators.md#Sum) 46 | - [Where](Operators.md#Where) 47 | - [Xor](Operators.md#Xor) 48 |
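Because these rules are numpy's rules, the examples above can be verified directly with numpy; a small sketch for the fourth example:

```python
import numpy as np

a = np.ones((1, 4, 5))     # shape(A) = (1, 4, 5)
b = np.ones((2, 3, 1, 1))  # shape(B) = (2, 3, 1, 1)
# numpy applies the same multidirectional rules as ONNX Add.
print((a + b).shape)       # (2, 3, 4, 5)
```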
49 | ## Unidirectional Broadcasting 50 | 51 | In ONNX, tensor B is unidirectional broadcastable to tensor A 52 | if one of the following is true: 53 | - Tensors A and B both have exactly the same shape. 54 | - Tensors A and B have the same number of dimensions and the length of 55 | each dimension is either a common length or B's length is 1. 56 | - Tensor B has too few dimensions, and B can have its shape prepended 57 | with a dimension of length 1 to satisfy property 2. 58 | 59 | When unidirectional broadcasting happens, the output's shape is the same as 60 | the shape of A (i.e., the larger shape of the two input tensors). 61 | 62 | In the following examples, tensor B is unidirectional broadcastable to tensor A: 63 | 64 | - shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar ==> shape(result) = (2, 3, 4, 5) 65 | - shape(A) = (2, 3, 4, 5), shape(B) = (5,), ==> shape(result) = (2, 3, 4, 5) 66 | - shape(A) = (2, 3, 4, 5), shape(B) = (2, 1, 1, 5), ==> shape(result) = (2, 3, 4, 5) 67 | - shape(A) = (2, 3, 4, 5), shape(B) = (1, 3, 1, 5), ==> shape(result) = (2, 3, 4, 5) 68 | 69 | Unidirectional broadcasting is supported by the following operators in ONNX: 70 | - [Gemm](Operators.md#Gemm) 71 | - [PRelu](Operators.md#PRelu) 72 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/ManagingExperimentalOps.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Deprecated Experimental Operators 4 | 5 | The following experimental operators were deprecated and removed from ONNX. They should be removed from models, either substituted with newer superseding operators or decomposed into functionally equivalent operators: 6 | 7 | Old operator |New Operator 8 | --------------------|-------------------------- 9 | `ATen` |NA 10 | `Affine` |`Add(Mul(X, alpha), beta)` 11 | `ConstantFill` |`ConstantOfShape` 12 | `Crop` |`Slice-1` 13 | `DynamicSlice` |`Slice-10` 14 | `GRUUnit` |NA 15 | `GivenTensorFill` |`Const` or `ConstantOfShape` 16 | `ImageScaler` |`Add(Mul(X, scale), Unsqueeze(bias, axes=[0, 2, 3]))` 17 | `ParametricSoftplus`|`Mul(alpha, Softplus(Mul(beta, X)))` 18 | `Scale` |`Mul(X, scale)` 19 | `ScaledTanh` |`Mul(Tanh(Mul(X, beta)), alpha)` 20 | 21 | ## Adding Experimental Operators [Deprecated - as of v1.5 experimental ops are no longer supported] 22 | 23 | The experimental flag in ONNX operator definitions indicates that a customer of ONNX may not be able to take a long-term dependency on that op. Ops in the ONNX namespace (ai.onnx) in the _main_ branch, whether experimental or not, go through the regular review process. 24 | 25 | Experimental ops that are being worked on that do not have consensus yet can be managed in one of two ways: 26 | 1. Use a fork or branch – what you do in the fork or branch is entirely up to you. When you are ready, you can submit a PR using the normal process. This is the recommended way. 27 | 2. If a fork/branch is not workable (for example due to complexity of mapping different branches between multiple repos), put the experimental ops in a custom namespace in the main branch. 28 | The specific process for this is: 29 | * Submit an Issue with a proposal explaining the motivation and plan. It does not need to include detailed technical design. Issues will be tagged as "experimental op". 30 | * Reviewers will generally approve by default unless the proposal directly conflicts with existing ops or somehow goes against general ONNX strategy. Approval is indicated by adding the "experiment approved" tag. 31 | * The approval is good for 3 months, but can be renewed if needed. 32 | * Experimental ops should be submitted in a PR in a custom namespace that is the name of the proposal, e.g. “proposal.controlflow”. The name should be descriptive rather than a company or entity name.
These PRs will be approved by default as long as the parent proposal is approved and active. 33 | * Once experimentation is done, the ops can be submitted for addition to the ONNX namespace via the regular process. The owner can also choose to end the experiment without promoting the ops. 34 | * Either way, the custom namespace is deleted once experimentation is complete or when the approval expires. 35 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/MetadataProps.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Metadata 4 | 5 | 6 | In addition to the core metadata recommendations listed in the [extensibility documentation](IR.md#optional-metadata) there is additional experimental metadata to help provide information for model inputs and outputs. 7 | 8 | This metadata applies to all input and output tensors of a given category. The first such category we define is: `Image`. 9 | 10 | ## Motivation 11 | 12 | The motivation of such a mechanism is to allow model authors to convey to model consumers enough information for them to consume the model. 13 | 14 | In the case of images there are many options for providing valid image data. However, a model which consumes images was trained with a particular set of these options, which must 15 | be used during inferencing. 16 | 17 | The goal of this proposal is to provide enough metadata that the model consumer can perform their own featurization prior to running the model and provide a compatible input or retrieve an output and know what its format is. 18 | 19 | ## Image Category Definition 20 | 21 | For every tensor in this model that uses [Type Denotation](TypeDenotation.md) to declare itself an `IMAGE`, you SHOULD provide metadata to assist the model consumer. Note that any metadata provided using this mechanism is global to ALL types 22 | with the accompanying denotation. 23 | 24 | Keys and values are case insensitive. 25 | 26 | Specifically, we define here the following set of image metadata: 27 | 28 | |Key|Value|Description| 29 | |-----|----|-----------| 30 | |`Image.BitmapPixelFormat`|__string__|Specifies the format of pixel data. Each enumeration value defines a channel ordering and bit depth. Possible values:<br/>
  • `Gray8`: 1 channel image, the pixel data is 8 bpp grayscale.
  • `Rgb8`: 3 channel image, channel order is RGB, pixel data is 8bpp (No alpha)
  • `Bgr8`: 3 channel image, channel order is BGR, pixel data is 8bpp (No alpha)
  • `Rgba8`: 4 channel image, channel order is RGBA, pixel data is 8bpp (Straight alpha)
  • `Bgra8`: 4 channel image, channel order is BGRA, pixel data is 8bpp (Straight alpha)
| 31 | |`Image.ColorSpaceGamma`|__string__|Specifies the gamma color space used. Possible values:
  • `Linear`: Linear color space, gamma == 1.0
  • `SRGB`: sRGB color space, gamma == 2.2
| 32 | |`Image.NominalPixelRange`|__string__|Specifies the range in which pixel values are stored. Possible values:<br/>
  • `NominalRange_0_255`: [0...255] for 8bpp samples
  • `Normalized_0_1`: [0...1] pixel data is stored normalized
  • `Normalized_1_1`: [-1...1] pixel data is stored normalized
  • `NominalRange_16_235`: [16...235] for 8bpp samples
| 33 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/ONNXTypes.md: -------------------------------------------------------------------------------- 1 | ## Optional Type 2 | 3 | An optional type represents a reference to either an element (could be Tensor, Sequence, Map, or Sparse Tensor) or a null value. The optional type appears in model inputs and outputs, as well as in intermediate values. 4 | 5 | ### Use-cases 6 | 7 | The optional type enables users to represent more dynamic typing scenarios in ONNX. Similar to the Optional[X] type hint in Python typing, which is equivalent to Union[None, X], optional types in ONNX may reference a single element or null. 8 | 9 | ### Examples in PyTorch 10 | The optional type only appears in TorchScript graphs generated by the jit script compiler. Scripting a model captures dynamic types where an optional value can be assigned either None or a value. 11 | 12 | - Example 1 13 | 14 | class Model(torch.nn.Module): 15 | def forward(self, x, y:Optional[Tensor]=None): 16 | if y is not None: 17 | return x + y 18 | return x 19 | 20 | Corresponding TorchScript graph: 21 | 22 | Graph( 23 | %self : __torch__.Model, 24 | %x.1 : Tensor, 25 | %y.1 : Tensor? 26 | ): 27 | %11 : int = prim::Constant[value=1]() 28 | %4 : None = prim::Constant() 29 | %5 : bool = aten::__isnot__(%y.1, %4) 30 | %6 : Tensor = prim::If(%5) 31 | block0(): 32 | %y.4 : Tensor = prim::unchecked_cast(%y.1) 33 | %12 : Tensor = aten::add(%x.1, %y.4, %11) 34 | -> (%12) 35 | block1(): 36 | -> (%x.1) 37 | return (%6) 38 | 39 | ONNX graph: 40 | 41 | Graph( 42 | %x.1 : Float(2, 3), 43 | %y.1 : Float(2, 3) 44 | ): 45 | %2 : Bool(1) = onnx::OptionalHasElement(%y.1) 46 | %5 : Float(2, 3) = onnx::If(%2) 47 | block0(): 48 | %3 : Float(2, 3) = onnx::OptionalGetElement(%y.1) 49 | %4 : Float(2, 3) = onnx::Add(%x.1, %3) 50 | -> (%4) 51 | block1(): 52 | %x.2 : Float(2, 3) = onnx::Identity(%x.1) 53 | -> (%x.2) 54 | return (%5) 55 | 56 | - Example 2 57 | 58 | class Model(torch.nn.Module): 59 | def forward( 60 | self, 61 | src_tokens, 62 | return_all_hiddens=torch.tensor([False]), 63 | ): 64 | encoder_states: Optional[Tensor] = None 65 | if return_all_hiddens: 66 | encoder_states = src_tokens 67 | 68 | return src_tokens, encoder_states 69 | 70 | Corresponding TorchScript graph: 71 | 72 | Graph( 73 | %src_tokens.1 : Float(3, 2, 4,), 74 | %return_all_hiddens.1 : Bool(1) 75 | ): 76 | %3 : None = prim::Constant() 77 | %encoder_states : Tensor?
= prim::If(%return_all_hiddens.1) 78 | block0(): 79 | -> (%src_tokens.1) 80 | block1(): 81 | -> (%3) 82 | return (%src_tokens.1, %encoder_states) 83 | 84 | ONNX graph: 85 | 86 | Graph( 87 | %src_tokens.1 : Float(3, 2, 4), 88 | %return_all_hiddens.1 : Bool(1) 89 | ): 90 | %2 : Float(3, 2, 4) = onnx::Optional[type=tensor(float)]() 91 | %3 : Float(3, 2, 4) = onnx::If(%return_all_hiddens.1) 92 | block0(): 93 | -> (%src_tokens.1) 94 | block1(): 95 | -> (%2) 96 | return (%3) 97 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/ONNX_logo_main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnx_docs/ONNX_logo_main.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/OnnxBackendTest.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### ONNX Backend Test 4 | 5 | #### What is ONNX Backend Test 6 | 7 | ONNX Backend Test is a test suite that each ONNX backend should run to verify whether it fulfills ONNX's standard. It serves both as a verification tool for backend implementations and one of the two ways to define each operator's expected behavior (the other way is to add it to the documentation). 8 | 9 | There are two types of tests in this suite – Node Tests and Model Tests: 10 | 11 | - **Node Tests** verify whether a backend is performing the correct computation, having the expected behavior of handling various attributes for each individual operator. In each test case, the backend will be given a node with some input, and the returned output will be compared with an expected output. 12 | - **Model Tests** verify the backend at the model level. The test cases are similar to the Node Tests', but instead of a node, the backend will be given an ONNX model. 13 | 14 | #### Contributing 15 | 16 | As ONNX aims to become the standard format for deep learning models, it's important to ensure that there is no ambiguity in each ONNX operator's definition; adding more test cases is the only way to enforce this. 17 | 18 | Node Tests are created as Python/Numpy code in [onnx/backend/test/case/node](/onnx/backend/test/case/node), and then exported to protobuf files to [onnx/backend/test/data/node](/onnx/backend/test/data/node) as the source of truth by invoking the shell command `backend-test-tools generate-data`. The test cases of each operator live in one standalone file, e.g. for the operator [Add](/docs/Operators.md#Add), its test cases are in [add.py](/onnx/backend/test/case/node/add.py), and each `expect(...)` statement in the code corresponds to one test case. The source code of all `export.*` functions will also be embedded as example code snippets in the [Operators documentation page](/docs/Operators.md). You are contributing to both the test and the documentation! 19 |
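As a sketch, a minimal test case modeled on [add.py](/onnx/backend/test/case/node/add.py) looks as follows; `expect` is the helper defined in the onnx/backend/test/case/node package:

```python
import numpy as np
import onnx
from onnx.backend.test.case.node import expect

# The node under test.
node = onnx.helper.make_node("Add", inputs=["x", "y"], outputs=["sum"])

x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
# One expect(...) call corresponds to one exported test case; the
# expected output is computed with numpy.
expect(node, inputs=[x, y], outputs=[x + y], name="test_add")
```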
20 | For Model Tests, since each model protobuf file can be large in size, we don't place the files directly in the repo. Rather, we upload them to the cloud, and download them on demand when running the tests. Each test case consists of one model definition protobuf file, and several pairs of input and output files. Adding a new test case involves some manual work from admins (like uploading the files to the cloud), so if you have an ONNX model that you would like to contribute, please contact us. 21 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/OpConventions.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Operator Conventions 4 | 5 | To maintain consistency in operator signatures, we use the following principles: 6 | - All attribute names should be lower case and use underscores when it helps with readability 7 | - Any input/output represented by a single letter is capitalized (e.g. X) 8 | - Any input/output represented by a full word or multiple words is all lower case and uses underscores when it helps with readability 9 | - Any input/output representing a bias tensor will utilize the name "B" 10 | - Any input/output representing a weight tensor will utilize the name "W" 11 | - "axes" is used when an input, output or attribute is representing multiple axes 12 | - "axis" is used when an input, output or attribute is representing a single axis 13 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/Overview.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | Overview 4 | ======== 5 | 6 | Deep learning with neural networks is accomplished through computation over dataflow graphs. Some frameworks (such as CNTK, Caffe2, Theano, and TensorFlow) make use of static graphs, while others (such as PyTorch and Chainer) use dynamic graphs. However, they all provide interfaces that make it simple for developers to construct computation graphs and runtimes that process the graphs in an optimized way. The graph serves as an Intermediate Representation (IR) that captures the specific intent of the developer's source code, and is conducive for optimization and translation to run on specific devices (CPU, GPU, FPGA, etc.). 7 | 8 | Why a common IR? 9 | ---------------- 10 | 11 | Today, each framework has its own proprietary representation of the graph, though they all provide similar capabilities – meaning each framework is a siloed stack of API, graph, and runtime. Furthermore, frameworks are typically optimized for some characteristic, such as fast training, supporting complicated network architectures, inference on mobile devices, etc. It's up to the developer to select a framework that is optimized for one of these characteristics. Additionally, these optimizations may be better suited for particular stages of development. This leads to significant delays between research and production due to the necessity of conversion. 12 | 13 | With the goal of democratizing AI, we envision empowering developers to select the framework that works best for their project, at any stage of development or deployment. The Open Neural Network Exchange (ONNX) format is a common IR to help establish this powerful ecosystem. 14 | 15 | By providing a common representation of the computation graph, ONNX helps developers choose the right framework for their task, allows authors to focus on innovative enhancements, and enables hardware vendors to streamline optimizations for their platforms. 16 | 17 | ONNX is designed to be an open format. We welcome contributions from the community and encourage everyone to adopt ONNX in their ecosystem. 18 | 19 | Why two variants?
20 | ----------------- 21 | 22 | The base definition of ONNX includes the necessary support for machine learning algorithms based on neural network technologies. ONNX-ML includes additional types and standard operators commonly used in classical machine learning algorithms. The two variants were created in order to explicitly recognize the desire for some frameworks to go beyond neural network algorithms in a standardized fashion, while allowing other frameworks to support only neural networks. 23 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/README.txt: -------------------------------------------------------------------------------- 1 | Copied from 2 | https://github.com/onnx/onnx/tree/master/docs -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/Relicensing.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Relicensing MIT to Apache-2.0 4 | 5 | The following copyright holders agree that all of their contributions originally submitted to this project under the MIT license are hereby relicensed to Apache-2.0, and are submitted pursuant to the Developer Certificate of Origin, version 1.1: 6 | 7 | Intel Corporation 8 | Microsoft Corporation 9 | NVIDIA Corporation 10 | IBM Corporation 11 | Facebook Inc. 12 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/Syntax.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | Overview 4 | ======== 5 | 6 | This document describes a textual syntax for ONNX models, which is currently an experimental feature. 7 | The syntax enables a compact and readable representation of ONNX models. It is motivated by a couple 8 | of use-cases. One is to enable compact description of test-cases and its use in CI (both in the ONNX 9 | repo as well as in other dependent repos such as ONNX-MLIR). The second is to help simplify the 10 | definition of ONNX functions. Several of the existing function-definitions are verbose, and the 11 | use of this syntax will lead to more compact, readable, and easier-to-maintain function definitions. 12 | Efficient representation and efficient parsing of very large tensor-constants is *not* a goal. 13 | Alternative methods should be used for that. 14 | 15 | The API 16 | ------- 17 | 18 | The key parser methods are the ```OnnxParser::Parse``` methods, used as below. 19 | 20 | ```cpp 21 | const char* code = R"ONNX( 22 | < 23 | ir_version: 7, 24 | opset_import: [ "" : 10 ] 25 | > 26 | agraph (float[N, 128] X, float[128, 10] W, float[10] B) => (float[N, 10] C) 27 | { 28 | T = MatMul(X, W) 29 | S = Add(T, B) 30 | C = Softmax(S) 31 | } 32 | )ONNX"; 33 | 34 | ModelProto model; 35 | OnnxParser::Parse(model, code); 36 | 37 | checker::check_model(model); 38 | ``` 39 | 40 | See the [test-cases](../onnx/test/cpp/parser_test.cc) for more examples illustrating the API and syntax. 41 | 42 | The Syntax 43 | ---------- 44 | 45 | The grammar below describes the syntax: 46 | 47 | ``` 48 | id-list ::= id (',' id)* 49 | tensor-dim ::= '?' 
| id | int-constant 50 | tensor-dims ::= tensor-dim (',' tensor-dim)* 51 | tensor-type ::= prim-type | prim-type '[' ']' | prim-type '[' tensor-dims ']' 52 | type ::= tensor-type | 'seq' '(' type ')' | 'map' '(' prim-type ',' type ')' 53 | | 'optional' '(' type ')' | 'sparse_tensor' '(' tensor-type ')' 54 | value-info ::= type id 55 | value-infos ::= value-info (',' value-info)* 56 | value-info-list ::= '(' value-infos? ')' 57 | prim-constants ::= prim-constant (',' prim-constant)* 58 | tensor-constant ::= tensor-type (id)? ('=')? '{' prim-constants '}' 59 | attr-ref ::= '@' id 60 | single-attr-value ::= tensor-constant | graph | prim-constant | attr-ref 61 | attr-value-list ::= '[' single-attr-value (',' single-attr-value)* ']' 62 | attr-value ::= single-attr-value | attr-value-list 63 | attr-type ::= ':' id 64 | attr ::= id attr-type? '=' attr-value 65 | attr-list ::= '<' attr (',' attr)* '>' 66 | node ::= id-list? '=' qualified-id attr-list? '(' id-list? ')' 67 | | id-list? '=' qualified-id '(' id-list? ')' attr-list 68 | node-list ::= '{' node* '}' 69 | graph ::= id value-info-list '=>' value-info-list node-list 70 | other-data ::= id ':' value 71 | other-data-list ::= '<' other-data (',' other-data)* '>' 72 | fun-attr-list ::= '<' id-list '>' 73 | fun-input-list ::= '(' id-list ')' 74 | fun-output-list ::= '(' id-list ')' 75 | function ::= other-data-list? id fun-attr-list? fun-input-list '=>' fun-output-list node-list 76 | model ::= other-data-list? graph function* 77 | ``` 78 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/TestCoverage-ml.md: -------------------------------------------------------------------------------- 1 | 2 | # Test Coverage Report (ONNX-ML Operators) 3 | ## Outlines 4 | * [Node Test Coverage](#node-test-coverage) 5 | * [Model Test Coverage](#model-test-coverage) 6 | * [Overall Test Coverage](#overall-test-coverage) 7 | # Node Test Coverage 8 | ## Summary 9 | Node tests have covered 0/18 (0.00%, 0 generators excluded) common operators. 10 | 11 | Node tests have covered 0/0 (N/A) experimental operators. 12 | 13 | * [Covered Common Operators](#covered-common-operators) 14 | * [No Cover Common Operators](#no-cover-common-operators) 15 | * [Covered Experimental Operators](#covered-experimental-operators) 16 | * [No Cover Experimental Operators](#no-cover-experimental-operators) 17 | 18 | ## 💚Covered Common Operators 19 |
20 | 21 | ## 💔No Cover Common Operators 22 | ### ArrayFeatureExtractor (call for test cases) 23 | 24 | ### Binarizer (call for test cases) 25 | 26 | ### CastMap (call for test cases) 27 | 28 | ### CategoryMapper (call for test cases) 29 | 30 | ### DictVectorizer (call for test cases) 31 | 32 | ### FeatureVectorizer (call for test cases) 33 | 34 | ### Imputer (call for test cases) 35 | 36 | ### LabelEncoder (call for test cases) 37 | 38 | ### LinearClassifier (call for test cases) 39 | 40 | ### LinearRegressor (call for test cases) 41 | 42 | ### Normalizer (call for test cases) 43 | 44 | ### OneHotEncoder (call for test cases) 45 | 46 | ### SVMClassifier (call for test cases) 47 | 48 | ### SVMRegressor (call for test cases) 49 | 50 | ### Scaler (call for test cases) 51 | 52 | ### TreeEnsembleClassifier (call for test cases) 53 | 54 | ### TreeEnsembleRegressor (call for test cases) 55 | 56 | ### ZipMap (call for test cases) 57 | 58 |
59 | 60 | ## 💚Covered Experimental Operators 61 |
62 | 63 | ## 💔No Cover Experimental Operators 64 |
65 | 66 | # Model Test Coverage 67 | No model tests present for selected domain 68 | # Overall Test Coverage 69 | ## To be filled. 70 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/TypeDenotation.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Type Denotation 4 | 5 | Type Denotation is used to describe semantic information around what the inputs and outputs are. It is stored on the TypeProto message. 6 | 7 | ## Motivation 8 | 9 | The motivation of such a mechanism can be illustrated via a simple example. The neural network SqueezeNet, for instance, takes in an NCHW image input float[1,3,224,224] and produces an output float[1,1000,1,1]: 10 | 11 | ``` 12 | input_in_NCHW -> data_0 -> SqueezeNet() -> output_softmaxout_1 13 | ``` 14 | 15 | In order to run this model the user needs a lot of information. In this case the user needs to know: 16 | * the input is an image 17 | * the image is in the format of NCHW 18 | * the color channels are in the order of bgr 19 | * the pixel data is 8 bit 20 | * the pixel data is normalized as values 0-255 21 | 22 | This proposal consists of three key components to provide all of this information: 23 | * Type Denotation, 24 | * [Dimension Denotation](DimensionDenotation.md), 25 | * [Model Metadata](MetadataProps.md). 26 | 27 | ## Type Denotation Definition 28 | 29 | To begin with, we define a set of semantic types that define what models generally consume as inputs and produce as outputs. 30 | 31 | Specifically, in our first proposal we define the following set of standard denotations: 32 | 33 | 0. `TENSOR` describes that a type holds a generic tensor using the standard TypeProto message. 34 | 1. `IMAGE` describes that a type holds an image. You can use dimension denotation to learn more about the layout of the image, and also the optional model metadata_props. 35 | 2. `AUDIO` describes that a type holds an audio clip. 36 | 3. `TEXT` describes that a type holds a block of text. 37 | 38 | Model authors SHOULD add type denotation to inputs and outputs for the model as appropriate. 39 | 40 | ## An Example with input IMAGE 41 | 42 | Let's use the same SqueezeNet example from above and show everything to properly annotate the model: 43 | 44 | * First set the TypeProto.denotation = `IMAGE` for the ValueInfoProto `data_0` 45 | * Because it's an image, the model consumer now knows to go look for image metadata on the model 46 | * Then include 3 metadata strings on ModelProto.metadata_props 47 | * `Image.BitmapPixelFormat` = `Bgr8` 48 | * `Image.ColorSpaceGamma` = `SRGB` 49 | * `Image.NominalPixelRange` = `NominalRange_0_255` 50 | * For that same ValueInfoProto, make sure to also use Dimension Denotations to denote NCHW 51 | * TensorShapeProto.Dimension[0].denotation = `DATA_BATCH` 52 | * TensorShapeProto.Dimension[1].denotation = `DATA_CHANNEL` 53 | * TensorShapeProto.Dimension[2].denotation = `DATA_FEATURE` 54 | * TensorShapeProto.Dimension[3].denotation = `DATA_FEATURE` 55 | 56 | Now there is enough information in the model to know everything about how to pass a correct image into the model. 57 |
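A sketch of these steps with the onnx Python API (the model path is a placeholder, and `data_0` is assumed to be the first graph input):

```python
import onnx
from onnx import helper

model = onnx.load("squeezenet.onnx")  # placeholder path
data_0 = model.graph.input[0]

# Type denotation on the input tensor.
data_0.type.denotation = "IMAGE"

# Dimension denotations for NCHW.
dims = data_0.type.tensor_type.shape.dim
for dim, denotation in zip(dims, ["DATA_BATCH", "DATA_CHANNEL",
                                  "DATA_FEATURE", "DATA_FEATURE"]):
    dim.denotation = denotation

# Image metadata on the model.
helper.set_model_props(model, {
    "Image.BitmapPixelFormat": "Bgr8",
    "Image.ColorSpaceGamma": "SRGB",
    "Image.NominalPixelRange": "NominalRange_0_255",
})
```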
-------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/VersionConverter.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # ONNX Version Converter 4 | 5 | ONNX provides a library for converting ONNX models between different 6 | opset versions. The primary motivation is to improve backwards compatibility of ONNX 7 | models without having to strengthen the spec for ONNX backends. This 8 | allows backend developers to offer support for a particular opset version 9 | and for users to write or export models to a particular opset version but 10 | run in an environment with a different opset version. Implementation-wise, the library leverages an in-memory representation that is much more convenient to manipulate than the raw protobuf structs, together with converters to and from the protobuf format, both of which were developed for the ONNX Optimizer. 11 | 12 | You may be interested in invoking the provided op-specific adapters, or in 13 | implementing new ones (or both). Default adapters only work in the default 14 | domain, but they can be generalized to work cross-domain or to use new 15 | conversion methods, depending on the nature of the relevant breaking changes. 16 | 17 | ## Invoking The Version Converter 18 | 19 | The version converter may be invoked either via C++ or Python. 20 | 21 | The Python API 22 | is described, with an example, 23 | [here](PythonAPIOverview.md#converting-version-of-an-onnx-model-within-default-domain-aionnx). 24 | 25 | The C++ API consists of a single function 26 | 27 | ``` 28 | ModelProto ConvertVersion( 29 | const ModelProto& mp_in, 30 | const OpSetID& initial_version, 31 | const OpSetID& target_version); 32 | ``` 33 | 34 | which accepts an input `ModelProto`, the initial opset version of the model, 35 | and the target opset version, and which returns a new `ModelProto` which 36 | is the result of applying all relevant adapters between initial_version and 37 | target_version. For a list of available passes, see 38 | [convert.h](/onnx/version_converter/convert.h). 39 | 40 | ## Implementing Adapters 41 | 42 | You can implement a new adapter by subclassing `Adapter`, and registering 43 | your new adapter with `VersionConverter::registerAdapter()`. Adapters operate 44 | on an in-memory graph representation defined in [ir.h](/onnx/common/ir.h). 45 | There are a number of examples in the [adapters](/onnx/version_converter/adapters) 46 | directory. Please ensure that all adapters convert from opset version i to i + 1 47 | or i - 1, i.e. from Version 6 to Version 5 or vice versa, even if the two versions 48 | being converted between are Version 1 and Version 6. 49 | 50 | If your adapter applies in the default domain, please consider adding it 51 | to the core ONNX repository. 52 |
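As a complement to the C++ signature shown earlier, the same conversion can be invoked from Python through `onnx.version_converter.convert_version`. A minimal sketch, with hypothetical file names and target opset:

```python
import onnx
from onnx import version_converter

model = onnx.load("model_opset12.onnx")  # hypothetical model file
converted = version_converter.convert_version(model, 15)  # convert to opset 15
onnx.save(converted, "model_opset15.onnx")
```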
-------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/onnx-horizontal-color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnx_docs/onnx-horizontal-color.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_docs/proposals/FunctionsProposal.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Proposal Adding Function into ONNX 4 | 5 | Motivation: 6 | 1. Reduce the number of primitive operators in ONNX. 7 | To make it easier for hardware vendors to follow ONNX, we want to make it possible to define composite operators in terms of more primitive operators, reducing the number of kernels which must be directly implemented. For example, FC should be declared to be a composition of MatMul and Add. 8 | 9 | 2. Expose a custom function capability for graph optimization. 10 | To provide a mechanism for graph optimization, say, kernel fusion (merging a subgraph into one node with efficient generated kernel code). This will in turn help hardware acceleration, since common kernel-fusion patterns may be pre-defined as common functions in ONNX, so that no sub-graph (function) matching is needed for kernel fusion anymore. For example, a subgraph with "Add", "Sigmoid", "Tanh" and "Mul" nodes could be merged into one fusion node with a generated CUDA kernel containing "+", "sigmoidf", "tanhf" and "*". 11 | 12 | 3. Provide a flexible RNN implementation. 13 | To define a library of RNN cells and allow the user to write a custom one. 14 | 15 | MAJOR CHANGES: 16 | 1. FunctionProto added to represent a function. 17 | 2. FunctionSetProto added to represent a function set. 18 | 3. AttributeProto updated to support the function attribute type and allow attribute references. 19 | 4. ModelProto updated to contain a customized function set. 20 | 21 | Prototype details can be found [here](https://github.com/linkerzhang/onnx/blob/kezhan/add_function_private/onnx/onnx.in.proto). 22 |
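Functions were later adopted into the ONNX standard as `FunctionProto`. As an illustration only (this API postdates the proposal), here is a minimal sketch of the FC example using today's `onnx.helper`; the domain name is hypothetical:

```python
from onnx import helper

# FC(X, W, B) = Add(MatMul(X, W), B), expressed as an ONNX function.
fc = helper.make_function(
    "custom.domain",  # hypothetical function domain
    "FC",
    ["X", "W", "B"],
    ["Y"],
    [
        helper.make_node("MatMul", ["X", "W"], ["T"]),
        helper.make_node("Add", ["T", "B"], ["Y"]),
    ],
    opset_imports=[helper.make_opsetid("", 13)],
)
print(fc)
```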
-------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_managing.rst: -------------------------------------------------------------------------------- 1 | 2 | Managing Experimental Operators 3 | =============================== 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/ManagingExperimentalOps.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_metadata.rst: -------------------------------------------------------------------------------- 1 | 2 | Metadata 3 | ========= 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/MetadataProps.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_operators.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-onnx-operators: 3 | 4 | ONNX Operators 5 | ============== 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | onnx_docs/Operators.md 11 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_operators_ml.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-onnx-operators-ml: 3 | 4 | ONNX ML Operators 5 | ================= 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | onnx_docs/Operators-ml.md 11 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_releases.rst: -------------------------------------------------------------------------------- 1 | 2 | Onnx Releases 3 | ============= 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/OnnxReleases.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_test_coverage.rst: -------------------------------------------------------------------------------- 1 | 2 | Test Coverage (Operators) 3 | ========================= 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/TestCoverage.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnx_test_coverage_ml.rst: -------------------------------------------------------------------------------- 1 | 2 | Test Coverage (ML Operators) 3 | ============================ 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | onnx_docs/TestCoverage-ml.md 9 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/ABI_Dev_Notes.md: -------------------------------------------------------------------------------- 1 | ## Global Variables 2 | Global variables may get constructed or destructed inside "DllMain". There are significant limits on what you can safely do in a DLL entry point. See ['DLL General Best Practices'](https://docs.microsoft.com/en-us/windows/desktop/dlls/dynamic-link-library-best-practices). For example, you can't put an ONNX Runtime InferenceSession into a global variable because it has a thread pool inside. 3 | 4 | ## Thread Local variables 5 | Onnxruntime must support explicit linking, where the operating system loads the DLL on demand at runtime, instead of at process startup time. This is required by our language bindings like C#/Java. 6 | 7 | However, there are some special restrictions on this. If a thread local variable needs non-trivial construction, then for threads that already exist before onnxruntime.dll is loaded, the variable won't get initialized correctly. So it's better to only access such variables from onnxruntime internal threads, or to make these variables function local (like the magic statics). 8 | 9 | ## No undefined symbols 10 | On Windows, you can't build a DLL with undefined symbols: every symbol must be resolved at link time. On Linux, you can. 11 | In order to simplify things, we require that every symbol be resolved at link time, on all platforms. This also makes it easier for us to control symbol visibility. 12 | 13 | ## Default visibility and how to export a symbol 14 | On Linux, by default, from the linker's point of view, every symbol is global. That is easy to use, but it also makes it much easier to cause conflicts and core dumps. We have encountered too many such problems in the ONNX python binding. Indeed, with a good design, each shared lib only needs to export **one** function. The ONNX Runtime python binding is a good example. See the [pybind11 FAQ](https://github.com/pybind/pybind11/blob/master/docs/faq.rst#someclass-declared-with-greater-visibility-than-the-type-of-its-field-someclassmember--wattributes) for more info. 15 | 16 | For controlling visibility, we use linker version scripts on Linux and def files on Windows. They work similarly: 17 | 1. Only C functions can be exported. 18 | 2. All the function names must be explicitly listed in a text file. 19 | 3. Don't export any C++ class/struct, or global variable. 20 | 21 | Also, on Linux and Mac operating systems, all the code must be compiled with "-fPIC". 22 | On Windows, we don't use dllexport but we still need dllimport. 23 | 24 | Therefore, our DLLEXPORT macro looks like: 25 | ``` 26 | #ifdef _WIN32 27 | // Define ORT_DLL_IMPORT if your program is dynamically linked to Ort. 28 | #ifdef ORT_DLL_IMPORT 29 | #define ORT_EXPORT __declspec(dllimport) 30 | #else 31 | #define ORT_EXPORT 32 | #endif 33 | #else 34 | #define ORT_EXPORT 35 | #endif 36 | ``` 37 | 38 | ## Static initialization order problem 39 | It's well known that C++ has the [static initialization order problem](https://isocpp.org/wiki/faq/ctors#static-init-order). Dynamic linking can ensure that onnxruntime's static variables are already initialized before any of onnxruntime's C APIs get called. The same applies to their destructors. This is good. On the other hand, static linking may place more usage restrictions on some of the APIs.
40 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/Android_testing.md: -------------------------------------------------------------------------------- 1 | # Testing Android Changes using the Emulator 2 | 3 | See [Android build instructions](https://www.onnxruntime.ai/docs/how-to/build.html#android) and information on the locations of the various files referred to here. 4 | 5 | ## Install the emulator 6 | 7 | If using Android Studio, this is included in the base install. 8 | 9 | If using sdkmanager, install the emulator by running 10 | - `sdkmanager[.bat] --install "emulator"` 11 | 12 | The emulator will emulate the Android device, not its processor, so you need to build onnxruntime 13 | with an ABI that's valid for the host machine, and install a system image that matches. 14 | For example, you can emulate a Pixel 3 device on an Intel 64-bit host, but it will require a binary built against x86_64 15 | rather than the arm64-v8a ABI of the real device. 16 | 17 | e.g. on Intel 64-bit you would build with `--android_abi x86_64` to create onnxruntime libraries/executables that can be run on the Android emulator. 18 | 19 | ## Create the device to emulate 20 | 21 | ### Android Studio 22 | 23 | Tools->AVD Manager->Create Virtual Device... 24 | 25 | Once created, the emulator can be started using the 'play' button in AVD Manager. 26 | 27 | ### sdkmanager 28 | 29 | First install a system image. Use `sdkmanager --list` to see the available system images. 30 | 31 | e.g. `sdkmanager --install "system-images;android-27;default;x86_64"` 32 | 33 | Create the virtual device using avdmanager[.bat] (which should be in the same directory as sdkmanager[.bat]). 34 | 35 | e.g. `avdmanager create avd -n android27_emulator -k "system-images;android-27;default;x86_64"` 36 | 37 | Run the emulator, 38 | e.g. `.../Android/emulator/emulator -avd android27_emulator -partition-size 2048 -no-snapshot -no-audio` 39 | 40 | ## Testing running a model on the emulator directly 41 | 42 | Use ADB to copy files and execute commands: 43 | 44 | https://developer.android.com/studio/command-line/adb 45 | 46 | ADB is located in the 'platform-tools' folder of the SDK directory. 47 | 48 | Copy onnx_test_runner and the directory of the model to test (in ONNX test directory format) to /data/local/tmp. 49 | 50 | ``` 51 | adb push /build///onnx_test_runner /data/local/tmp/ 52 | adb push /build///testdata/transform/gemm_activation_fusion /data/local/tmp/ 53 | ``` 54 | 55 | e.g.
on Windows that might be 56 | ``` 57 | \platform-tools\adb.exe push \build\Windows\Debug\onnx_test_runner /data/local/tmp/testdata 58 | \platform-tools\adb.exe push \build\Windows\Debug\testdata\transform\gemm_activation_fusion /data/local/tmp/ 59 | ``` 60 | 61 | You may need to change permissions to make onnx_test_runner executable: 62 | `\platform-tools\adb.exe shell chmod +x /data/local/tmp/onnx_test_runner` 63 | 64 | Run onnx_test_runner with the model directory: 65 | `\platform-tools\adb.exe shell 'cd /data/local/tmp && ./onnx_test_runner gemm_activation_fusion'` 66 | 67 | The output should look something like this: 68 | 69 | ``` 70 | D:\Android\platform-tools> .\adb.exe shell 'cd /data/local/tmp && ./onnx_test_runner gemm_activation_fusion' 71 | result: 72 | Models: 1 73 | Total test cases: 1 74 | Succeeded: 1 75 | Not implemented: 0 76 | Failed: 0 77 | Stats by Operator type: 78 | Not implemented(0): 79 | Failed: 80 | Failed Test Cases: 81 | ``` 82 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/FAQ.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | Here are some commonly raised questions from users of ONNX Runtime, as brought up in [Issues](https://github.com/microsoft/onnxruntime/issues). 3 | 4 | ## Do the GPU builds support quantized models? 5 | The default CUDA build supports 3 standard quantization operators: QuantizeLinear, DequantizeLinear, and MatMulInteger. The TensorRT EP has limited support for INT8 quantized ops. In general, support of quantized models through ORT is continuing to expand on a model-driven basis. For performance improvements, quantization is not always required, and we suggest trying alternative strategies to [performance tune](./ONNX_Runtime_Perf_Tuning.md) before determining that quantization is necessary. 6 | 7 | ## How do I change the severity level of the default logger to something other than the default (WARNING)? 8 | Setting the severity level to VERBOSE is most useful when debugging errors. 9 | 10 | Refer to the API documentation: 11 | * Python - [RunOptions.log_severity_level](https://microsoft.github.io/onnxruntime/python/api_summary.html#onnxruntime.RunOptions.log_severity_level) 12 | ``` 13 | import onnxruntime as ort 14 | ort.set_default_logger_severity(0) 15 | ``` 16 | * C - [SetSessionLogSeverityLevel](./../include/onnxruntime/core/session/onnxruntime_c_api.h) 17 | 18 | ## How do I load and run models that have multiple inputs and outputs using the C/C++ API? 19 | See an example from the 'override initializer' test in [test_inference.cc](./../onnxruntime/test/shared_lib/test_inference.cc) that has 3 inputs and 3 outputs. 20 | ``` 21 | std::vector<Ort::Value> ort_inputs; 22 | ort_inputs.push_back(std::move(label_input_tensor)); 23 | ort_inputs.push_back(std::move(f2_input_tensor)); 24 | ort_inputs.push_back(std::move(f11_input_tensor)); 25 | std::vector<const char*> input_names = {"Label", "F2", "F1"}; 26 | const char* const output_names[] = {"Label0", "F20", "F11"}; 27 | std::vector<Ort::Value> ort_outputs = session.Run(Ort::RunOptions{nullptr}, input_names.data(), 28 | ort_inputs.data(), ort_inputs.size(), output_names, countof(output_names)); 29 | ``` 30 | 31 | ## How do I force single threaded execution mode in ORT? 32 | 33 | By default, session.run() uses all the computer's cores. To limit use to a single thread only: 34 | * If built with OpenMP, set the environment variable OMP_NUM_THREADS to 1.
The default inter_op_num_threads in session options is already 1. 35 | * If not built with OpenMP, set the session options intra_op_num_threads to 1. Do not change the default inter_op_num_threads (1). 36 | 37 | It's recommended to build onnxruntime without OpenMP if you only need single threaded execution. 38 | 39 | This is supported in ONNX Runtime v1.3.0+. 40 | 41 | **Python example:** 42 | ``` 43 | #!/usr/bin/python3 44 | import os; os.environ["OMP_NUM_THREADS"] = "1"  # must be set before importing onnxruntime 45 | import onnxruntime 46 | 47 | opts = onnxruntime.SessionOptions() 48 | opts.inter_op_num_threads = 1 49 | opts.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL 50 | ort_session = onnxruntime.InferenceSession('/path/to/model.onnx', sess_options=opts) 51 | ``` 52 | 53 | **C++ example:** 54 | ``` 55 | // initialize environment...one environment per process 56 | Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test"); 57 | 58 | // initialize session options if needed 59 | Ort::SessionOptions session_options; 60 | session_options.SetInterOpNumThreads(1); 61 | #ifdef _WIN32 62 | const wchar_t* model_path = L"squeezenet.onnx"; 63 | #else 64 | const char* model_path = "squeezenet.onnx"; 65 | #endif 66 | 67 | Ort::Session session(env, model_path, session_options); 68 | ``` 69 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/How_To_Update_ONNX_Dev_Notes.md: -------------------------------------------------------------------------------- 1 | # How to update ONNX 2 | 3 | This note is only for ONNX Runtime developers. 4 | 5 | If you need to update the ONNX submodule to a different version, follow the steps below. 6 | 7 | 1. Update the ONNX submodule 8 | ```sh 9 | cd cmake/external/onnx 10 | git remote update 11 | git reset --hard <commit_id> 12 | cd .. 13 | git add onnx 14 | ``` 15 | (Change the <commit_id> to yours. If you are not sure, use 'origin/master', as in 'git reset --hard origin/master'.) 16 | 17 | 1. Update [cgmanifests/generated/cgmanifest.json](/cgmanifests/generated/cgmanifest.json). 18 | This file should be generated. See [cgmanifests/README](/cgmanifests/README.md) for instructions. 19 | 20 | 1. Update [tools/ci_build/github/linux/docker/scripts/requirements.txt](/tools/ci_build/github/linux/docker/scripts/requirements.txt) 21 | and [tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt](/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt). 22 | Update the commit hash for `git+http://github.com/onnx/onnx.git@targetonnxcommithash#egg=onnx`. 23 | 24 | 1. If there is any change to `cmake/external/onnx/onnx/*.in.proto`, you need to regenerate OnnxMl.cs. 25 | [Building onnxruntime with Nuget](https://onnxruntime.ai/docs/build/inferencing.html#build-nuget-packages) will do 26 | this. 27 | 28 | 1. If you are updating ONNX from a released tag to a new commit, please ask Changming (@snnn) to deploy the new test 29 | data along with other test models to our CI build machines. This is to ensure that our tests cover every ONNX opset. 30 | 31 | 1. Send your PR, and **manually** queue a build for every packaging pipeline for your branch. 32 | 33 | 1. If there is a build failure in stage "Check out of dated documents" in the WebAssembly CI pipeline, update the ONNX Runtime 34 | Web WebGL operator support document: 35 | - Make sure Node.js is installed (see [Prerequisites](../js/README.md#Prerequisites) for instructions). 36 | - Follow step 1 in [js/Build](../js/README.md#Build-2) to install dependencies.
37 | - Follow the instructions in [Generate document](../js/README.md#Generating-Document) to update the document. Commit the changes applied to file `docs/operators.md`. 38 | 39 | 1. Usually some newly introduced tests will fail. Then you may need to update 40 | - [onnxruntime/test/onnx/main.cc](/onnxruntime/test/onnx/main.cc) 41 | - [onnxruntime/test/providers/cpu/model_tests.cc](/onnxruntime/test/providers/cpu/model_tests.cc) 42 | - [csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs](/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs) 43 | - [onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc](/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc) 44 | - [onnxruntime/test/testdata/onnx_backend_test_series_overrides.jsonc](/onnxruntime/test/testdata/onnx_backend_test_series_overrides.jsonc) 45 | 46 | 1. If an operator has changed we may need to update optimizers involving that operator. 47 | - Run [find_optimizer_opset_version_updates_required.py](/tools/python/find_optimizer_opset_version_updates_required.py), compare with the output from the current main branch, and check for any new warnings. 48 | - If there are new warnings, contact the optimizer owner (which can usually be determined by looking at who edited the file most recently) or, failing that, ask the 'ONNX Runtime Shared Core' mailing list. 49 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/Model_Test.md: -------------------------------------------------------------------------------- 1 | ONNX has a collection of standard tests. This document describes how to run these tests through a C++ program named 'onnx_test_runner' in this repo. You could also run these tests through the onnxruntime python binding, which would be much easier to set up but a bit harder for debugging issues. 2 | 3 | # Get the test data 4 | You should have: 5 | 1. onnx single node test data 6 | 2. onnx model zoo models 7 | 8 | ## Install onnx python package 9 | You can get the onnx python package from [pypi](https://pypi.org/). However, if you are an onnxruntime developer, you may need to work on a cutting edge ONNX version. In this case, you need to build and install ONNX from source code. 10 | 11 | ### Install ONNX from source code 12 | 1. (windows) set ONNX_ML=1 13 | (linux) export ONNX_ML=1 14 | 2. Install protobuf and put protoc into your PATH environment variable. When you compile protobuf, it's better to only enable the static libraries. 15 | 3. run "python setup.py bdist_wheel" and "pip install dist/*.whl" 16 | 17 | ## Generate node test data 18 | $ python3 -m onnx.backend.test.cmd_tools generate-data -o 19 | e.g. 20 | python3 -m onnx.backend.test.cmd_tools generate-data -o C:\testdata 21 | 22 | ## Get more models 23 | Download https://onnxruntimetestdata.blob.core.windows.net/models/20190419.zip and unzip it. 24 | 25 | # Compile onnx_test_runner and run the tests 26 | onnx_test_runner is a C++ program. Its source code is in the onnxruntime/test/onnx directory. 27 | 28 | Usage: onnx_test_runner [options...] 29 | Options: 30 | -j [models]: Specifies the number of models to run simultaneously. 31 | -A: Disable memory arena. 32 | -c [runs]: Specifies the number of Session::Run() to invoke simultaneously for each model. 33 | -r [repeat]: Specifies the number of times to repeat. 34 | -v: verbose 35 | -n [test_case_name]: Specifies a single test case to run.
36 | -e [EXECUTION_PROVIDER]: EXECUTION_PROVIDER could be 'cpu', 'cuda', 'dnnl' or 'tensorrt'. Default: 'cpu'. 37 | -x: Use parallel executor, default (without -x): sequential executor. 38 | -h: help 39 | 40 | e.g. 41 | //run the tests under the C:\testdata dir and enable the CUDA provider 42 | $ onnx_test_runner -e cuda C:\testdata 43 | 44 | //run the tests sequentially. It would be easier to debug 45 | $ onnx_test_runner -c 1 -j 1 C:\testdata 46 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/NotesOnThreading.md: -------------------------------------------------------------------------------- 1 | # Notes on Threading in ORT 2 | 3 | This document is intended for ORT developers. 4 | 5 | ORT allows the usage of either OpenMP or non-OpenMP (ORT) threads for execution. Threadpool management 6 | is abstracted behind: (1) the ThreadPool class in [threadpool.h](https://github.com/microsoft/onnxruntime/blob/main/include/onnxruntime/core/platform/threadpool.h) and (2) functions in [thread_utils.h](https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/util/thread_utils.h). 7 | 8 | When developing an op, please use these abstractions to parallelize your code. These abstractions centralize the choice of threading backend: 9 | when OpenMP is enabled, they resort to using OpenMP; when OpenMP is disabled, they resort to sequential execution if the threadpool pointer is NULL, or schedule the tasks on the threadpool otherwise. 10 | 11 | Examples of these abstractions are ([threadpool.h](https://github.com/microsoft/onnxruntime/blob/main/include/onnxruntime/core/platform/threadpool.h) has more documentation for these): 12 | * TryParallelFor 13 | * TrySimpleParallelFor 14 | * TryBatchParallelFor 15 | * ShouldParallelize 16 | * DegreeOfParallelism 17 | 18 | These static methods abstract over the different implementation choices. They can run over the ORT thread pool, or run over OpenMP, or run sequentially. 19 | 20 | In addition, ThreadPool::ParallelSection allows a series of loops to 21 | be grouped together in a single parallel section. This allows an 22 | operator to amortize loop entry/exit costs in cases where it is 23 | impractical to refactor code into a single large loop. 24 | 25 | **Please do not write #ifdef pragma omp in operator code**. 26 | 27 | For intra op parallelism, ORT users can use either OpenMP or the ORT threadpool. The choice of OpenMP is indicated by building ORT with the ```--use_openmp``` switch. For inter op parallelism, however, ORT always uses the ORT threadpool. 28 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/ORT_Format_Update_in_1.13.md: -------------------------------------------------------------------------------- 1 | # ORT Format Update in 1.13 2 | 3 | In ONNX Runtime 1.13, there was a breaking change to the 4 | [ORT format](https://onnxruntime.ai/docs/reference/ort-format-models.html) (version 5) in order to enable additional 5 | execution providers with statically registered kernels in a minimal build. 6 | More details can be found [here](../onnxruntime/core/flatbuffers/schema/README.md#version-5). 7 | 8 | ## Backwards Compatibility 9 | 10 | ### ONNX Runtime 1.13 11 | Any older models (prior to ORT format version 5) will no longer work with ONNX Runtime 1.13 and must be re-converted. 12 | 13 | ### ONNX Runtime 1.14+ 14 | ONNX Runtime 1.14+ provides limited backwards compatibility for loading older models (prior to ORT format version 5).
15 | - In a full build, older models may be loaded but any saved runtime optimizations will be ignored. 16 | - In a minimal build, older models cannot be loaded. 17 | 18 | An older model may be re-converted. 19 | 20 | It is also possible to load an older ORT format model in a full build and then save it back out as an ORT format model. 21 | This process may be used to upgrade an ORT format model. However, any saved runtime optimizations from the older model 22 | will be ignored. 23 | 24 | ## Re-converting an ORT format model 25 | Please refer to the instructions 26 | [here](https://onnxruntime.ai/docs/reference/ort-format-models.html#convert-onnx-models-to-ort-format) 27 | on how to convert an ONNX model to ORT format. 28 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/PR_Guidelines.md: -------------------------------------------------------------------------------- 1 | # Guidelines for creating a good pull request 2 | 3 | 1. A PR should describe the change clearly and, most importantly, mention the motivation behind the change. Filling out the PR template should satisfy this guideline. 2. If the PR is fixing a performance issue, mention the improvement and how the measurement was done (for educational purposes). 3. Do not leave comments unresolved. If PR comments have been addressed without making the requested code changes, explicitly mark them resolved with an appropriate comment explaining why you're resolving it. If you intend to resolve it in a follow-up PR, create a task and mention why this comment cannot be fixed in this PR. Leaving comments unresolved sets a wrong precedent for other contributors that it's ok to ignore comments. 6 | 4. In the interest of time, discuss the PR/comments in person or by phone if it's difficult to explain in writing. Document the resolution in the PR for the educational benefit of others. Don't just mark the comment resolved saying 'based on offline discussion'. 7 | 5. Add comments, if not obvious, in the PR to help the reviewer navigate your PR faster. If this is a big change, include a short design doc (docs/ folder). 8 | 6. Unit tests are mandatory for all PRs (except when the proposed changes are already covered by existing unit tests). 9 | 7. Do not use PRs as scratch pads for development as they consume valuable build/CI cycles for every commit. Build and test your changes for at least one environment (windows/linux/mac) before creating a PR. 10 | 8. Keep it small. If the feature is big, it's best to split it into multiple PRs. Modulo cosmetic changes, a PR with more than 10 files is notoriously hard to review. Be kind to the reviewers. 11 | 9. Separate cosmetic changes from functional changes by making them separate PRs. 12 | 10. The PR author is responsible for merging the changes once they're approved. 13 | 11. If you co-author a PR, seek review from someone else. Do not self-approve PRs. 14 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/Privacy.md: -------------------------------------------------------------------------------- 1 | # Privacy 2 | 3 | ## Data Collection 4 | The software may collect information about you and your use of the software and send it to Microsoft. Microsoft may use this information to provide services and improve our products and services. You may turn off the telemetry as described in the repository.
There are also some features in the software that may enable you and Microsoft to collect data from users of your applications. If you use these features, you must comply with applicable law, including providing appropriate notices to users of your applications together with a copy of Microsoft's privacy statement. Our privacy statement is located at https://go.microsoft.com/fwlink/?LinkID=824704. You can learn more about data collection and use in the help documentation and our privacy statement. Your use of the software operates as your consent to these practices. 5 | 6 | *** 7 | 8 | ### Private Builds 9 | No data collection is performed when using your private builds built from source code. 10 | 11 | ### Official Builds 12 | ONNX Runtime does not maintain any independent telemetry collection mechanisms outside of what is provided by the platforms it supports. However, where applicable, ONNX Runtime will take advantage of platform-supported telemetry systems to collect trace events with the goal of improving product quality. 13 | 14 | Currently, telemetry is only implemented for Windows builds and is turned **ON** by default in the official builds distributed in their respective package management repositories ([see here](../README.md#binaries)). This may be expanded to cover other platforms in the future. Data collection is implemented via 'Platform Telemetry' per vendor platform providers (see [telemetry.h](../onnxruntime/core/platform/telemetry.h)). 15 | 16 | #### Technical Details 17 | The Windows provider uses the [TraceLogging](https://docs.microsoft.com/en-us/windows/win32/tracelogging/trace-logging-about) API for its implementation. This enables ONNX Runtime trace events to be collected by the operating system, and based on user consent, this data may be periodically sent to Microsoft servers following GDPR and privacy regulations for anonymity and data access controls. 18 | 19 | Windows ML and onnxruntime C APIs allow Trace Logging to be turned on/off (see [API pages](../README.md#api-documentation) for details). 20 | For information on how to enable and disable telemetry, see [C API: Telemetry](./C_API.md#telemetry). 21 | There are equivalent APIs in the C#, Python, and Java language bindings as well. 22 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/Python_Dev_Notes.md: -------------------------------------------------------------------------------- 1 | # Python Dev Notes 2 | 3 | Each Python version uses a specific compiler version. In most cases, you should use the same compiler version for building python extensions. 4 | 5 | ## Which Microsoft Visual C++ compiler to use with a specific Python version? 6 | 7 | | Visual C++ | CPython | 8 | |-------------|:-----------------------:| 9 | |2015, 2017 | 3.7 | 10 | |2015 | 3.5,3.6 | 11 | |2010 | 3.3,3.4 | 12 | |2008 | 2.6, 2.7, 3.0, 3.1, 3.2 | 13 | 14 | Currently, the official ONNXRuntime Python wheel (v1.3.0 onwards) hosted on PyPi requires the [Visual C++ 2019 runtime](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads) installed on the target machine. 15 | 16 | If the Python wheel is built from source using the build toolset provided with Visual Studio 2017, it will work with the Visual C++ 2017 runtime. 17 | 18 | CPython 3.7 is distributed with a VC++ 2017 runtime. Unlike earlier VC++ versions, the VC++ 2017 Runtime is binary backward compatible with VC++ 2015. This means you can build your application with VC++ 2015 and then run it with the VC++ 2017 runtime.
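As a quick illustration (not from the original notes), the compiler a CPython build used can be checked from Python itself:

```python
import platform
import sys

print(sys.version)                  # the banner includes the MSC version on Windows
print(platform.python_compiler())  # e.g. 'MSC v.1916 64 bit (AMD64)'
```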
19 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/README.txt: -------------------------------------------------------------------------------- 1 | Copied from 2 | https://github.com/microsoft/onnxruntime/tree/master/docs -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/ReleaseManagement.md: -------------------------------------------------------------------------------- 1 | # Release Management 2 | 3 | Releases are versioned according to 4 | [Versioning](Versioning.md). Official releases of ONNX Runtime are managed by the core ONNX Runtime team and packages will be published at least every 6 months. 5 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/Server.md: -------------------------------------------------------------------------------- 1 | ## Build ONNX Runtime Server on Linux 2 | 3 | **Deprecation Note: This feature is deprecated and no longer supported.** 4 | 5 | Read more about ONNX Runtime Server [here](./ONNX_Runtime_Server_Usage.md). 6 | 7 | ### Prerequisites 8 | 9 | 1. [golang](https://golang.org/doc/install) 10 | 2. [grpc](https://github.com/grpc/grpc/blob/master/BUILDING.md). Please be aware that the docs at "[https://grpc.io/docs/quickstart/cpp/](https://grpc.io/docs/quickstart/cpp/)" are outdated, because building with make on UNIX systems is deprecated. 11 | 3. [re2](https://github.com/google/re2) 12 | 4. cmake 13 | 5. gcc and g++ 14 | 6. onnxruntime C API binaries. Please get them from [github releases](https://github.com/microsoft/onnxruntime/releases) and extract them to your "/usr" or "/usr/local" folder. 15 | 16 | See [install_server_deps.sh](../tools/ci_build/github/linux/docker/scripts/install_server_deps.sh) for more details. 17 | 18 | ### Build Instructions 19 | ``` 20 | cd server 21 | mkdir build && cd build 22 | cmake -DCMAKE_BUILD_TYPE=Debug .. 23 | make 24 | ``` 25 | 26 | ONNX Runtime Server supports sending logs to a [rsyslog](https://www.rsyslog.com/) daemon. To enable it, please run the cmake command with an additional parameter: `-Donnxruntime_USE_SYSLOG=1`. 27 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/Versioning.md: -------------------------------------------------------------------------------- 1 | # Versioning 2 | 3 | ## API 4 | ONNX Runtime follows [Semantic Versioning 2.0](https://semver.org/) for its public API. 5 | Each release has the form MAJOR.MINOR.PATCH, adhering to the definitions from the linked semantic versioning doc. 6 | 7 | ## Current stable release version 8 | The version number of the current stable release can be found 9 | [here](../VERSION_NUMBER). 10 | 11 | ## Release cadence 12 | See [Release Management](ReleaseManagement.md). 13 | 14 | # Compatibility 15 | 16 | ## Backwards compatibility 17 | All versions of ONNX Runtime will support ONNX opsets all the way back to (and including) opset version 7. 18 | In other words, if an ONNX Runtime release implements ONNX opset version 9, it will be able to run all 19 | models that are stamped with ONNX opset versions in the range [7-9].
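A minimal sketch of inspecting the opsets a model is stamped with before handing it to ONNX Runtime (the model file name is hypothetical):

```python
import onnx
import onnxruntime

model = onnx.load("model.onnx")  # hypothetical model file
for opset in model.opset_import:
    print(opset.domain or "ai.onnx", opset.version)

# Session creation fails if the installed onnxruntime does not support an opset.
sess = onnxruntime.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
```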
20 | 21 | ### Version matrix 22 | The [table](https://onnxruntime.ai/docs/reference/compatibility.html#onnx-opset-support) summarizes the relationship between the ONNX Runtime version and the ONNX opset version implemented in that release. 23 | Please note the backward compatibility notes above. 24 | For more details on ONNX Release versions, see [this page](https://github.com/onnx/onnx/blob/main/docs/Versioning.md). 25 | 26 | ## Tool Compatibility 27 | A variety of tools can be used to create ONNX models. Unless otherwise noted, please use the latest released version of the tools to convert/export the ONNX model. Most tools are backwards compatible and support multiple ONNX versions. Join this with the table above to evaluate ONNX Runtime compatibility. 28 | 29 | |Tool|Recommended Version|Supported ONNX version(s)| 30 | |---|---|---| 31 | |[PyTorch](https://pytorch.org/)|[Latest stable](https://pytorch.org/get-started/locally/)|1.2-1.6| 32 | |[ONNXMLTools](https://pypi.org/project/onnxmltools/)<br>CoreML, LightGBM, XGBoost, LibSVM|[Latest stable](https://github.com/onnx/onnxmltools/releases)|1.2-1.6| 33 | |[ONNXMLTools](https://pypi.org/project/onnxmltools/)<br>SparkML|[Latest stable](https://github.com/onnx/onnxmltools/releases)|1.4-1.5| 34 | |[SKLearn-ONNX](https://pypi.org/project/skl2onnx/)|[Latest stable](https://github.com/onnx/sklearn-onnx/releases)|1.2-1.6| 35 | |[Keras-ONNX](https://pypi.org/project/keras2onnx/)|[Latest stable](https://github.com/onnx/keras-onnx/releases)|1.2-1.6| 36 | |[Tensorflow-ONNX](https://pypi.org/project/tf2onnx/)|[Latest stable](https://github.com/onnx/tensorflow-onnx/releases)|1.2-1.6| 37 | |[WinMLTools](https://docs.microsoft.com/en-us/windows/ai/windows-ml/convert-model-winmltools)|[Latest stable](https://pypi.org/project/winmltools/)|1.2-1.6| 38 | |[Paddle2ONNX](https://pypi.org/project/paddle2onnx/)| [Latest stable](https://github.com/PaddlePaddle/Paddle2ONNX/releases) | 1.6-1.9 | 39 | |[AutoML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml)|[1.0.39+](https://pypi.org/project/azureml-automl-core)|1.5| 40 | | |[1.0.33](https://pypi.org/project/azureml-automl-core/1.0.33/)|1.4| 41 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/WinML_principles.md: -------------------------------------------------------------------------------- 1 | # Contributing to Windows ML 2 | 3 | Windows Machine Learning is a high-performance, reliable API for deploying hardware-accelerated ML inferences on Windows devices. Please visit the [Windows ML documentation](https://docs.microsoft.com/en-us/windows/ai/windows-ml/) to learn more about Windows ML. 4 | 5 | ## Windows ML Base Principles 6 | 7 | **We design and optimize for all Windows devices.** 8 | 9 | Our goal is to provide developers with a platform that enables new experiences that run well on all Windows devices. Our design drives innovation in the DirectX ecosystem through DirectML and gives developers the confidence that their applications will work for all Windows customers. 10 | 11 | **We maintain and curate the Windows ML APIs.** 12 | 13 | The API is designed to ensure consistency of the developer experience across the Windows platform. We provide long-term servicing and support, and we are committed to ensuring application compatibility as we evolve the API. 14 | 15 | **Windows ML is a core component of Windows.** 16 | 17 | The Windows ML code is packaged and distributed with each new release of Windows. To provide consumers with high-quality products, Microsoft is responsible for distributing Windows ML and related binaries as part of Windows or standalone distributable packages. 18 | 19 | ## Open for Community Contributions 20 | 21 | We encourage community contributions to Windows ML to enhance users' experience on Windows. We use the principles above to guide how we look at and evaluate all contributions. 22 | 23 | Ensure your feature request follows all these principles to help the review process, and include information about the customer problem(s) the feature request addresses. 24 | 25 | Note: minor issues or bugs can be addressed more quickly using the [bug/performance issue request](https://github.com/microsoft/onnxruntime/issues/new/choose) rather than a feature request. 26 | 27 | ## Start your Feature Request 28 | 29 | If you'd like to contribute to Windows ML and engage with the community to get feedback, please review the contributing [process details](https://github.com/microsoft/onnxruntime/blob/main/CONTRIBUTING.md) and submit a new feature request [here](https://github.com/microsoft/onnxruntime/issues/new/choose).
30 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/ONNX_Runtime_EP1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/ONNX_Runtime_EP1.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/ONNX_Runtime_EP2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/ONNX_Runtime_EP2.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/ONNX_Runtime_EP3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/ONNX_Runtime_EP3.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/Vitis-AI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/Vitis-AI.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/mkl-dnn_node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/mkl-dnn_node.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/mkl-dnn_subgraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/execution_providers/images/mkl-dnn_subgraph.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/Mobile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/Mobile.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_icon.png -------------------------------------------------------------------------------- 
/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_logo - Docs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_logo - Docs.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_logo.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_logo_dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ONNX_Runtime_logo_dark.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/layered-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/layered-architecture.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/mnist_optimization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/mnist_optimization.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/mnist_optimization_with_nnapi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/mnist_optimization_with_nnapi.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ngraph-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/ngraph-logo.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/nnapi_aware_ort_format_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/images/nnapi_aware_ort_format_model.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/onnxruntime_dependencies.dot: -------------------------------------------------------------------------------- 1 | digraph "GG" { 2 | compound=true; 3 | 4 | node [ 5 | 
fontsize = "12" 6 | ]; 7 | subgraph cluster_0 { 8 | label = "onnxruntime.dll"; 9 | "ort_graph" [ label="onnxruntime_graph\n(schemas)" shape="box"]; 10 | "ort_common" [ label="onnxruntime_common" shape="box"]; 11 | "ort_util" [ label="onnxruntime_util" shape="box"]; 12 | "ort_mlas" [ label="onnxruntime_mlas" shape="box"]; 13 | "ort_optimizer" [ label="onnxruntime_optimizer" shape="box"]; 14 | "ort_session" [ label="onnxruntime_session" shape="box"]; 15 | "ort_graph" -> "ort_common" 16 | "onnx" [ label="onnx" shape="box"]; 17 | "protobuf" [ label="Google Protobuf" shape="box"]; 18 | "onnx" -> "protobuf" 19 | "ort_graph" -> "protobuf" 20 | "ort_graph" -> "onnx" 21 | "ort_optimizer" -> "onnx" 22 | "ort_framework" [ label="onnxruntime_framework" shape="box"]; 23 | "ort_framework" -> "ort_graph" 24 | "ort_framework" -> "ort_common" 25 | "ort_framework" -> "onnx" 26 | "ort_cpu_provider" [ label="onnxruntime_cpu_provider\n(kernels)" shape="box"]; 27 | "ort_cpu_provider" -> "ort_common" 28 | "ort_cpu_provider" -> "ort_framework" 29 | "ort_cpu_provider" -> "ort_util" 30 | "ort_cpu_provider" -> "ort_mlas" 31 | "ort_cpu_provider" -> "onnx" 32 | "ort_cuda_provider" [ label="onnxruntime_cuda_provider\n(kernels)" shape="box"]; 33 | "ort_cuda_provider" -> "ort_common" 34 | "ort_cuda_provider" -> "ort_framework" 35 | "ort_cuda_provider" -> "ort_util" 36 | "ort_cuda_provider" -> "ort_mlas" 37 | "ort_cuda_provider" -> "onnx" 38 | "ort_util" -> "ort_common" 39 | "ort_util" -> "ort_framework" 40 | "ort_util" -> "ort_mlas" 41 | "ort_mlas" -> "ort_common" 42 | "ort_session" -> "ort_framework" 43 | "ort_session" -> "ort_common" 44 | "ort_session" -> "ort_graph" 45 | "ort_session" -> "ort_optimizer" 46 | "ort_session" -> "ort_cpu_provider" 47 | "ort_optimizer" -> "ort_cpu_provider" 48 | "ort_optimizer" -> "ort_common" 49 | "ort_optimizer" -> "ort_framework" 50 | "ort_optimizer" -> "ort_graph" 51 | "capi" [ label="C API" shape="box"]; 52 | } 53 | 54 | subgraph cluster_1 { 55 | label = "Application Interfaces"; 56 | style=filled; 57 | color=lightgrey; 58 | node [style=filled,color=white]; 59 | "javaapi" [ label="Java API" shape="box"]; 60 | "csharpapi" [ label="C# API" shape="box"]; 61 | "cppapi" [ label="C++ API\n(header only)" shape="box"]; 62 | "javaapi" -> "capi" 63 | "cppapi" -> "capi" 64 | "csharpapi" -> "capi" 65 | "pythonapi" [ label="Python API" shape="box"]; 66 | pythonapi -> ort_session [lhead=cluster_0] 67 | } 68 | 69 | "grpc" [ label="gRPC" shape="box"]; 70 | "boost" [ label="Boost" shape="box"]; 71 | "onnx2" [ label="onnx" shape="box"]; 72 | "protobuf2" [ label="Google Protobuf" shape="box"]; 73 | "onnx2" -> "protobuf2" 74 | "grpc" -> "protobuf2" 75 | 76 | subgraph cluster_2 { 77 | label = "Applications"; 78 | "onnxruntime_server" [ label="ONNX Runtime Server" shape="box"]; 79 | "onnxruntime_server" -> "cppapi" 80 | "app1" [ label="User application" shape="box"]; 81 | "app2" [ label="User application" shape="box"]; 82 | } 83 | "onnxruntime_server" -> "grpc" 84 | "onnxruntime_server" -> "boost" 85 | "onnxruntime_server" -> "onnx2" 86 | } 87 | 88 | 89 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/onnxruntime_dependencies.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/onnxmd/onnxruntime_docs/onnxruntime_dependencies.png 
-------------------------------------------------------------------------------- /_doc/sphinxdoc/source/other_pages.rst: -------------------------------------------------------------------------------- 1 | 2 | Other pages 3 | =========== 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | doc 9 | i_ex 10 | i_faq 11 | i_cmd 12 | HISTORY 13 | README 14 | all_indexes 15 | all_report 16 | filechanges 17 | index_class 18 | index_function 19 | index_method 20 | index_module 21 | index_staticmethod 22 | blog/blogindex.rst 23 | license 24 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/requirements.txt: -------------------------------------------------------------------------------- 1 | codecov 2 | coverage 3 | flake8 4 | joblib 5 | loky 6 | matplotlib 7 | mlinsights 8 | mlprodict 9 | myst-parser 10 | nbsphinx 11 | onnx 12 | onnxruntime 13 | pillow 14 | py-spy 15 | pandas 16 | pyinstrument 17 | pyquickhelper>=1.9.3359 18 | pytest 19 | pytest-cov 20 | scikit-learn 21 | skl2onnx 22 | sphinx 23 | sphinxcontrib-blockdiag 24 | sphinx-gallery 25 | tqdm 26 | wheel 27 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-tutorials: 3 | 4 | Tutorials 5 | ========= 6 | 7 | .. contents:: 8 | :local: 9 | 10 | ONNX ecosystem 11 | ++++++++++++++ 12 | 13 | The following tutorials introduce the :epkg:`ONNX` ecosystem. They walk the 14 | user through the ONNX specifications, how to execute an ONNX graph, 15 | how to create an ONNX graph, how to convert a model from :epkg:`scikit-learn`, 16 | and how to train models with :epkg:`onnxruntime-training`. 17 | 18 | .. toctree:: 19 | :maxdepth: 2 20 | 21 | tutorial_onnx/index 22 | tutorial_onnxruntime/index 23 | tutorial_skl/index 24 | tutorial_training/index 25 | tutorial_bench/index 26 | tutorial_parallel/index 27 | 28 | Readings 29 | ++++++++ 30 | 31 | * `Add AI to mobile applications with Xamarin and ONNX Runtime 32 | `_ 33 | * `Announcing ONNX Runtime Availability in the NVIDIA Jetson Zoo for High Performance Inferencing 34 | `_ 35 | (8/2021) 36 | * `Speeding Up Deep Learning Inference Using TensorFlow, ONNX, and NVIDIA TensorRT 37 | `_ 38 | (7/2021) 39 | * `Journey to optimize large scale transformer model inference with ONNX Runtime 40 | `_ 41 | (6/2021) 42 | * `Accelerating Model Training with the ONNX Runtime 43 | `_ 44 | (5/2020) 45 | * `Accelerate and simplify Scikit-learn model inference with ONNX Runtime 46 | `_ 47 | (12/2020) 48 | * `Model Persistence scikit-learn and ONNX 49 | `_, 50 | short talk at `scikit-learn foundation `_ 51 | (2019) 52 | 53 | Current documentation of ONNX and onnxruntime 54 | +++++++++++++++++++++++++++++++++++++++++++++ 55 | 56 | Most of the documentation related to :epkg:`onnx` and :epkg:`onnxruntime` 57 | is written in :epkg:`markdown`. The following section is an attempt 58 | to render it and make it searchable. 59 | 60 | .. toctree:: 61 | :maxdepth: 2 62 | 63 | onnxmd/index 64 | 65 | Build 66 | +++++ 67 | 68 | Some useful pages. 69 | 70 | * :ref:`Build onnxruntime on WSL (Windows Linux Subsystem) (2021) `. 71 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_bench/index.rst: -------------------------------------------------------------------------------- 1 | 2 | Benchmarking and Profiling Tutorial 3 | =================================== 4 | 5 | ..
index:: tutorial 6 | 7 | The next sections show how to measure the performance of an ONNX graph 8 | when executing it with :epkg:`onnxruntime`. 9 | 10 | .. toctree:: 11 | :maxdepth: 2 12 | 13 | tutorial_op 14 | tutorial_benchmark 15 | tutorial_profile 16 | tutorial_training 17 | 18 | The tutorial was tested with the following versions: 19 | 20 | .. runpython:: 21 | :showcode: 22 | 23 | import sys 24 | import numpy 25 | import scipy 26 | import onnx 27 | import onnxruntime 28 | import lightgbm 29 | import xgboost 30 | import sklearn 31 | import onnxconverter_common 32 | import onnxmltools 33 | import skl2onnx 34 | import pyquickhelper 35 | import mlprodict 36 | import onnxcustom 37 | 38 | print("python {}".format(sys.version_info)) 39 | mods = [numpy, scipy, sklearn, lightgbm, xgboost, 40 | onnx, onnxmltools, onnxruntime, onnxcustom, 41 | onnxconverter_common, 42 | skl2onnx, mlprodict, pyquickhelper] 43 | mods = [(m.__name__, m.__version__) for m in mods] 44 | mx = max(len(_[0]) for _ in mods) + 1 45 | for name, vers in sorted(mods): 46 | print("{}{}{}".format(name, " " * (mx - len(name)), vers)) 47 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_bench/tutorial_benchmark.rst: -------------------------------------------------------------------------------- 1 | 2 | Inference 3 | ========= 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | ../../gyexamples/plot_benchmark_ort_api 9 | ../../gyexamples/plot_benchmark_inference_standard 10 | ../../gyexamples/plot_benchmark_inference 11 | ../../gyexamples/plot_benchmark_eager_mode 12 | ../../gyexamples/plot_benchmark_graph_opt 13 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_bench/tutorial_op.rst: -------------------------------------------------------------------------------- 1 | 2 | Study behavior of one operator 3 | ============================== 4 | 5 | The following examples look into the processing time of one operator 6 | depending on the size of the tensors it processes. They may help 7 | optimize an ONNX graph depending on the size of the data it 8 | consumes. 9 | 10 | .. toctree:: 11 | :maxdepth: 1 12 | 13 | ../../gyexamples/plot_benchmark_op_leakyrelu 14 | ../../gyexamples/plot_benchmark_op_short 15 | ../../gyexamples/plot_benchmark_op 16 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_bench/tutorial_profile.rst: -------------------------------------------------------------------------------- 1 | 2 | Profiling 3 | ========= 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | ../../gyexamples/plot_profile_ort 9 | ../../gyexamples/plot_profile_ort_onnx 10 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_bench/tutorial_training.rst: -------------------------------------------------------------------------------- 1 | 2 | Training 3 | ======== 4 | 5 | ..
toctree:: 6 | :maxdepth: 1 7 | 8 | ../../gyexamples/plot_orttraining_benchmark 9 | ../../gyexamples/plot_orttraining_benchmark_fwbw 10 | ../../gyexamples/plot_orttraining_benchmark_fwbw_cls 11 | ../../gyexamples/plot_benchmark_onnx_function 12 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/iff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/iff.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/linreg1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/linreg1.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/linreg2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/linreg2.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/scanop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/tutorials/tutorial_onnx/images/scanop.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnx/index.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _onnx-tutorial: 3 | 4 | Introduction to ONNX 5 | ==================== 6 | 7 | This documentation describes the :epkg:`ONNX` concepts 8 | (**Open Neural Network Exchange**). 9 | It shows how it is used with examples in Python and finally explains 10 | some of the challenges faced when moving to ONNX in production. 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | concepts 16 | python 17 | challenges 18 | onnxops 19 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnx/onnxops.rst: -------------------------------------------------------------------------------- 1 | 2 | ============================ 3 | ONNX operators and functions 4 | ============================ 5 | 6 | The full list of operators provided by :epkg:`onnx`. 7 | Links point to the github page :epkg:`ONNX operators`. 8 | 9 | .. runpython:: 10 | :rst: 11 | 12 | import onnx 13 | 14 | fmt = "* `%s <https://github.com/onnx/onnx/blob/main/docs/Operators.md#%s>`_" 15 | names = list(sorted(set( 16 | sch.name for sch in onnx.defs.get_all_schemas_with_history()))) 17 | for n in names: 18 | print(fmt % (n, n)) 19 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnxruntime/extensions.rst: -------------------------------------------------------------------------------- 1 | 2 | ========== 3 | Extensions 4 | ========== 5 | 6 | ..
contents:: 7 | :local: 8 | 9 | C API 10 | ===== 11 | 12 | :epkg:`onnxruntime` implements a C API in three files: 13 | 14 | * `onnxruntime_c_api.h <https://github.com/microsoft/onnxruntime/blob/main/include/onnxruntime/core/session/onnxruntime_c_api.h>`_ 15 | * `onnxruntime_cxx_api.h <https://github.com/microsoft/onnxruntime/blob/main/include/onnxruntime/core/session/onnxruntime_cxx_api.h>`_ 16 | * `onnxruntime_cxx_inline.h <https://github.com/microsoft/onnxruntime/blob/main/include/onnxruntime/core/session/onnxruntime_cxx_inline.h>`_ 17 | 18 | Other languages 19 | =============== 20 | 21 | :epkg:`onnxruntime` is available in other languages such as C#, Java, JavaScript, 22 | WebAssembly, Objective-C. 23 | 24 | .. _l-custom-runtime-extensions: 25 | 26 | Custom runtime 27 | ============== 28 | 29 | Project :epkg:`onnxruntime-extensions` leverages the C API to implement 30 | a runtime for a couple of tokenizers used by :epkg:`tensorflow` models. 31 | `PR 148 <https://github.com/microsoft/onnxruntime-extensions/pull/148>`_ 32 | shows how to add a new operator dealing with text. 33 | 34 | Tools 35 | ===== 36 | 37 | `perfstats.py `_ 39 | reads a file produced by a profiling run. It returns the time spent in every 40 | operator or type of operator in a table. It helps find where the 41 | runtime spends most of its time. 42 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnxruntime/index.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _onnxruntime-tutorial: 3 | 4 | Introduction to onnxruntime 5 | =========================== 6 | 7 | This tutorial introduces :epkg:`onnxruntime`, a runtime 8 | for ONNX available on many platforms and languages. 9 | This documentation is mostly about Python but onnxruntime is 10 | available on many other platforms. 11 | :epkg:`onnxruntime` executes an ONNX graph. 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | 16 | ortvalue_doc 17 | inference 18 | training_ort_api 19 | extensions 20 | quantization 21 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_onnxruntime/quantization.rst: -------------------------------------------------------------------------------- 1 | 2 | ============ 3 | Quantization 4 | ============ 5 | 6 | .. contents:: 7 | :local: 8 | 9 | Examples 10 | ======== 11 | 12 | .. toctree:: 13 | :maxdepth: 1 14 | 15 | ../../gyexamples/plot_quantization 16 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_parallel/index.rst: -------------------------------------------------------------------------------- 1 | 2 | Parallelization 3 | =============== 4 | 5 | .. index:: tutorial 6 | 7 | :epkg:`onnxruntime` already parallelizes the computation 8 | on multiple cores if the execution runs on CPU only and obviously 9 | on GPU. Recent machines have multiple GPUs but :epkg:`onnxruntime` 10 | usually runs on one single GPU. These examples try to take 11 | advantage of that configuration. The first one parallelizes the execution 12 | of the same model on each GPU. It assumes a single GPU can host the 13 | whole model. The second one explores a way to split the model 14 | into pieces when the whole model does not fit on one single GPU. 15 | This is done through function 16 | :func:`split_onnx <onnxcustom.utils.onnx_split.split_onnx>`, sketched after the examples below. 17 | 18 | .. toctree:: 19 | :maxdepth: 1 20 | 21 | ../../gyexamples/plot_parallel_execution 22 | ../../gyexamples/plot_parallel_execution_big_model 23 |
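The sketch below illustrates this second scenario. It is only a sketch: the model file name is a placeholder, and the keyword *n_parts* is an assumption about the signature, which is documented with the function itself.

.. code-block:: python

    from onnx import load
    from onnxcustom.utils.onnx_split import split_onnx

    # load a trained model, "model.onnx" is a placeholder name
    with open("model.onnx", "rb") as f:
        onx = load(f)

    # split the graph into two consecutive pieces, each of them small
    # enough to be hosted by a single GPU (n_parts is a hypothetical keyword)
    pieces = split_onnx(onx, n_parts=2)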
24 | The tutorial was tested with the following versions: 25 | 26 | .. runpython:: 27 | :showcode: 28 | 29 | import sys 30 | import numpy 31 | import scipy 32 | import onnx 33 | import onnxruntime 34 | import onnxcustom 35 | import sklearn 36 | import torch 37 | 38 | print("python {}".format(sys.version_info)) 39 | mods = [numpy, scipy, sklearn, onnx, 40 | onnxruntime, onnxcustom, torch] 41 | mods = [(m.__name__, m.__version__) for m in mods] 42 | mx = max(len(_[0]) for _ in mods) + 1 43 | for name, vers in sorted(mods): 44 | print("{}{}{}".format(name, " " * (mx - len(name)), vers)) 45 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_skl/index.rst: -------------------------------------------------------------------------------- 1 | 2 | scikit-learn to ONNX Tutorial 3 | ============================= 4 | 5 | .. index:: tutorial 6 | 7 | The tutorial goes from a simple example which 8 | converts a pipeline to a more complex example 9 | involving operators not actually implemented in 10 | :epkg:`ONNX operators` or :epkg:`ONNX ML Operators`. 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | tutorial_1_simple 16 | tutorial_1-5_external 17 | tutorial_2_new_converter 18 | tutorial_3_new_operator 19 | tutorial_4_complex 20 | 21 | The tutorial was tested with the following versions: 22 | 23 | .. runpython:: 24 | :showcode: 25 | 26 | import sys 27 | import numpy 28 | import scipy 29 | import onnx 30 | import onnxruntime 31 | import lightgbm 32 | import xgboost 33 | import sklearn 34 | import onnxconverter_common 35 | import onnxmltools 36 | import skl2onnx 37 | import pyquickhelper 38 | import mlprodict 39 | import onnxcustom 40 | 41 | print("python {}".format(sys.version_info)) 42 | mods = [numpy, scipy, sklearn, lightgbm, xgboost, 43 | onnx, onnxmltools, onnxruntime, onnxcustom, 44 | onnxconverter_common, 45 | skl2onnx, mlprodict, pyquickhelper] 46 | mods = [(m.__name__, m.__version__) for m in mods] 47 | mx = max(len(_[0]) for _ in mods) + 1 48 | for name, vers in sorted(mods): 49 | print("{}{}{}".format(name, " " * (mx - len(name)), vers)) 50 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_skl/tutorial_1-5_external.rst: -------------------------------------------------------------------------------- 1 | Using converters from other libraries 2 | ===================================== 3 | 4 | Before starting to write our own converter, 5 | we can use some available in libraries other 6 | than :epkg:`sklearn-onnx`. :epkg:`onnxmltools` implements 7 | converters for :epkg:`xgboost` and :epkg:`LightGBM`. 8 | The following examples show how to use these converters when the 9 | models are part of a pipeline. 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | ../../gyexamples/plot_gexternal_lightgbm 15 | ../../gyexamples/plot_gexternal_xgboost 16 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_skl/tutorial_1_simple.rst: -------------------------------------------------------------------------------- 1 | 2 | The easy case 3 | ============= 4 | 5 | The easy case is when the machine learned model 6 | can be converted into ONNX with a converting library 7 | without writing any specific code.
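A minimal sketch of that scenario, assuming :epkg:`scikit-learn` and :epkg:`skl2onnx` are installed:

.. code-block:: python

    import numpy
    from sklearn.linear_model import LinearRegression
    from skl2onnx import to_onnx

    # train any model for which a converter already exists
    X = numpy.random.randn(20, 2).astype(numpy.float32)
    y = X[:, 0] * 2 + X[:, 1]
    model = LinearRegression().fit(X, y)

    # one call is enough, X gives the converter the input type and shape
    onx = to_onnx(model, X)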
That means that a converter 8 | exists for the model or for each piece of it, 9 | that the converter produces an ONNX graph where every node 10 | is part of the existing ONNX specifications, and that the runtime 11 | used to compute the predictions implements every node 12 | used in the ONNX graph. 13 | 14 | .. toctree:: 15 | :maxdepth: 1 16 | 17 | ../../gyexamples/plot_abegin_convert_pipeline 18 | ../../gyexamples/plot_bbegin_measure_time 19 | ../../gyexamples/plot_cbegin_opset 20 | ../../gyexamples/plot_dbegin_options 21 | ../../gyexamples/plot_dbegin_options_list 22 | ../../gyexamples/plot_dbegin_options_zipmap 23 | ../../gyexamples/plot_fbegin_investigate 24 | ../../gyexamples/plot_gbegin_dataframe 25 | ../../gyexamples/plot_gbegin_transfer_learning 26 | ../../gyexamples/plot_gbegin_cst 27 | ../../gyexamples/plot_gconverting 28 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_skl/tutorial_2_new_converter.rst: -------------------------------------------------------------------------------- 1 | A custom converter for a custom model 2 | ===================================== 3 | 4 | When :epkg:`sklearn-onnx` converts a :epkg:`scikit-learn` 5 | pipeline, it looks into every transformer and predictor 6 | and fetches the associated converter. The resulting 7 | ONNX graph combines the outcome of every converter 8 | in a single graph. If a model does not have a converter, 9 | it displays an error message telling that a converter is missing. 10 | 11 | .. runpython:: 12 | :showcode: 13 | 14 | import numpy 15 | from sklearn.linear_model import LogisticRegression 16 | from skl2onnx import to_onnx 17 | 18 | class MyLogisticRegression(LogisticRegression): 19 | pass 20 | 21 | X = numpy.array([[0, 0.1]]) 22 | try: 23 | to_onnx(MyLogisticRegression(), X) 24 | except Exception as e: 25 | print(e) 26 | 27 | The following section shows how to create a custom converter. 28 | 29 | .. toctree:: 30 | :maxdepth: 1 31 | 32 | ../../gyexamples/plot_icustom_converter 33 | ../../gyexamples/plot_jcustom_syntax 34 | ../../gyexamples/plot_kcustom_converter_wrapper 35 | ../../gyexamples/plot_lcustom_options 36 | ../../gyexamples/plot_mcustom_parser 37 | ../../gyexamples/plot_mcustom_parser_dataframe 38 | ../../gyexamples/plot_catwoe_transformer 39 | ../../gyexamples/plot_woe_transformer 40 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_skl/tutorial_3_new_operator.rst: -------------------------------------------------------------------------------- 1 | 2 | Extend ONNX, extend runtime 3 | =========================== 4 | 5 | Existing converters assume it is possible to convert 6 | a model with the current list of :epkg:`ONNX operators`. 7 | This list grows with every version, but it may happen that 8 | a new node is needed. It could be added to the ONNX specifications, 9 | which requires a new release, but that's not mandatory. 10 | New nodes can easily be created by using a different domain. 11 | A domain defines a set of operators; there are currently two 12 | officially supported domains: :epkg:`ONNX operators` and 13 | :epkg:`ONNX ML Operators`. Custom domains can be used. 14 | Once this new node is defined, a converter can use it. 15 | That leaves the last issue: the runtime must be aware 16 | of the implementation attached to this new node. 17 | That's the difficult part.
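Creating a node in a custom domain only takes a call to the :epkg:`onnx` helpers. A minimal sketch follows; the operator type and the domain name are arbitrary choices:

.. code-block:: python

    from onnx import helper

    # ONNX only stores the description of the node, the runtime
    # executing the graph must provide an implementation for it
    node = helper.make_node(
        'MyCustomOp', inputs=['X'], outputs=['Y'],
        domain='my.custom.domain')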
18 | 19 | .. toctree:: 20 | :maxdepth: 1 21 | 22 | ../../gyexamples/plot_pextend_python_runtime 23 | ../../gyexamples/plot_qextend_onnxruntime 24 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_skl/tutorial_4_complex.rst: -------------------------------------------------------------------------------- 1 | 2 | Complex Scenarios and Discrepancies 3 | =================================== 4 | 5 | Discrepancies may happen. Let's see some unexpected cases. 6 | 7 | Dealing with discrepancies 8 | ++++++++++++++++++++++++++ 9 | 10 | .. toctree:: 11 | :maxdepth: 1 12 | 13 | ../../gyexamples/plot_ebegin_float_double 14 | ../../gyexamples/plot_funny_sigmoid 15 | 16 | Unexpected issues 17 | +++++++++++++++++ 18 | 19 | .. toctree:: 20 | :maxdepth: 1 21 | 22 | ../../gyexamples/plot_usparse_xgboost 23 | ../../gyexamples/plot_gexternal_lightgbm_reg 24 | ../../gyexamples/plot_gexternal_lightgbm_reg_per 25 | ../../gyexamples/plot_gexternal_lightgbm_reg_mono 26 | ../../gyexamples/plot_transformer_discrepancy 27 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_training/images/onnxfwbwloss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_doc/sphinxdoc/source/tutorials/tutorial_training/images/onnxfwbwloss.png -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_training/index.rst: -------------------------------------------------------------------------------- 1 | 2 | Training Tutorial 3 | ================= 4 | 5 | .. index:: tutorial 6 | 7 | The tutorial assumes there exists a saved ONNX graph and 8 | introduces two ways to train this model, assuming a gradient can 9 | be computed for every node of this graph. 10 | 11 | The first part looks into the first API of :epkg:`onnxruntime-training`, 12 | based on class :epkg:`TrainingSession`. This class assumes the loss 13 | function is part of the graph to train. The tutorial shows how to 14 | do that. 15 | 16 | The second part relies on class :epkg:`TrainingAgent`. It builds a new 17 | ONNX graph to compute the gradient. This design gives more freedom 18 | to the user but requires writing more code to implement the 19 | whole training. 20 | 21 | Both parts rely on classes implemented in this package (*onnxcustom*) 22 | to simplify the code. 23 | 24 | **Main difference between the two approaches** 25 | 26 | The second API handles less than the first one by letting the user 27 | implement the weight updating. However, this freedom gives more 28 | possibilities to the user. The first API is faster than the second one, mostly 29 | because all the computation happens in a single ONNX graph. 30 | :epkg:`onnxruntime` can better optimize if everything takes place 31 | in a single graph. It minimizes round trips between C++ and Python. 32 | The major drawback of this approach is that every change in the way weights 33 | are updated requires a code change. 34 | 35 | The second API works with multiple ONNX graphs: one to update the weights, 36 | one to compute the loss and its gradient, one to compute the 37 | regularization and its gradient, and one to compute the gradient of the model. 38 | The implementation tries to avoid copies when a tensor goes from 39 | one graph to the next one.
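A rough sketch of how this second approach is exposed by this package; the class below comes from *onnxcustom*, but the initializer names and the parameters are illustrative assumptions, see the examples listed below for working code:

.. code-block:: python

    from onnxcustom.training.optimizers_partial import (
        OrtGradientForwardBackwardOptimizer)

    # onx is an ONNX graph (for instance produced by skl2onnx) whose
    # initializers hold the weights to train; X and y are numpy arrays;
    # 'coef' and 'intercept' are hypothetical initializer names
    trainer = OrtGradientForwardBackwardOptimizer(
        onx, ['coef', 'intercept'], learning_rate=1e-4)
    trainer.fit(X, y)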
The freedom provided by this API 40 | can be used to implement Nesterov's method to update the weights. 41 | It can even stick to the :epkg:`scikit-learn` API to leverage 42 | some of the functionalities of this package, such as 43 | :class:`sklearn.model_selection.GridSearchCV`. 44 | The following picture shows the four ONNX graphs 45 | of this second approach. 46 | 47 | .. image:: images/onnxfwbwloss.png 48 | 49 | .. toctree:: 50 | :maxdepth: 2 51 | 52 | tutorial_6_training 53 | tutorial_6_training_partial 54 | tutorial_7_related_topics 55 | 56 | The tutorial was tested with the following versions: 57 | 58 | .. runpython:: 59 | :showcode: 60 | 61 | import sys 62 | import numpy 63 | import scipy 64 | import onnx 65 | import onnxruntime 66 | import lightgbm 67 | import xgboost 68 | import sklearn 69 | import onnxconverter_common 70 | import onnxmltools 71 | import skl2onnx 72 | import pyquickhelper 73 | import mlprodict 74 | import onnxcustom 75 | import torch 76 | 77 | print("python {}".format(sys.version_info)) 78 | mods = [numpy, scipy, sklearn, lightgbm, xgboost, 79 | onnx, onnxmltools, onnxruntime, onnxcustom, 80 | onnxconverter_common, 81 | skl2onnx, mlprodict, pyquickhelper, 82 | torch] 83 | mods = [(m.__name__, m.__version__) for m in mods] 84 | mx = max(len(_[0]) for _ in mods) + 1 85 | for name, vers in sorted(mods): 86 | print("{}{}{}".format(name, " " * (mx - len(name)), vers)) 87 | -------------------------------------------------------------------------------- /_doc/sphinxdoc/source/tutorials/tutorial_training/tutorial_7_related_topics.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _l-related-topics: 3 | 4 | Related Topics to Training 5 | ========================== 6 | 7 | A collection of examples or topics related to training. 8 | 9 | ..
toctree:: 10 | :maxdepth: 1 11 | 12 | ../../gyexamples/plot_f8 13 | -------------------------------------------------------------------------------- /_unittests/ut_cli/test_cli_profile.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test tree node (time=4s) 3 | """ 4 | import unittest 5 | import sqlite3 6 | from pyquickhelper.loghelper import BufferedPrint 7 | from pyquickhelper.pycode import ExtTestCase 8 | from onnxcustom.__main__ import main 9 | 10 | 11 | class TestCliProfile(ExtTestCase): 12 | 13 | def test_profile_nvprof2json(self): 14 | st = BufferedPrint() 15 | main(args=['nvprof2json', '--help'], fLOG=st.fprint) 16 | res = str(st) 17 | self.assertIn("usage: nvprof2json", res) 18 | 19 | def test_profile_nvprof2json_fail(self): 20 | st = BufferedPrint() 21 | self.assertRaise( 22 | lambda: main(args=['nvprof2json', '-f', 'something'], 23 | fLOG=st.fprint), 24 | sqlite3.OperationalError) 25 | 26 | def test_profile_check(self): 27 | st = BufferedPrint() 28 | main(args=['check', '--help'], fLOG=st.fprint) 29 | res = str(st) 30 | self.assertIn("usage: check", res) 31 | 32 | 33 | if __name__ == "__main__": 34 | unittest.main() 35 | -------------------------------------------------------------------------------- /_unittests/ut_documentation/test_documentation_check_coverage.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @brief test log(time=2800s) 4 | """ 5 | import os 6 | import unittest 7 | from pyquickhelper.pycode import ExtTestCase, skipif_appveyor 8 | 9 | 10 | class TestDocumentationCheckCoverage(ExtTestCase): 11 | 12 | @skipif_appveyor("not relevant") 13 | def test_examples_coverage(self): 14 | with open( 15 | os.path.join(os.path.dirname(__file__), 16 | "_test_example.txt"), "r", encoding='utf-8') as f: 17 | lines = f.read().split('\n') 18 | 19 | this = os.path.abspath(os.path.dirname(__file__)) 20 | fold = os.path.normpath( 21 | os.path.join(this, '..', '..', '_doc', 'examples')) 22 | found = os.listdir(fold) 23 | 24 | done = set(_ for _ in lines if os.path.splitext(_)[-1] == '.py') 25 | found = set(_ for _ in found 26 | if (os.path.splitext(_)[-1] == '.py' and 27 | _.startswith('plot_'))) 28 | if len(done) != len(found): 29 | missing = found - done 30 | raise AssertionError( 31 | "Following examples were not tested:\n%s." 
32 | "" % "\n".join(sorted(missing))) 33 | 34 | 35 | if __name__ == "__main__": 36 | unittest.main() 37 | -------------------------------------------------------------------------------- /_unittests/ut_documentation/test_documentation_examples_show.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=60s) 3 | """ 4 | import unittest 5 | import os 6 | from pyquickhelper.pycode import ExtTestCase 7 | 8 | 9 | class TestDocumentationExampleShow(ExtTestCase): 10 | 11 | def test_documentation_examples_show(self): 12 | 13 | this = os.path.abspath(os.path.dirname(__file__)) 14 | fold = os.path.normpath( 15 | os.path.join(this, '..', '..', '_doc', 'examples')) 16 | found = os.listdir(fold) 17 | tested = 0 18 | for name in sorted(found): 19 | if not name.startswith("plot_") or not name.endswith(".py"): 20 | continue 21 | 22 | with self.subTest(name=name): 23 | full_name = os.path.join(fold, name) 24 | with open(full_name, "r", encoding="utf-8") as f: 25 | content = f.read() 26 | if "plt.show()" in content and "# plt.show()" not in content: 27 | raise AssertionError( 28 | f"plt.show() not found in {name!r}.") 29 | tested += 1 30 | if tested == 0: 31 | raise RuntimeError("No example was tested.") 32 | 33 | 34 | if __name__ == "__main__": 35 | unittest.main() 36 | -------------------------------------------------------------------------------- /_unittests/ut_documentation/test_documentation_notebooks.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @brief test log(time=121s) 4 | """ 5 | import os 6 | import unittest 7 | from pyquickhelper.loghelper import fLOG 8 | from pyquickhelper.pycode import ExtTestCase, skipif_circleci, skipif_appveyor 9 | import onnxcustom 10 | 11 | 12 | class TestDocumentationNotebooksPython(ExtTestCase): 13 | 14 | def setUp(self): 15 | import jyquickhelper # pylint: disable=C0415 16 | self.assertTrue(jyquickhelper is not None) 17 | 18 | @skipif_circleci("stuck") 19 | @skipif_appveyor("too long") 20 | def test_notebook_tree(self): 21 | from pyquickhelper.ipythonhelper import test_notebook_execution_coverage 22 | fLOG( 23 | __file__, 24 | self._testMethodName, 25 | OutputPrint=__name__ == "__main__") 26 | 27 | self.assertTrue(onnxcustom is not None) 28 | folder = os.path.join(os.path.dirname(__file__), 29 | "..", "..", "_doc", "notebooks") 30 | test_notebook_execution_coverage( 31 | __file__, "tree", folder, 'onnxcustom', copy_files=[], fLOG=fLOG) 32 | 33 | @skipif_circleci("stuck") 34 | @skipif_appveyor("too long") 35 | def test_notebook_training(self): 36 | from pyquickhelper.ipythonhelper import test_notebook_execution_coverage 37 | fLOG( 38 | __file__, 39 | self._testMethodName, 40 | OutputPrint=__name__ == "__main__") 41 | 42 | self.assertTrue(onnxcustom is not None) 43 | folder = os.path.join(os.path.dirname(__file__), 44 | "..", "..", "_doc", "notebooks") 45 | test_notebook_execution_coverage( 46 | __file__, "training", folder, 'onnxcustom', copy_files=[], fLOG=fLOG) 47 | 48 | 49 | if __name__ == "__main__": 50 | unittest.main() 51 | -------------------------------------------------------------------------------- /_unittests/ut_module/test_check.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=0s) 3 | """ 4 | import unittest 5 | from contextlib import redirect_stdout 6 | import io 7 | from pyquickhelper.pycode import ExtTestCase 8 | from onnxcustom import check 9 | 
10 | 11 | class TestCheck(ExtTestCase): 12 | """Test style.""" 13 | 14 | def test_check(self): 15 | test = check() 16 | self.assertEmpty(test) 17 | 18 | def test_check_out(self): 19 | f = io.StringIO() 20 | with redirect_stdout(f): 21 | res = check(verbose=1) 22 | self.assertIsInstance(res, list) 23 | if len(res) > 0: 24 | raise AssertionError(res) 25 | 26 | def test__main__(self): 27 | import onnxcustom.__main__ # pylint: disable=W0611 28 | 29 | 30 | if __name__ == "__main__": 31 | unittest.main() 32 | -------------------------------------------------------------------------------- /_unittests/ut_module/test_code_style.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=60s) 3 | """ 4 | import os 5 | import unittest 6 | from pyquickhelper.loghelper import fLOG 7 | from pyquickhelper.pycode import check_pep8, ExtTestCase 8 | 9 | 10 | class TestCodeStyle(ExtTestCase): 11 | """Test style.""" 12 | 13 | def test_style_src(self): 14 | thi = os.path.abspath(os.path.dirname(__file__)) 15 | src_ = os.path.normpath(os.path.join(thi, "..", "..", "onnxcustom")) 16 | check_pep8( 17 | src_, fLOG=fLOG, 18 | pylint_ignore=( 19 | 'C0103', 'C1801', 'R1705', 'W0108', 'W0613', 'C3001', 20 | 'W0201', 'W0221', 'E0632', 'R1702', 'W0212', 'W0223', 21 | 'W0107', "R1720", 'R1732', 'C0209', 'C0302', 'R1735'), 22 | skip=[]) 23 | 24 | def test_style_test(self): 25 | thi = os.path.abspath(os.path.dirname(__file__)) 26 | test = os.path.normpath(os.path.join(thi, "..", )) 27 | check_pep8( 28 | test, fLOG=fLOG, neg_pattern="temp_.*", 29 | pylint_ignore=( 30 | 'C0103', 'C1801', 'R1705', 'W0108', 'W0613', 'C3001', 31 | 'C0111', 'W0107', 'C0111', 'R1702', 'C0415', "R1720", 32 | 'R1732', 'C0209', 'C0302', 'R1735'), 33 | skip=[]) 34 | 35 | 36 | if __name__ == "__main__": 37 | unittest.main() 38 | -------------------------------------------------------------------------------- /_unittests/ut_module/test_onnx_runtimes.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=0s) 3 | """ 4 | import unittest 5 | import numpy 6 | from scipy.special import expit # pylint: disable=E0611 7 | from pyquickhelper.pycode import ExtTestCase 8 | from skl2onnx.algebra.onnx_ops import OnnxSigmoid # pylint: disable=E0611 9 | from skl2onnx.common.data_types import FloatTensorType 10 | from mlprodict.onnxrt import OnnxInference 11 | from onnxcustom import get_max_opset 12 | 13 | 14 | class TestOnnxRuntimes(ExtTestCase): 15 | """Test style.""" 16 | 17 | def test_check(self): 18 | opset = get_max_opset() 19 | min_values = [-41.621277, -40.621277, -30.621277, -20.621277, 20 | -19, -18, -17, -15, -14, -13, -12, -11, -10, -5, -2] 21 | data = numpy.array( 22 | [[0]], 23 | dtype=numpy.float32) 24 | 25 | node = OnnxSigmoid('X', op_version=opset, output_names=['Y']) 26 | onx = node.to_onnx({'X': FloatTensorType()}, 27 | {'Y': FloatTensorType()}, 28 | target_opset=opset) 29 | rts = ['numpy', 'python', 'onnxruntime1'] 30 | for mv in min_values: 31 | data[:, 0] = mv 32 | for rt in rts: 33 | if rt == 'numpy': 34 | y = expit(data) 35 | else: 36 | oinf = OnnxInference(onx, runtime=rt) 37 | y = oinf.run({'X': data})['Y'] 38 | self.assertNotEmpty(y) 39 | 40 | 41 | if __name__ == "__main__": 42 | unittest.main() 43 | -------------------------------------------------------------------------------- /_unittests/ut_module/test_readme.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test 
tree node (time=50s) 3 | """ 4 | import os 5 | import unittest 6 | from pyquickhelper.loghelper import fLOG 7 | from pyquickhelper.pycode import get_temp_folder, ExtTestCase 8 | 9 | 10 | class TestReadme(ExtTestCase): 11 | 12 | def test_venv_docutils08_readme(self): 13 | fLOG( 14 | __file__, 15 | self._testMethodName, 16 | OutputPrint=__name__ == "__main__") 17 | 18 | fold = os.path.dirname(os.path.abspath(__file__)) 19 | readme = os.path.join(fold, "..", "..", "README.rst") 20 | self.assertTrue(os.path.exists(readme)) 21 | with open(readme, "r", encoding="utf8") as f: 22 | content = f.read() 23 | 24 | self.assertTrue(len(content) > 0) 25 | temp = get_temp_folder(__file__, "temp_readme") 26 | 27 | if __name__ != "__main__": 28 | # does not work from a virtual environment 29 | return 30 | 31 | from pyquickhelper.pycode import check_readme_syntax 32 | 33 | check_readme_syntax(readme, folder=temp, fLOG=fLOG) 34 | 35 | 36 | if __name__ == "__main__": 37 | unittest.main() 38 | -------------------------------------------------------------------------------- /_unittests/ut_training/test_learning_rate.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=3s) 3 | """ 4 | 5 | import unittest 6 | from pyquickhelper.pycode import ExtTestCase 7 | from onnxcustom.training.sgd_learning_rate import LearningRateSGD 8 | 9 | 10 | class TestLearningRate(ExtTestCase): 11 | 12 | def is_decreased(self, series): 13 | for i in range(1, len(series)): 14 | if series[i] >= series[i - 1]: 15 | raise AssertionError( 16 | "Not decreasing at index %d - %r." % ( 17 | i, series[i - 1: i + 1])) 18 | 19 | def test_learning_rate_sgd_regressor_default(self): 20 | cllr = LearningRateSGD() 21 | val = list(cllr.loop()) 22 | self.assertEqual(len(val), 1000) 23 | self.is_decreased(val) 24 | self.assertEqual(val[0], 0.01) 25 | self.assertGreater(val[-1], 0.001) 26 | 27 | def test_learning_rate_sgd_regressor_exc(self): 28 | self.assertRaise( 29 | lambda: LearningRateSGD(learning_rate='EXC'), 30 | ValueError) 31 | 32 | def test_learning_rate_sgd_regressor_optimal(self): 33 | cllr = LearningRateSGD(learning_rate='optimal') 34 | val = list(cllr.loop()) 35 | self.assertEqual(len(val), 1000) 36 | self.is_decreased(val) 37 | self.assertEqual(val[0], 0.01) 38 | self.assertGreater(val[-1], 0.009) 39 | 40 | def test_learning_rate_sgd_regressor_constant(self): 41 | cllr = LearningRateSGD(learning_rate='constant') 42 | val = list(cllr.loop()) 43 | self.assertEqual(len(val), 1000) 44 | self.assertEqual(val[0], 0.01) 45 | self.assertEqual(val[-1], val[0]) 46 | 47 | def test_learning_rate_sgd_exc(self): 48 | self.assertRaise( 49 | lambda: LearningRateSGD(learning_rate='CST'), 50 | ValueError) 51 | 52 | 53 | if __name__ == "__main__": 54 | unittest.main() 55 | -------------------------------------------------------------------------------- /_unittests/ut_utils/data/bench_ortmodule_nn_gpu.nvvp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_unittests/ut_utils/data/bench_ortmodule_nn_gpu.nvvp -------------------------------------------------------------------------------- /_unittests/ut_utils/data/bench_ortmodule_nn_gpu.sql.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sdpython/onnxcustom/671b3c9927c37fb88a01d9a8c42df1c2b0ae092e/_unittests/ut_utils/data/bench_ortmodule_nn_gpu.sql.zip 
-------------------------------------------------------------------------------- /_unittests/ut_utils/test_doc_helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=5s) 3 | """ 4 | import os 5 | import unittest 6 | from pyquickhelper.pycode import ExtTestCase, get_temp_folder 7 | from onnxcustom.utils.doc_helper import fix_link_operator_md 8 | 9 | 10 | class TestDocHelper(ExtTestCase): 11 | 12 | def test_doc_helper_op(self): 13 | temp = get_temp_folder(__file__, 'temp_doc_helper_op') 14 | data = os.path.join(temp, "..", "..", "..", "_doc", "sphinxdoc", 15 | "source", "onnxmd", "onnx_docs", 16 | "Operators.md") 17 | new_content = fix_link_operator_md(data) 18 | output = os.path.join(temp, "Operators.md") 19 | with open(output, "w", encoding="utf-8") as f: 20 | f.write(new_content) 21 | self.assertExists(output) 22 | self.assertIn( 23 | '|[Mul](#a-name-mul-a-a-name-mul-mul-a)|', new_content) 24 | self.assertNotIn( 25 | '|Mul|', 26 | new_content) 27 | 28 | def test_doc_helper_op_ml(self): 29 | temp = get_temp_folder(__file__, 'temp_doc_helper_op_ml') 30 | data = os.path.join(temp, "..", "..", "..", "_doc", "sphinxdoc", 31 | "source", "onnxmd", "onnx_docs", 32 | "Operators-ml.md") 33 | new_content = fix_link_operator_md(data) 34 | output = os.path.join(temp, "Operators-ml.md") 35 | with open(output, "w", encoding="utf-8") as f: 36 | f.write(new_content) 37 | self.assertExists(output) 38 | self.assertNotIn( 39 | '|ai.onnx.ml.SVMRegressor|', 40 | new_content) 41 | self.assertIn( 42 | '|[ai-onnx-ml-SVMRegressor](#a-name-ai-onnx-ml-svmregressor-a-a-name' 43 | '-ai-onnx-ml-svmregressor-ai-onnx-ml-svmregressor-a)|', new_content) 44 | 45 | 46 | if __name__ == "__main__": 47 | unittest.main() 48 | -------------------------------------------------------------------------------- /_unittests/ut_utils/test_onnx_helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=9s) 3 | """ 4 | import unittest 5 | import numpy 6 | from pyquickhelper.pycode import ExtTestCase 7 | from skl2onnx.common.data_types import ( 8 | FloatTensorType, DoubleTensorType, Int64TensorType, Int32TensorType) 9 | from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611 10 | OnnxRelu, OnnxMatMul) 11 | from onnxcustom.utils.onnx_helper import ( 12 | onnx_rename_weights, proto_type_to_dtype, dtype_to_var_type, 13 | get_onnx_opset) 14 | 15 | 16 | class TestOnnxHelper(ExtTestCase): 17 | 18 | def test_onnx_rename_weights(self): 19 | N, D_in, D_out, H = 3, 3, 3, 3 20 | var = [('X', FloatTensorType([N, D_in]))] 21 | w1 = numpy.random.randn(D_in, H).astype(numpy.float32) 22 | w2 = numpy.random.randn(H, D_out).astype(numpy.float32) 23 | opv = 14 24 | onx_alg = OnnxMatMul( 25 | OnnxRelu(OnnxMatMul(*var, w1, op_version=opv), 26 | op_version=opv), 27 | w2, op_version=opv, output_names=['Y']) 28 | onx = onx_alg.to_onnx( 29 | var, target_opset=opv, outputs=[('Y', FloatTensorType())]) 30 | 31 | onx = onnx_rename_weights(onx) 32 | names = [init.name for init in onx.graph.initializer] 33 | self.assertEqual(['I0_Ma_MatMulcst', 'I1_Ma_MatMulcst1'], names) 34 | self.assertEqual(get_onnx_opset(onx), 14) 35 | self.assertRaise(lambda: get_onnx_opset(onx, "H"), ValueError) 36 | 37 | def test_dtype_to_var_type(self): 38 | self.assertEqual(dtype_to_var_type(numpy.float32), FloatTensorType) 39 | self.assertEqual(dtype_to_var_type(numpy.float64), DoubleTensorType) 40 | self.assertEqual(dtype_to_var_type(numpy.int64), 
Int64TensorType) 41 | self.assertEqual(dtype_to_var_type(numpy.int32), Int32TensorType) 42 | self.assertEqual(proto_type_to_dtype('tensor(double)'), numpy.float64) 43 | self.assertRaise(lambda: dtype_to_var_type(numpy.int8), ValueError) 44 | 45 | def test_proto_type_to_dtype(self): 46 | self.assertEqual(proto_type_to_dtype(1), numpy.float32) 47 | self.assertEqual(proto_type_to_dtype(11), numpy.float64) 48 | self.assertRaise(lambda: proto_type_to_dtype(9), ValueError) 49 | 50 | 51 | if __name__ == "__main__": 52 | unittest.main() 53 | -------------------------------------------------------------------------------- /_unittests/ut_utils/test_onnx_writer.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=9s) 3 | """ 4 | import unittest 5 | import numpy 6 | from pyquickhelper.pycode import ExtTestCase 7 | from skl2onnx.common.data_types import FloatTensorType 8 | from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611 9 | OnnxReciprocal, OnnxDiv) 10 | from mlprodict.onnxrt import OnnxInference 11 | from onnxcustom import get_max_opset 12 | from onnxcustom.utils.onnx_rewriter import onnx_rewrite_operator, _unique_name 13 | 14 | 15 | class TestOnnxWriter(ExtTestCase): 16 | 17 | def test_onnx_rewrite_operator(self): 18 | opset = get_max_opset() 19 | node1 = OnnxReciprocal('X', output_names=['Y'], 20 | op_version=opset) 21 | onx1 = node1.to_onnx( 22 | inputs={'X': FloatTensorType()}, 23 | outputs={'Y': FloatTensorType()}, 24 | target_opset=opset) 25 | onx1.graph.name = "jjj" 26 | oinf1 = OnnxInference(onx1) 27 | 28 | node2 = OnnxDiv(numpy.array([1], dtype=numpy.float32), 29 | 'X', output_names=['Y'], 30 | op_version=opset) 31 | onx2 = node2.to_onnx( 32 | inputs={'X': FloatTensorType()}, 33 | outputs={'Y': FloatTensorType()}, 34 | target_opset=opset) 35 | oinf2 = OnnxInference(onx2) 36 | X = numpy.array([[5, 6]], dtype=numpy.float32) 37 | y1 = oinf1.run({'X': X})['Y'] 38 | y2 = oinf2.run({'X': X})['Y'] 39 | self.assertEqualArray(y1, y2) 40 | 41 | onx3 = onnx_rewrite_operator(onx1, 'Reciprocal', onx2) 42 | self.assertNotIn('Reciprocal', str(onx3)) 43 | oinf3 = OnnxInference(onx3) 44 | y3 = oinf3.run({'X': X})['Y'] 45 | self.assertEqualArray(y1, y3) 46 | 47 | def test__unique_name(self): 48 | ex = set() 49 | got = [] 50 | for n in ['u', 'u', 'u', 'u', 'v', 'v']: 51 | got.append(_unique_name(ex, n)) 52 | self.assertEqual(ex, {'u', 'v_2', 'v', 'u_2', 'u_3', 'u_4'}) 53 | self.assertEqual(got, ['u', 'u_2', 'u_3', 'u_4', 'v', 'v_2']) 54 | 55 | 56 | if __name__ == "__main__": 57 | unittest.main() 58 | -------------------------------------------------------------------------------- /_unittests/ut_utils/test_print_helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=3s) 3 | """ 4 | 5 | import unittest 6 | import numpy 7 | from onnxruntime import OrtValue 8 | from onnxruntime.capi._pybind_state import ( # pylint: disable=E0611 9 | OrtValue as C_OrtValue) 10 | from pyquickhelper.pycode import ExtTestCase 11 | from onnxcustom.utils import str_ortvalue 12 | from onnxcustom.utils.onnxruntime_helper import get_ort_device 13 | 14 | 15 | class TestUtilsPrintHelper(ExtTestCase): 16 | 17 | def test_print_ortvalue(self): 18 | expected = ( 19 | "device=Cpu dtype=dtype('float32') shape=(1, 4) " 20 | "value=[0.0, 1.0, 4.0, 4.5]") 21 | value = numpy.array([[0, 1, 4, 4.5]], dtype=numpy.float32) 22 | dev = get_ort_device('cpu') 23 | ort = C_OrtValue.ortvalue_from_numpy(value, dev) 
24 | text = str_ortvalue(ort) 25 | self.assertEqual(expected, text) 26 | text = str_ortvalue(ort) # pylint: disable=W0212 27 | self.assertEqual(expected, text) 28 | 29 | expected = ( 30 | "device=Cpu dtype=dtype('int64') shape=(100,) " 31 | "value=[0, 1, 2, 3, 4, '...', 95, 96, 97, 98, 99]") 32 | value = numpy.arange(100).astype(numpy.int64) 33 | ort = C_OrtValue.ortvalue_from_numpy(value, dev) 34 | text = str_ortvalue(ort) # pylint: disable=W0212 35 | self.assertEqual(expected, text) 36 | 37 | def test_print_py_ortvalue(self): 38 | expected = ( 39 | "device=Cpu dtype=dtype('float32') shape=(1, 4) " 40 | "value=[0.0, 1.0, 4.0, 4.5]") 41 | value = numpy.array([[0, 1, 4, 4.5]], dtype=numpy.float32) 42 | ort = OrtValue.ortvalue_from_numpy(value, 'cpu') 43 | text = str_ortvalue(ort) 44 | self.assertEqual(expected, text) 45 | text = str_ortvalue(ort) # pylint: disable=W0212 46 | self.assertEqual(expected, text) 47 | 48 | expected = ( 49 | "device=Cpu dtype=dtype('int64') shape=(100,) " 50 | "value=[0, 1, 2, 3, 4, '...', 95, 96, 97, 98, 99]") 51 | value = numpy.arange(100).astype(numpy.int64) 52 | ort = OrtValue.ortvalue_from_numpy(value, 'cpu') 53 | text = str_ortvalue(ort) # pylint: disable=W0212 54 | self.assertEqual(expected, text) 55 | 56 | 57 | if __name__ == "__main__": 58 | unittest.main() 59 | -------------------------------------------------------------------------------- /_unittests/ut_utils/test_utils_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=3s) 3 | """ 4 | 5 | import unittest 6 | import numpy 7 | from onnxcustom.utils import measure_time 8 | 9 | 10 | class TestMeasureTime(unittest.TestCase): 11 | 12 | def test_vector_count(self): 13 | def fct(): 14 | X = numpy.ones((1000, 5)) 15 | return X 16 | res = measure_time( 17 | "fct", context={"fct": fct}, div_by_number=False, number=100) 18 | self.assertIn("average", res) 19 | res = measure_time( 20 | "fct", context={"fct": fct}, div_by_number=True, number=100) 21 | self.assertIn("average", res) 22 | res = measure_time( 23 | "fct", context={"fct": fct}, div_by_number=True, number=1000) 24 | self.assertIn("average", res) 25 | 26 | 27 | if __name__ == "__main__": 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /_unittests/ut_utils/test_utils_classes.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=3s) 3 | """ 4 | 5 | import unittest 6 | from onnxcustom.utils.imagenet_classes import ( 7 | class_names, get_class_names) 8 | 9 | 10 | class TestUtilsClasses(unittest.TestCase): 11 | 12 | def test_classes(self): 13 | cl = class_names 14 | self.assertIsInstance(cl, dict) 15 | self.assertEqual(len(cl), 1000) 16 | 17 | def test_get_classes(self): 18 | cl = get_class_names() 19 | self.assertIsInstance(cl, dict) 20 | self.assertEqual(len(cl), 1000) 21 | 22 | 23 | if __name__ == "__main__": 24 | unittest.main() 25 | -------------------------------------------------------------------------------- /_unittests/ut_utils/test_utils_nvjson.py: -------------------------------------------------------------------------------- 1 | """ 2 | @brief test log(time=5s) 3 | """ 4 | import os 5 | import unittest 6 | from pyquickhelper.pycode import ExtTestCase, get_temp_folder 7 | from pyquickhelper.loghelper.buffered_flog import BufferedPrint 8 | from onnxcustom.utils.nvprof2json import ( 9 | convert_trace_to_json, json_to_dataframe, 10 | 
json_to_dataframe_streaming) 11 | 12 | 13 | class TestConvertTraceToJson(ExtTestCase): 14 | 15 | def test_convert_trace_to_json(self): 16 | """ 17 | This file was generated with the following command line: 18 | 19 | :: 20 | 21 | nvprof -o bench_ortmodule_nn_gpu.sql python plot_orttraining_linear_regression_gpu.py 22 | 23 | To get the profile which can be displayed by the nvidia profiler: 24 | 25 | :: 26 | 27 | nvprof -o bench_ortmodule_nn_gpu.nvvp python plot_orttraining_linear_regression_gpu.py 28 | """ 29 | temp = get_temp_folder(__file__, 'temp_convert_trace_to_json') 30 | data = os.path.join(temp, "..", "data", 31 | "bench_ortmodule_nn_gpu.sql.zip") 32 | output = os.path.join(temp, "bench_ortmodule_nn_gpu.json") 33 | tempf = os.path.join(temp, "bench_ortmodule_nn_gpu.sql") 34 | buf = BufferedPrint() 35 | convert_trace_to_json(data, output=output, temporary_file=tempf, 36 | verbose=1, fLOG=buf.fprint) 37 | self.assertIn("step 1 begin.", str(buf)) 38 | jst = convert_trace_to_json(data, temporary_file=tempf) 39 | self.assertExists(output) 40 | self.assertExists(tempf) 41 | df = json_to_dataframe(jst) 42 | df2 = json_to_dataframe(output) 43 | with open(output, "r", encoding="utf-8") as f: 44 | df3 = json_to_dataframe(f) 45 | self.assertEqual(df.shape, df2.shape) 46 | self.assertEqual(df.shape, df3.shape) 47 | 48 | self.assertRaise(lambda: json_to_dataframe_streaming(jst, chunksize=100), 49 | RuntimeError) 50 | dfs2 = json_to_dataframe_streaming(output, chunksize=100) 51 | with open(output, "r", encoding="utf-8") as f: 52 | dfs3 = json_to_dataframe_streaming(f, chunksize=100) 53 | shape3 = dfs3.shape 54 | shape2 = dfs2.shape 55 | self.assertEqual(shape2, shape3) 56 | cols = list(df.columns) 57 | self.assertEqual(cols, 58 | ['name', 'ph', 'cat', 'ts', 59 | 'dur', 'tid', 'pid', 'args', 'ts_sec']) 60 | self.assertEqual(set(df.ph), {'X'}) 61 | self.assertEqual(set(df.cat), {'cuda'}) 62 | 63 | 64 | if __name__ == "__main__": 65 | unittest.main() 66 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | image: 2 | - Visual Studio 2019 3 | environment: 4 | matrix: 5 | - PYTHON: "C:\\Python310-x64" 6 | PYTHON_VERSION: "3.10.x" 7 | PYTHON_ARCH: "64" 8 | init: 9 | - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%" 10 | 11 | install: 12 | - "%PYTHON%\\python -m pip install -r requirements.txt" 13 | - "%PYTHON%\\python -m pip install -r requirements-dev.txt" 14 | - "%PYTHON%\\python -m pip install pytest" 15 | - "%PYTHON%\\python -m pip install https://github.com/sdpython/onnxruntime/releases/download/v1.14.0/onnxruntime_training-1.14.92+cpu-cp310-cp310-win_amd64.whl" 16 | - "%PYTHON%\\python -m pip install https://github.com/sdpython/onnxruntime/releases/download/v1.14.0/aten_op_executor-1.13.0+cpu-cp310-cp310-win_amd64.whl" 17 | - "%PYTHON%\\python -m pip install https://github.com/sdpython/onnxruntime/releases/download/v1.14.0/torch_interop_utils-1.13.0+cpu-cp310-cp310-win_amd64.whl" 18 | 19 | build: off 20 | 21 | before_test: 22 | - "%PYTHON%\\python -u setup.py build_ext --inplace" 23 | 24 | test_script: 25 | - "%PYTHON%\\python -m pytest -v _unittests" 26 | - "%PYTHON%\\python -m flake8 onnxcustom --max-line-length=100 --ignore=E731,W504" 27 | - "%PYTHON%\\python -m flake8 _doc/examples --max-line-length=100 --ignore=E731,W504" 28 | 29 | after_test: 30 | - "%PYTHON%\\python setup.py bdist_wheel" 31 | - "%PYTHON%\\python -m onnxcustom check" 32 | 33 | artifacts: 34 | - 
path: dist 35 | name: onnxcustom 36 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: 'TestLinux' 3 | pool: 4 | vmImage: 'ubuntu-latest' 5 | strategy: 6 | matrix: 7 | Python310-Linux: 8 | python.version: '3.10' 9 | maxParallel: 3 10 | 11 | steps: 12 | - task: UsePythonVersion@0 13 | inputs: 14 | versionSpec: '$(python.version)' 15 | architecture: 'x64' 16 | - script: sudo apt-get update 17 | displayName: 'AptGet Update' 18 | - script: sudo apt-get install -y pandoc 19 | displayName: 'Install Pandoc' 20 | - script: sudo apt-get install -y inkscape 21 | displayName: 'Install Inkscape' 22 | - script: sudo apt-get install -y graphviz 23 | displayName: 'Install Graphviz' 24 | - script: python -m pip install --upgrade pip setuptools wheel 25 | displayName: 'Install tools' 26 | - script: pip install -r requirements.txt 27 | displayName: 'Install Requirements' 28 | - script: pip install -r requirements-dev.txt 29 | displayName: 'Install Requirements dev' 30 | # - script: pip install onnxruntime-training --extra-index-url https://download.onnxruntime.ai/onnxruntime_nightly_cpu.html 31 | # displayName: 'Install onnxruntime-training' 32 | - script: pip install onnxruntime-training 33 | displayName: 'Install onnxruntime-training' 34 | - script: | 35 | python -u setup.py build_ext --inplace 36 | displayName: 'Runs Unit Tests' 37 | - script: | 38 | python -m pytest _unittests -v -v 39 | displayName: 'Runs Unit Tests' 40 | - script: | 41 | python -u setup.py bdist_wheel 42 | displayName: 'Build Package' 43 | - script: | 44 | python -m onnxcustom check 45 | displayName: 'Check speed' 46 | - task: PublishPipelineArtifact@0 47 | inputs: 48 | artifactName: 'wheel-linux-$(python.version)' 49 | targetPath: 'dist' 50 | 51 | - job: 'TestMac' 52 | pool: 53 | vmImage: 'macOS-latest' 54 | strategy: 55 | matrix: 56 | Python39-Mac: 57 | python.version: '3.9' 58 | maxParallel: 3 59 | 60 | steps: 61 | - task: UsePythonVersion@0 62 | inputs: 63 | versionSpec: '$(python.version)' 64 | architecture: 'x64' 65 | - script: gcc --version 66 | displayName: 'gcc version' 67 | - script: | 68 | brew update 69 | displayName: 'brew update' 70 | - script: export 71 | displayName: 'export' 72 | - script: gcc --version 73 | displayName: 'gcc version' 74 | - script: brew install llvm 75 | displayName: 'install llvm' 76 | - script: brew install libomp 77 | displayName: 'Install omp' 78 | - script: brew install p7zip 79 | displayName: 'Install p7zip' 80 | - script: python -m pip install --upgrade pip setuptools wheel 81 | displayName: 'Install tools' 82 | - script: brew install pybind11 83 | displayName: 'Install pybind11' 84 | - script: pip install -r requirements.txt 85 | displayName: 'Install Requirements' 86 | - script: pip install -r requirements-dev.txt 87 | displayName: 'Install Requirements dev' 88 | - script: pip install onnxruntime 89 | displayName: 'Install onnxruntime' 90 | - script: | 91 | export MACOSX_DEPLOYMENT_TARGET=10.13 92 | python setup.py build_ext --inplace 93 | displayName: 'Build package' 94 | - script: | 95 | python -m pytest _unittests -v -v 96 | displayName: 'Runs Unit Tests' 97 | - script: | 98 | python -u setup.py bdist_wheel 99 | displayName: 'Build Package' 100 | - script: | 101 | python -m onnxcustom check 102 | displayName: 'Check speed' 103 | - task: PublishPipelineArtifact@0 104 | inputs: 105 | artifactName: 'wheel-mac-$(python.version)' 106 | 
targetPath: 'dist' 107 | 108 | -------------------------------------------------------------------------------- /build_script.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | if "%1"=="" goto default_value_python: 3 | set pythonexe="%1" 4 | %pythonexe% setup.py write_version 5 | goto custom_python: 6 | 7 | :default_value_python: 8 | set pythonexe="c:\Python395_x64\python.exe" 9 | if not exist %pythonexe% set pythonexe="c:\Python391_x64\python.exe" 10 | :custom_python: 11 | @echo [python] %pythonexe% 12 | %pythonexe% -u setup.py build_script 13 | if %errorlevel% neq 0 exit /b %errorlevel% -------------------------------------------------------------------------------- /onnxcustom/__init__.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # flake8: noqa: F401 3 | # pylint: disable=W0611,C0415 4 | """ 5 | @file 6 | @brief Experimentation with ONNX, examples. 7 | """ 8 | 9 | __version__ = "0.4.344" 10 | __author__ = "Xavier Dupré, ..." 11 | __max_supported_opset__ = 15 # Converters are tested up to this version. 12 | __max_supported_opsets__ = { 13 | '': __max_supported_opset__, 14 | 'ai.onnx.ml': 2} 15 | 16 | 17 | def check(verbose=1): 18 | """ 19 | Runs a couple of functions to check the module is working. 20 | 21 | :param verbose: 0 to hide the standard output 22 | :return: list of dictionaries, result of each test 23 | """ 24 | tests = [] 25 | try: 26 | import onnx 27 | import onnx.helper 28 | except ImportError as e: # pragma: no cover 29 | tests.append(dict(test='onnx', exc=e)) 30 | try: 31 | import onnxruntime 32 | from onnxruntime import InferenceSession 33 | except ImportError as e: # pragma: no cover 34 | tests.append(dict(test='onnxruntime', exc=e)) 35 | try: 36 | import onnxruntime.training 37 | from onnxruntime.training import TrainingSession 38 | except ImportError as e: # pragma: no cover 39 | tests.append(dict(test='onnxruntime_training', exc=e)) 40 | try: 41 | import skl2onnx 42 | from skl2onnx import to_onnx 43 | except ImportError as e: # pragma: no cover 44 | tests.append(dict(test='skl2onnx', exc=e)) 45 | return tests 46 | 47 | 48 | def get_max_opset(): 49 | """ 50 | Returns the highest available onnx opset version. 51 | """ 52 | from onnx.defs import onnx_opset_version 53 | return min(onnx_opset_version(), __max_supported_opset__) 54 | -------------------------------------------------------------------------------- /onnxcustom/__main__.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=C0415 2 | """ 3 | @file 4 | @brief Implements command line ``python -m onnxcustom <command>``. 5 | """ 6 | 7 | 8 | def main(args, fLOG=print): 9 | """ 10 | Implements ``python -m onnxcustom <command>``. 11 | 12 | :param args: command line arguments 13 | :param fLOG: logging function 14 | """ 15 | from pyquickhelper.cli import cli_main_helper 16 | try: 17 | from .
import check 18 | from .cli.profiling import nvprof2json 19 | except ImportError: # pragma: no cover 20 | from onnxcustom import check 21 | from onnxcustom.cli.profiling import nvprof2json 22 | 23 | fcts = dict(nvprof2json=nvprof2json, check=check) 24 | return cli_main_helper(fcts, args=args, fLOG=fLOG) 25 | 26 | 27 | if __name__ == "__main__": 28 | import sys # pragma: no cover 29 | main(sys.argv[1:]) # pragma: no cover 30 | -------------------------------------------------------------------------------- /onnxcustom/cli/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file 3 | @brief Shortcuts to *cli*. 4 | """ 5 | -------------------------------------------------------------------------------- /onnxcustom/cli/profiling.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file 3 | @brief Command lines for profiling. 4 | """ 5 | from ..utils.nvprof2json import convert_trace_to_json 6 | 7 | 8 | def nvprof2json(filename, output="", temporary_file="", verbose=1, 9 | fLOG=print): 10 | """ 11 | Converts traces produced by :epkg:`nvprof` and saved with 12 | format :epkg:`sqlite3` (extension `.sql`). 13 | 14 | :param filename: filename 15 | :param output: output file, if left empty, the result is printed 16 | on the standard output 17 | :param temporary_file: if the file needs to be unzipped, 18 | this file will be created to hold the unzipped content; 19 | it is not cleaned after the unzipping. 20 | :param verbose: verbosity 21 | :param fLOG: logging function 22 | :return: json (if output is None, the list of events otherwise) 23 | 24 | .. cmdref:: 25 | :title: Converts a profile stored by nvprof into json 26 | :cmd: -m onnxcustom nvprof2json --help 27 | 28 | The sqlite dump is generated with a command line similar to: 29 | 30 | :: 31 | 32 | nvprof -o gpu_profile.sql python plot_gpu_training.py 33 | 34 | The command produces a json file following the *Trace Event Format*. 35 | """ 36 | verbose = int(verbose) 37 | res = convert_trace_to_json(filename, output, verbose=verbose, 38 | temporary_file=temporary_file, fLOG=fLOG) 39 | if output is None: # pragma: no cover 40 | fLOG(res) 41 | -------------------------------------------------------------------------------- /onnxcustom/experiment/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: F401 2 | """ 3 | @file 4 | @brief Shortcuts to f8. 5 | """ 6 | -------------------------------------------------------------------------------- /onnxcustom/plotting/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: F401 2 | """ 3 | @file 4 | @brief Shortcuts to plotting. 5 | """ 6 | -------------------------------------------------------------------------------- /onnxcustom/plotting/plotting_onnx.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: F401 2 | """ 3 | @file 4 | @brief Shortcuts to plotting. 5 | """ 6 | from mlprodict.plotting.plotting_onnx import plot_onnx 7 | 8 | 9 | def plot_onnxs(*onx, ax=None, dpi=300, temp_dot=None, temp_img=None, 10 | show=False, title=None): 11 | """ 12 | Plots one or several ONNX graphs into a :epkg:`matplotlib` graph.
13 | 14 | :param onx: ONNX objects 15 | :param ax: existing axes 16 | :param dpi: resolution 17 | :param temp_dot: temporary file, 18 | if None, a file is created and removed 19 | :param temp_img: temporary image, 20 | if None, a file is created and removed 21 | :param show: calls `plt.show()` 22 | :param title: graph title 23 | :return: axes 24 | """ 25 | if len(onx) == 1: 26 | if ax is None: 27 | import matplotlib.pyplot as plt # pylint: disable=C0415 28 | ax = plt.gca() 29 | elif isinstance(ax, str) and ax == 'new': 30 | import matplotlib.pyplot as plt # pylint: disable=C0415 31 | _, ax = plt.subplots(1, 1) 32 | ax = plot_onnx(onx[0], ax=ax, dpi=dpi, temp_dot=temp_dot, 33 | temp_img=temp_img) 34 | if title is not None: 35 | ax.set_title(title) 36 | return ax 37 | elif len(onx) > 1 and isinstance(ax, str) and ax == 'new': 38 | ax = None 39 | 40 | if len(onx) == 0: 41 | raise ValueError( 42 | "Empty list of graph to plot.") 43 | 44 | if ax is None: 45 | import matplotlib.pyplot as plt # pylint: disable=C0415 46 | fig, ax = plt.subplots(1, len(onx)) 47 | else: 48 | fig = None 49 | if ax.shape[0] != len(onx): 50 | raise ValueError( 51 | f"ax must be an array of shape ({len(onx)}, ).") 52 | for i, ox in enumerate(onx): 53 | plot_onnx(ox, ax=ax[i], dpi=dpi, temp_dot=temp_dot, 54 | temp_img=temp_img) 55 | if title is None or isinstance(title, str): 56 | continue 57 | if i < len(title): 58 | ax[i].set_title(title[i]) 59 | if len(onx) > 1 and isinstance(title, str): 60 | if fig is None: 61 | raise ValueError( # pragma: no cover 62 | "Main title cannot be set if fig is undefined (title=%r, " 63 | "len(onx)=%d)" % (title, len(onx))) 64 | fig.suptitle(title) 65 | elif len(onx) == 1: 66 | if isinstance(title, list): 67 | title = title[0] 68 | ax.set_title(title) 69 | return ax 70 | -------------------------------------------------------------------------------- /onnxcustom/training/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file 3 | @brief Shortcuts to *training*. 4 | """ 5 | from .excs import ( # noqa 6 | ConvergenceError, ConvergenceWarning, 7 | EvaluationError, ProviderError) 8 | -------------------------------------------------------------------------------- /onnxcustom/training/excs.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file 3 | @brief Exceptions. 4 | """ 5 | 6 | 7 | class ConvergenceError(RuntimeError): 8 | """ 9 | Raised when a learning algorithm failed 10 | to converge. 11 | """ 12 | pass 13 | 14 | 15 | class ConvergenceWarning(UserWarning): 16 | """ 17 | Raised when a learning algorithm failed 18 | to converge. 19 | """ 20 | pass 21 | 22 | 23 | class EvaluationError(RuntimeError): 24 | """ 25 | Raised when an evaluation failed. 26 | """ 27 | pass 28 | 29 | 30 | class ProviderError(RuntimeError): 31 | """ 32 | Raised when an input is not on the expected device (CPU, GPU). 33 | """ 34 | pass 35 | -------------------------------------------------------------------------------- /onnxcustom/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file 3 | @brief Shortcuts to *utils*. 
"""

from .benchmark import measure_time  # noqa
from .print_helper import str_ortvalue  # noqa
--------------------------------------------------------------------------------
/onnxcustom/utils/benchmark.py:
--------------------------------------------------------------------------------
"""
@file
@brief Tools to help benchmarking.
"""
from timeit import Timer
import numpy


def measure_time(stmt, context=None, repeat=10, number=50, div_by_number=False):
    """
    Measures a statement and returns the results as a dictionary.

    :param stmt: statement to measure, as a string
    :param context: dictionary of the variables the statement relies on
    :param repeat: number of times the measure is repeated
        to compute the average
    :param number: number of executions of the statement in one measure
    :param div_by_number: if True, divides the durations by *number*
    :return: dictionary

    .. exref::
        :title: Measure the processing time of a function

        .. runpython::
            :showcode:

            from onnxcustom.utils import measure_time
            from math import cos

            res = measure_time("cos(x)", context=dict(cos=cos, x=5.))
            print(res)

    See `Timer.repeat
    <https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat>`_
    for a better understanding of parameters *repeat* and *number*.
    The function returns a duration corresponding to
    *number* times the execution of the main statement.
    """
    tim = Timer(stmt, globals=context or {})
    res = numpy.array(tim.repeat(repeat=repeat, number=number))
    if div_by_number:
        res /= number
    mean = numpy.mean(res)
    dev = numpy.mean(res ** 2)
    dev = (dev - mean**2) ** 0.5
    mes = dict(average=mean, deviation=dev, min_exec=numpy.min(res),
               max_exec=numpy.max(res), repeat=repeat, number=number)
    return mes
--------------------------------------------------------------------------------
/onnxcustom/utils/doc_helper.py:
--------------------------------------------------------------------------------
"""
@file
@brief Helpers to improve documentation rendering.
"""
import re


def fix_link_operator_md(markdown):
    """
    The rendering of file `Operators.md
    <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_
    breaks links. This function restores some of them.

    :param markdown: a string or a filename
    :return: modified content
    """
    if len(markdown) < 5000 and markdown.endswith('.md'):
        with open(markdown, 'r', encoding='utf-8') as f:
            content = f.read()
    else:
        content = markdown  # pragma: no cover

    reg = re.compile(
        "([|]<a href=\"#(?P<name>[.A-Za-z]+)\">(?P=name)</a>[|])")
    pattern = "|[{1}](#a-name-{0}-a-a-name-{0}-{0}-a)|"

    lines = content.split('\n')
    new_lines = []
    for line in lines:
        find = reg.search(line)
        if find:
            gr = find.groups()
            exp = gr[0]
            op = gr[1]
            rep = pattern.format(op.lower(), op).replace(".", "-")
            line = line.replace(exp, rep)
        new_lines.append(line)
    return "\n".join(new_lines)
--------------------------------------------------------------------------------
/onnxcustom/utils/print_helper.py:
--------------------------------------------------------------------------------
"""
@file
@brief Helpers to display internal structures.
"""


def str_ortvalue(ov):
    """
    Displays the content of an :epkg:`C_OrtValue`.
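
    A minimal sketch, assuming *onnxruntime* is installed:

    ::

        import numpy
        from onnxruntime import OrtValue
        from onnxcustom.utils import str_ortvalue

        ov = OrtValue.ortvalue_from_numpy(
            numpy.arange(20).astype(numpy.float32))
        print(str_ortvalue(ov))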

    :param ov: :epkg:`OrtValue` or :epkg:`C_OrtValue`
    :return: str
    """
    if hasattr(ov, '_ortvalue'):
        return str_ortvalue(ov._ortvalue)
    device = ov.device_name()
    value = ov.numpy()
    values = value.ravel().tolist()
    if len(values) > 10:
        values = values[:5] + ["..."] + values[-5:]
    return "device=%s dtype=%r shape=%r value=%r" % (
        device, value.dtype, value.shape, values)
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
autopep8
category_encoders
cerberus
codecov
coverage
cpyquickhelper
cxxfilt
flake8
h5py
joblib
jupyter_sphinx
jyquickhelper
lightgbm
loky
matplotlib
mlinsights
mlprodict>=0.8.1697
myst-parser
nbsphinx
onnxconverter-common
onnxmltools
openpyxl
pandas
pandas_streaming>=0.3
pillow
py-spy
pydata-sphinx-theme
pydot
pyinstrument
pylint
pyquickhelper>=1.10
pytest
pytest-cov
scikit-learn>=1.0
skl2onnx>=1.10.3
sphinx
sphinx-gallery
sphinxcontrib-blockdiag
sphinxcontrib.imagesvg
sympy
tabulate
tqdm
wheel
xgboost

# this dependency is needed for onnxruntime-training
torch
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
fire
numpy
onnx>=1.10.2
protobuf<4
scipy
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import sys
import os
from setuptools import setup, find_packages
from pyquicksetup import read_version, read_readme, default_cmdclass

#########
# settings
#########

project_var_name = "onnxcustom"
versionPython = f"{sys.version_info.major}.{sys.version_info.minor}"
path = "Lib/site-packages/" + project_var_name
readme = 'README.rst'
history = "HISTORY.rst"
requirements = None

KEYWORDS = [project_var_name, 'Xavier Dupré', 'onnx', 'machine learning',
            'training', 'onnxruntime']
DESCRIPTION = """Tutorials and examples about ONNX and onnxruntime: custom converters, benchmarking, profiling and training with onnxruntime-training."""
CLASSIFIERS = [
    'Programming Language :: Python :: 3',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering',
    'Topic :: Education',
    'License :: OSI Approved :: MIT License',
    'Development Status :: 5 - Production/Stable'
]


#######
# data
#######

packages = find_packages()
package_dir = {k: os.path.join('.', k.replace(".", "/")) for k in packages}
package_data = {
    project_var_name + ".data": ["*.csv"],
}


# setup

setup(
    name=project_var_name,
    version=read_version(__file__, project_var_name),
    author='Xavier Dupré',
    author_email='xavier.dupre@gmail.com',
    license="MIT",
    url=f"http://www.xavierdupre.fr/app/{project_var_name}/helpsphinx/index.html",
    download_url=f"https://github.com/sdpython/{project_var_name}/",
    description=DESCRIPTION,
    long_description=read_readme(__file__),
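    # read_version and read_readme come from pyquicksetup and pull the
    # version number and the long description from the package files;
    # default_cmdclass supplies the pyquicksetup command classes.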
    cmdclass=default_cmdclass(),
    keywords=KEYWORDS,
    classifiers=CLASSIFIERS,
    packages=packages,
    package_dir=package_dir,
    package_data=package_data,
    setup_requires=["pyquicksetup"],
    install_requires=["fire", "numpy", "onnx>=1.10.2", "protobuf<4", "scipy"],
    extras_require={
        'all': ["fire", "numpy", "onnx>=1.10.2", "protobuf<4",
                "scipy", "pandas_streaming>=0.3", "cxxfilt"]
    }
)
--------------------------------------------------------------------------------
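
A short usage sketch for ``fix_link_operator_md`` defined in
``onnxcustom/utils/doc_helper.py`` above; the local copy of the rendered
``Operators.md`` given here is a hypothetical path:

::

    from onnxcustom.utils.doc_helper import fix_link_operator_md

    fixed = fix_link_operator_md("Operators.md")
    with open("Operators.fixed.md", "w", encoding="utf-8") as f:
        f.write(fixed)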