├── .github ├── ISSUE_TEMPLATE │ ├── bug.md │ └── question.md ├── pull_request_template.md └── workflows │ └── validate.yml ├── .gitignore ├── LICENSE ├── PyTorchCustomOperator ├── README.md ├── ort_custom_op │ ├── CMakeLists.txt │ ├── custom_op.cc │ ├── custom_op.h │ └── custom_op_test.cc └── pytorch_custom_op │ ├── custom_group_norm.cpp │ ├── export_custom_op.py │ └── setup.py ├── README.md ├── examples └── CoreML │ └── ONNXLive │ ├── Models │ ├── candy.mlmodel │ ├── mosaic.mlmodel │ ├── rain_princess.mlmodel │ └── udnie.mlmodel │ ├── ONNXLive.xcodeproj │ ├── project.pbxproj │ ├── project.xcworkspace │ │ ├── contents.xcworkspacedata │ │ └── xcshareddata │ │ │ └── IDEWorkspaceChecks.plist │ └── xcshareddata │ │ └── xcschemes │ │ └── ONNXLive.xcscheme │ ├── README.md │ ├── Resources │ ├── Assets.xcassets │ │ └── AppIcon.appiconset │ │ │ ├── Contents.json │ │ │ ├── Icon.png │ │ │ ├── icon_20pt.png │ │ │ ├── icon_20pt@2x.png │ │ │ ├── icon_20pt@3x.png │ │ │ ├── icon_29pt-1.png │ │ │ ├── icon_29pt.png │ │ │ ├── icon_29pt@2x-1.png │ │ │ ├── icon_29pt@2x.png │ │ │ ├── icon_29pt@3x.png │ │ │ ├── icon_40pt-1.png │ │ │ ├── icon_40pt.png │ │ │ ├── icon_40pt@2x-1.png │ │ │ ├── icon_40pt@2x.png │ │ │ ├── icon_40pt@3x.png │ │ │ ├── icon_60pt@2x.png │ │ │ ├── icon_60pt@3x.png │ │ │ ├── icon_76pt.png │ │ │ ├── icon_76pt@2x.png │ │ │ └── icon_83.5@2x.png │ ├── Base.lproj │ │ └── LaunchScreen.storyboard │ └── Info.plist │ └── Source │ ├── AppDelegate.swift │ ├── CameraViewController.swift │ ├── Model.swift │ └── ModelExecutor.swift ├── setup.cfg ├── tutorials ├── BatchProcessingSequenceMap.ipynb ├── Caffe2OnnxExport.ipynb ├── ChainerOnnxExport.ipynb ├── CntkOnnxExport.ipynb ├── CorrectnessVerificationAndPerformanceComparison.ipynb ├── ExportModelFromPyTorchForWinML.md ├── ONNXMXNetServer.ipynb ├── OnnxCaffe2Import.ipynb ├── OnnxCntkImport.ipynb ├── OnnxCoremlImport.ipynb ├── OnnxMenohHaskellImport.ipynb ├── OnnxMxnetImport.ipynb ├── OnnxRuntimeServerSSDModel.ipynb ├── OnnxTensorflowExport.ipynb ├── OnnxTensorflowImport.ipynb ├── PreprocessingResnet50.ipynb ├── PytorchAddExportSupport.md ├── PytorchCaffe2MobileSqueezeNet.ipynb ├── PytorchCaffe2SuperResolution.ipynb ├── PytorchOnnxExport.ipynb ├── PytorchTensorflowMnist.ipynb ├── TensorflowToOnnx-1.ipynb ├── TensorflowToOnnx-2.ipynb ├── VersionConversion.md ├── VisualizingAModel.md ├── XAI4ONNX_dianna_overview.ipynb ├── assets │ ├── batchnorm.png │ ├── blueangels.jpg │ ├── cat.jpg │ ├── coco_classes.txt │ ├── dog.jpg │ ├── image.npz │ ├── onnx-ml.proto │ ├── onnx_ml_pb2.py │ ├── predict.proto │ ├── predict_pb2.py │ ├── squeezenet.onnx │ ├── squeezenet.png │ ├── super-res-input.jpg │ ├── super_resolution.onnx │ ├── tensorflow_to_onnx_example.py │ ├── tf-train-mnist.py │ ├── three.png │ └── two.png └── output │ └── README.md └── workflow_scripts ├── url_validator.py ├── validate_all_urls.py └── validate_changed_urls.py /.github/ISSUE_TEMPLATE/bug.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a bug report to help improve the ONNX. 4 | title: '' 5 | labels: 'bug' 6 | assignees: '' 7 | 8 | --- 9 | # Bug Report 10 | If the model conversion is failing for a **tutorial** in this repo, report the bug here. However, if the bug is related to general model conversion, please go to the appropriate converter repo. 11 | 12 | ### Describe the bug 13 | Please describe the bug clearly and concisely. 14 | 15 | ### System information 16 | - OS Platform and Distribution (*e.g. 
Linux Ubuntu 16.04*): 17 | - ONNX version (*e.g. 1.7*): 18 | - Python version: 19 | - GCC/Compiler version (if compiling from source): 20 | - CMake version: 21 | - Protobuf version: 22 | - Visual Studio version (if applicable): 23 | 24 | 25 | ### Reproduction instructions 26 | - Describe the code to reproduce the behavior. 27 | ``` 28 | import onnx 29 | model = onnx.load('model.onnx') 30 | ... 31 | ``` 32 | - Attach the ONNX model to the issue (where applicable) 33 | 34 | ### Expected behavior 35 | A clear and concise description of what you expected to happen. 36 | 37 | ### Notes 38 | 39 | Any additional information 40 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Ask a question about ONNX. 4 | title: '' 5 | labels: 'question' 6 | assignees: '' 7 | 8 | 9 | 10 | --- 11 | # Ask a Question 12 | 13 | ### Question 14 | Explain your question here. 15 | 16 | ### Further information 17 | - Relevant Area (*e.g. model usage, best practices, shape_inference, version_converter, training, test*): 18 | 19 | - Is this issue related to a specific model? 20 | **Model name** (*e.g. mnist*): 21 | **Model opset** (*e.g. 7*): 22 | Please upload the model and provide the link if possible. 23 | 24 | ### Notes 25 | Any additional information, code snippets. 26 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | **Description** 2 | - Describe your changes. 3 | 4 | **Motivation and Context** 5 | - Why is this change required? What problem does it solve? 6 | - If it fixes an open issue, please link to the issue here. 
-------------------------------------------------------------------------------- /.github/workflows/validate.yml: -------------------------------------------------------------------------------- 1 | name: validate 2 | 3 | on: 4 | schedule: 5 | - cron: '00 00 * * MON' 6 | push: 7 | branches: [main] 8 | pull_request: 9 | branches: [main] 10 | workflow_dispatch: 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | python-version: ['3.9',] 18 | architecture: ['x64'] 19 | steps: 20 | - name: Checkout onnx/tutorials 21 | uses: actions/checkout@v2 22 | 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | architecture: ${{ matrix.architecture }} 28 | 29 | - name: flake8 check 30 | run: | 31 | python -m pip install -q --upgrade pip 32 | python -m pip install -q flake8 33 | flake8 34 | 35 | - name: Validate changed URLs from this PR 36 | run: | 37 | git fetch --all 38 | python workflow_scripts/validate_changed_urls.py 39 | 40 | - name: Weekly validate all URLs under onnx/tutorials 41 | if: (github.event_name == 'schedule') # Only triggered by weekly event 42 | run: | 43 | python workflow_scripts/validate_all_urls.py -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | /tutorials/output/* 3 | /tutorials/output/*/ 4 | .DS_Store 5 | .idea/ 6 | !/tutorials/output/README.md 7 | *.pyc -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | https://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | https://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /PyTorchCustomOperator/ort_custom_op/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project (customop) 3 | add_definitions(-std=c++11) 4 | 5 | 6 | set(TEST_SOURCE custom_op_test.cc) 7 | set(HEADER custom_op.h) 8 | set(SOURCE custom_op.h) 9 | add_executable(customop ${SOURCE} ${HEADER} ${TEST_SOURCE}) 10 | 11 | #Include path to header files for Custom Op 12 | include_directories() 13 | include_directories() 14 | 15 | #Include path to header files for Custom Op Test 16 | include_directories() 17 | 18 | #Linking dependencies for Custom Op 19 | find_library(ONNXRUNTIME_LIBRARY onnxruntime HINTS ) 20 | target_link_libraries(customop PUBLIC ${ONNXRUNTIME_LIBRARY}) 21 | -------------------------------------------------------------------------------- /PyTorchCustomOperator/ort_custom_op/custom_op.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-License-Identifier: Apache-2.0 3 | */ 4 | 5 | #include 6 | #include "Eigen/Dense" 7 | #include "onnxruntime_cxx_api.h" 8 | 9 | template 10 | using ConstEigenVectorArrayMap = Eigen::Map>; 11 | template 12 | using EigenVectorArrayMap = Eigen::Map>; 13 | 14 | template 15 | void GroupNormKernel::Compute(OrtKernelContext* context) { 16 | // Setup inputs 17 | const OrtValue* input_X = ort_.KernelContext_GetInput(context, 0); 18 | const T* X_data = reinterpret_cast(ort_.GetTensorData(input_X)); 19 | const OrtValue* input_num_groups = ort_.KernelContext_GetInput(context, 1); 20 | const T* num_groups = reinterpret_cast(ort_.GetTensorData(input_num_groups)); 21 | const OrtValue* input_scale = ort_.KernelContext_GetInput(context, 2); 22 | const T* scale_data = reinterpret_cast(ort_.GetTensorData(input_scale)); 23 | const OrtValue* input_B = ort_.KernelContext_GetInput(context, 3); 24 | const T* B_data = reinterpret_cast(ort_.GetTensorData(input_B)); 25 | 26 | // Setup output 27 | OrtTensorDimensions dimensions(ort_, input_X); 28 | OrtValue* output = ort_.KernelContext_GetOutput(context, 0, dimensions.data(), dimensions.size()); 29 | float* out = ort_.GetTensorMutableData(output); 30 | const int64_t N = dimensions[0]; 31 | const int64_t C = dimensions[1] / num_groups[0]; // assume [N C*num_groups H W] per the spec 32 | 33 | OrtTensorTypeAndShapeInfo* output_info = ort_.GetTensorTypeAndShape(output); 34 | ort_.ReleaseTensorTypeAndShapeInfo(output_info); 35 | 36 | // Do computation 37 | int64_t sample_size = 1; 38 | for (size_t i = 2; i < dimensions.size(); ++i) { 39 | sample_size *= dimensions[i]; 40 | } 41 | sample_size *= C; 42 | 43 | for (auto i = 0; i < N * num_groups[0]; ++i) { 44 | ConstEigenVectorArrayMap Xi(X_data + sample_size * i, sample_size); 45 | const float Xi_mean = Xi.mean(); 46 | const float squared_norm = (Xi - Xi_mean).matrix().squaredNorm(); 47 | const float inv_stdev = 1.0f / std::sqrt(squared_norm / sample_size + epsilon_); 48 | EigenVectorArrayMap Yi(out + sample_size * i, sample_size); 49 | const float channel_scale = inv_stdev * scale_data[i % (C * int(num_groups[0]))]; 50 | const float channel_shift = B_data[i % (C * int(num_groups[0]))] - Xi_mean * channel_scale; 51 | Yi = Xi * channel_scale + channel_shift; 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /PyTorchCustomOperator/ort_custom_op/custom_op.h: 
-------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-License-Identifier: Apache-2.0 3 | */ 4 | 5 | #include 6 | #include "onnxruntime_cxx_api.h" 7 | 8 | struct Input { 9 | const char* name; 10 | std::vector dims; 11 | std::vector values; 12 | }; 13 | 14 | struct OrtTensorDimensions : std::vector { 15 | OrtTensorDimensions(Ort::CustomOpApi ort, const OrtValue* value) { 16 | OrtTensorTypeAndShapeInfo* info = ort.GetTensorTypeAndShape(value); 17 | std::vector::operator=(ort.GetTensorShape(info)); 18 | ort.ReleaseTensorTypeAndShapeInfo(info); 19 | } 20 | }; 21 | 22 | template 23 | struct GroupNormKernel { 24 | private: 25 | float epsilon_; 26 | Ort::CustomOpApi ort_; 27 | 28 | public: 29 | GroupNormKernel(Ort::CustomOpApi ort, const OrtKernelInfo* info) : ort_(ort) { 30 | epsilon_ = ort_.KernelInfoGetAttribute(info, "epsilon"); 31 | } 32 | 33 | void Compute(OrtKernelContext* context); 34 | }; 35 | 36 | 37 | struct GroupNormCustomOp : Ort::CustomOpBase> { 38 | void* CreateKernel(Ort::CustomOpApi api, const OrtKernelInfo* info) { return new GroupNormKernel(api, info); }; 39 | const char* GetName() const { return "testgroupnorm"; }; 40 | 41 | size_t GetInputTypeCount() const { return 4; }; 42 | ONNXTensorElementDataType GetInputType(size_t /*index*/) const { return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; }; 43 | 44 | size_t GetOutputTypeCount() const { return 1; }; 45 | ONNXTensorElementDataType GetOutputType(size_t /*index*/) const { return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT; }; 46 | }; 47 | 48 | #include "custom_op.cc" 49 | -------------------------------------------------------------------------------- /PyTorchCustomOperator/ort_custom_op/custom_op_test.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-License-Identifier: Apache-2.0 3 | */ 4 | 5 | #include 6 | #include "custom_op.h" 7 | #include "onnxruntime_cxx_api.h" 8 | 9 | typedef const char* PATH_TYPE; 10 | #define TSTR(X) (X) 11 | static constexpr PATH_TYPE MODEL_URI = TSTR("../../pytorch_custom_op/model.onnx"); 12 | 13 | template 14 | bool TestInference(Ort::Env& env, T model_uri, 15 | const std::vector& inputs, 16 | const char* output_name, 17 | const std::vector& expected_dims_y, 18 | const std::vector& expected_values_y, 19 | OrtCustomOpDomain* custom_op_domain_ptr) { 20 | Ort::SessionOptions session_options; 21 | std::cout << "Running simple inference with default provider" << std::endl; 22 | 23 | if (custom_op_domain_ptr) { 24 | session_options.Add(custom_op_domain_ptr); 25 | } 26 | 27 | Ort::Session session(env, model_uri, session_options); 28 | 29 | auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU); 30 | std::vector input_tensors; 31 | std::vector input_names; 32 | 33 | for (size_t i = 0; i < inputs.size(); i++) { 34 | input_names.emplace_back(inputs[i].name); 35 | input_tensors.emplace_back(Ort::Value::CreateTensor(memory_info, const_cast(inputs[i].values.data()), inputs[i].values.size(), inputs[i].dims.data(), inputs[i].dims.size())); 36 | } 37 | 38 | std::vector ort_outputs; 39 | ort_outputs = session.Run(Ort::RunOptions{nullptr}, input_names.data(), input_tensors.data(), input_tensors.size(), &output_name, 1); 40 | 41 | 42 | Ort::Value output_tensor{nullptr}; 43 | output_tensor = Ort::Value::CreateTensor(memory_info, const_cast(expected_values_y.data()), expected_values_y.size(), expected_dims_y.data(), expected_dims_y.size()); 44 | assert(ort_outputs.size() == 1); 45 | 46 | auto type_info = 
output_tensor.GetTensorTypeAndShapeInfo(); 47 | assert(type_info.GetShape() == expected_dims_y); 48 | size_t total_len = type_info.GetElementCount(); 49 | assert(expected_values_y.size() == total_len); 50 | 51 | float* f = output_tensor.GetTensorMutableData(); 52 | for (size_t i = 0; i != total_len; ++i) { 53 | assert(expected_values_y[i] == f[i]); 54 | } 55 | 56 | return true; 57 | 58 | } 59 | 60 | int main(int argc, char** argv) { 61 | 62 | Ort::Env env_= Ort::Env(ORT_LOGGING_LEVEL_INFO, "Default"); 63 | 64 | std::vector inputs(4); 65 | auto input = inputs.begin(); 66 | input->name = "X"; 67 | input->dims = {3, 2, 1, 2}; 68 | input->values = { 1.5410f, -0.2934f, -2.1788f, 0.5684f, -1.0845f, -1.3986f , 0.4033f, 0.8380f, -0.7193f, -0.4033f ,-0.5966f, 0.1820f}; 69 | 70 | input = std::next(input, 1); 71 | input->name = "num_groups"; 72 | input->dims = {1}; 73 | input->values = {2.f}; 74 | 75 | input = std::next(input, 1); 76 | input->name = "scale"; 77 | input->dims = {2}; 78 | input->values = {2.0f, 1.0f}; 79 | 80 | input = std::next(input, 1); 81 | input->name = "bias"; 82 | input->dims = {2}; 83 | input->values = {1.f, 0.f}; 84 | 85 | // prepare expected inputs and outputs 86 | std::vector expected_dims_y = {3, 2, 1, 2}; 87 | std::vector expected_values_y = { 3.0000f, -1.0000f, -1.0000f, 1.0000f, 2.9996f, -0.9996f, -0.9999f, 0.9999f, -0.9996f, 2.9996f, -1.0000f, 1.0000f}; 88 | 89 | GroupNormCustomOp custom_op; 90 | Ort::CustomOpDomain custom_op_domain("mydomain"); 91 | custom_op_domain.Add(&custom_op); 92 | 93 | return TestInference(env_, MODEL_URI, inputs, "Y", expected_dims_y, expected_values_y, custom_op_domain); 94 | } 95 | -------------------------------------------------------------------------------- /PyTorchCustomOperator/pytorch_custom_op/custom_group_norm.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-License-Identifier: Apache-2.0 3 | */ 4 | 5 | #include 6 | #include "Eigen/Dense" 7 | 8 | using ConstEigenVectorArrayMap = Eigen::Map>; 9 | using EigenVectorArrayMap = Eigen::Map>; 10 | 11 | torch::Tensor custom_group_norm(torch::Tensor X, torch::Tensor num_groups, torch::Tensor scale, torch::Tensor bias, double eps) { 12 | 13 | float* X_data = X.data(); 14 | float* scale_data = scale.data(); 15 | float* bias_data = bias.data(); 16 | int num_groups_i = int(num_groups.data()[0]); 17 | torch::Tensor output = torch::zeros(X.sizes()); 18 | float* out = output.data(); 19 | const int64_t N = X.size(0); 20 | const int64_t C = X.size(1) / num_groups_i; // assume [N C*num_groups H W] per the spec 21 | 22 | // Do computation 23 | int64_t sample_size = 1; 24 | for (auto i = 2; i < X.dim(); ++i) { 25 | sample_size *= X.size(i); 26 | } 27 | sample_size *= C; 28 | 29 | for (auto i = 0; i < N * num_groups_i; ++i) { 30 | ConstEigenVectorArrayMap Xi(X_data + sample_size * i, sample_size); 31 | const float Xi_mean = Xi.mean(); 32 | const float squared_norm = (Xi - Xi_mean).matrix().squaredNorm(); 33 | const float inv_stdev = 1.f / std::sqrt(squared_norm / sample_size + eps); 34 | EigenVectorArrayMap Yi(out + sample_size * i, sample_size); 35 | const float channel_scale = inv_stdev * scale_data[i % (C * num_groups_i)]; 36 | const float channel_shift = bias_data[i % (C * num_groups_i)] - Xi_mean * channel_scale; 37 | Yi = Xi * channel_scale + channel_shift; 38 | } 39 | 40 | return output.clone(); 41 | } 42 | 43 | static auto registry = 44 | torch::RegisterOperators("mynamespace::custom_group_norm", &custom_group_norm); 45 | 
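The C++ operator above can be sanity-checked from Python once the extension has been built with the accompanying setup.py. The snippet below is a minimal sketch (not part of the repository): it reuses the shared-object path hard-coded in export_custom_op.py, which will differ on other Python versions and platforms, and compares the custom op against torch.nn.functional.group_norm, which computes the same result for the [N, C*num_groups, H, W] layout assumed here.

```python
# Minimal sanity check for mynamespace::custom_group_norm (sketch; assumes the
# extension was built via `python setup.py build` -- adjust the .so path below
# to match your build output and Python version).
import torch

torch.ops.load_library(
    "build/lib.linux-x86_64-3.7/custom_group_norm.cpython-37m-x86_64-linux-gnu.so")

x = torch.randn(3, 2, 1, 2)        # [N, C*num_groups, H, W] with num_groups = 2
num_groups = torch.tensor([2.])
scale = torch.ones(2)
bias = torch.zeros(2)

# Custom op signature: (X, num_groups, scale, bias, eps) -> Tensor
y = torch.ops.mynamespace.custom_group_norm(x, num_groups, scale, bias, 1e-5)

# Reference result from PyTorch's built-in group norm with the same settings.
ref = torch.nn.functional.group_norm(x, 2, weight=scale, bias=bias, eps=1e-5)
print(torch.allclose(y, ref, atol=1e-4))  # expected: True for this layout
```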
-------------------------------------------------------------------------------- /PyTorchCustomOperator/pytorch_custom_op/export_custom_op.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | import torch 4 | 5 | 6 | def register_custom_op(): 7 | def my_group_norm(g, input, num_groups, scale, bias, eps): 8 | return g.op("mydomain::testgroupnorm", input, num_groups, scale, bias, epsilon_f=0.) 9 | 10 | from torch.onnx import register_custom_op_symbolic 11 | 12 | register_custom_op_symbolic("mynamespace::custom_group_norm", my_group_norm, 9) 13 | 14 | 15 | def export_custom_op(): 16 | class CustomModel(torch.nn.Module): 17 | def forward(self, x, num_groups, scale, bias): 18 | return torch.ops.mynamespace.custom_group_norm(x, num_groups, scale, bias, 0.) 19 | 20 | X = torch.randn(3, 2, 1, 2) 21 | num_groups = torch.tensor([2.]) 22 | scale = torch.tensor([1., 1.]) 23 | bias = torch.tensor([0., 0.]) 24 | inputs = (X, num_groups, scale, bias) 25 | 26 | f = './model.onnx' 27 | torch.onnx.export(CustomModel(), inputs, f, 28 | opset_version=9, 29 | example_outputs=None, 30 | input_names=["X", "num_groups", "scale", "bias"], output_names=["Y"], 31 | custom_opsets={"mydomain": 1}) 32 | 33 | 34 | torch.ops.load_library( 35 | "build/lib.linux-x86_64-3.7/custom_group_norm.cpython-37m-x86_64-linux-gnu.so") 36 | register_custom_op() 37 | export_custom_op() 38 | -------------------------------------------------------------------------------- /PyTorchCustomOperator/pytorch_custom_op/setup.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | from setuptools import setup 4 | from torch.utils import cpp_extension 5 | 6 | setup(name='custom_group_norm', 7 | ext_modules=[cpp_extension.CppExtension('custom_group_norm', ['custom_group_norm.cpp'], 8 | include_dirs=[''])], 9 | license='Apache License v2.0', 10 | cmdclass={'build_ext': cpp_extension.BuildExtension}) 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # [ONNX](https://github.com/onnx/onnx) Tutorials 4 | 5 | [Open Neural Network Exchange (ONNX)](https://onnx.ai/) is an open standard format for representing machine learning models. ONNX is supported by [a community of partners](https://onnx.ai/supported-tools) who have implemented it in many frameworks and tools. 6 | 7 | 8 | ## Getting ONNX models 9 | 10 | * Pre-trained models (validated): Many pre-trained ONNX models are provided for common scenarios in the [ONNX Model Zoo](https://github.com/onnx/models/tree/main/validated) 11 | * Pre-trained models (non-validated): Many pre-trained ONNX models are provided for common scenarios in the [ONNX Model Zoo](https://github.com/onnx/models). 12 | * Services: Customized ONNX models are generated for your data by cloud based services (see below) 13 | * Convert models from various frameworks (see below) 14 | 15 | ### Services 16 | Below is a list of services that can output ONNX models customized for your data. 
17 | * [Azure Custom Vision service](https://docs.microsoft.com/en-us/azure/cognitive-services/Custom-Vision-Service/custom-vision-onnx-windows-ml) 18 | * [Azure Machine Learning automated ML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml#use-with-onnx-in-c-apps) 19 | * [Lobe desktop app](https://lobe.ai) 20 | 21 | ### Converting to ONNX format 22 | | Framework / Tool | Installation | Tutorial | 23 | | --- | --- | --- | 24 | | [Caffe](https://github.com/BVLC/caffe) | [apple/coremltools](https://github.com/apple/coremltools) and [onnx/onnxmltools](https://github.com/onnx/onnxmltools) | [Example](https://github.com/onnx/onnx-docker/blob/master/onnx-ecosystem/converter_scripts/caffe_coreml_onnx.ipynb) | 25 | | [Caffe2](https://caffe2.ai) | [part of caffe2 package](https://github.com/pytorch/pytorch/tree/master/caffe2/python/onnx) | [Example](tutorials/Caffe2OnnxExport.ipynb) | 26 | | [Chainer](https://chainer.org/) | [chainer/onnx-chainer](https://github.com/chainer/onnx-chainer) | [Example](tutorials/ChainerOnnxExport.ipynb) | 27 | | [Cognitive Toolkit (CNTK)](https://learn.microsoft.com/en-us/cognitive-toolkit//) | [built-in](https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine) | [Example](tutorials/CntkOnnxExport.ipynb) | 28 | | [CoreML (Apple)](https://developer.apple.com/documentation/coreml) | [onnx/onnxmltools](https://github.com/onnx/onnxmltools) | [Example](https://github.com/onnx/onnx-docker/blob/master/onnx-ecosystem/converter_scripts/coreml_onnx.ipynb) | 29 | | [Keras](https://github.com/keras-team/keras) | [onnx/tensorflow-onnx](https://github.com/onnx/tensorflow-onnx) | [Example](https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/keras-resnet50.ipynb) | n/a | 30 | | [LibSVM](https://github.com/cjlin1/libsvm) | [onnx/onnxmltools](https://github.com/onnx/onnxmltools) | [Example](https://github.com/onnx/onnx-docker/blob/master/onnx-ecosystem/converter_scripts/libsvm_onnx.ipynb) | n/a | 31 | | [LightGBM](https://github.com/Microsoft/LightGBM) | [onnx/onnxmltools](https://github.com/onnx/onnxmltools) | [Example](https://github.com/onnx/onnx-docker/blob/master/onnx-ecosystem/converter_scripts/lightgbm_onnx.ipynb) | n/a | 32 | | [MATLAB](https://www.mathworks.com/) | [Deep Learning Toolbox](https://www.mathworks.com/matlabcentral/fileexchange/67296) | [Example](https://www.mathworks.com/help/deeplearning/ref/exportonnxnetwork.html) | 33 | | [ML.NET](https://github.com/dotnet/machinelearning/) | [built-in](https://www.nuget.org/packages/Microsoft.ML/) | [Example](https://github.com/dotnet/machinelearning/blob/master/test/Microsoft.ML.Tests/OnnxConversionTest.cs) | 34 | | [MXNet (Apache)](https://mxnet.incubator.apache.org/) | part of mxnet package [docs](https://mxnet.incubator.apache.org/api/python/contrib/onnx.html) [github](https://github.com/apache/incubator-mxnet/tree/master/python/mxnet/contrib/onnx) | [Example](tutorials/MXNetONNXExport.ipynb) | 35 | | [PyTorch](https://pytorch.org/) | [part of pytorch package](https://pytorch.org/docs/master/onnx.html) | [Example1](https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html), [Example2](tutorials/PytorchOnnxExport.ipynb), [export for Windows ML](tutorials/ExportModelFromPyTorchForWinML.md), [Extending support](tutorials/PytorchAddExportSupport.md) | 36 | | [SciKit-Learn](https://scikit-learn.org/) | [onnx/sklearn-onnx](https://github.com/onnx/sklearn-onnx) | [Example](https://onnx.ai/sklearn-onnx/index.html) | n/a | 37 | | [SINGA 
(Apache)](https://singa.apache.org/) - [Github](https://github.com/apache/incubator-singa/blob/master/python/singa/sonnx.py) (experimental) | [built-in](https://singa.apache.org/docs/installation/) | [Example](https://github.com/apache/incubator-singa/tree/master/examples/onnx) | 38 | | [TensorFlow](https://www.tensorflow.org/) | [onnx/tensorflow-onnx](https://github.com/onnx/tensorflow-onnx) | [Examples](https://github.com/onnx/tutorials/blob/master/tutorials/TensorflowToOnnx-1.ipynb) | 39 | 40 | 41 | ## Scoring ONNX Models 42 | Once you have an ONNX model, it can be scored with a variety of tools. 43 | 44 | | Framework / Tool | Installation | Tutorial | 45 | | --- | --- | --- | 46 | | [Caffe2](https://caffe2.ai) | [Caffe2](https://github.com/pytorch/pytorch/tree/master/caffe2/python/onnx) | [Example](tutorials/OnnxCaffe2Import.ipynb) | 47 | | [Cognitive Toolkit (CNTK)](https://learn.microsoft.com/en-us/cognitive-toolkit//) | [built-in](https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine) | [Example](tutorials/OnnxCntkImport.ipynb)| 48 | | [CoreML (Apple)](https://developer.apple.com/documentation/coreml) | [onnx/onnx-coreml](https://github.com/onnx/onnx-coreml) | [Example](tutorials/OnnxCoremlImport.ipynb)| 49 | | [MATLAB](https://www.mathworks.com/) | [Deep Learning Toolbox Converter](https://www.mathworks.com/matlabcentral/fileexchange/67296) | [Documentation and Examples](https://www.mathworks.com/help/deeplearning/ref/importonnxnetwork.html) | 50 | | [Menoh](https://github.com/pfnet-research/menoh) | [Github Packages](https://github.com/pfnet-research/menoh/releases) or from [Nuget](https://www.nuget.org/packages/Menoh/) | [Example](tutorials/OnnxMenohHaskellImport.ipynb) | 51 | | [ML.NET](https://github.com/dotnet/machinelearning/) | [Microsoft.ML Nuget Package](https://www.nuget.org/packages/Microsoft.ML/) | [Example](https://github.com/dotnet/machinelearning/blob/master/test/Microsoft.ML.OnnxTransformerTest/OnnxTransformTests.cs) | 52 | | [MXNet (Apache)](https://mxnet.incubator.apache.org/) - [Github](https://github.com/apache/incubator-mxnet/tree/master/python/mxnet/contrib/onnx) | [MXNet](https://mxnet.incubator.apache.org/versions/master/install/index.html?platform=Linux&language=Python&processor=CPU) | [API](https://mxnet.incubator.apache.org/api/python/contrib/onnx.html)
[Example](tutorials/OnnxMxnetImport.ipynb) | 53 | | [ONNX Runtime](https://github.com/microsoft/onnxruntime) | See [onnxruntime.ai](https://onnxruntime.ai) | [Documentation](https://onnxruntime.ai/docs/) | 54 | | [SINGA (Apache)](https://singa.apache.org/) - [Github](https://github.com/apache/incubator-singa/blob/master/python/singa/sonnx.py) (experimental) | [built-in](https://singa.apache.org/docs/installation/) | [Example](https://github.com/apache/incubator-singa/tree/master/examples/onnx) | 55 | | [TensorFlow](https://www.tensorflow.org/) | [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) | [Example](tutorials/OnnxTensorflowImport.ipynb) | 56 | | [TensorRT](https://developer.nvidia.com/tensorrt) | [onnx-tensorrt](https://github.com/onnx/onnx-tensorrt) | [Example](https://github.com/onnx/onnx-tensorrt/blob/master/README.md) | 57 | | [Windows ML](https://docs.microsoft.com/en-us/windows/ai/windows-ml) | Pre-installed on [Windows 10](https://docs.microsoft.com/en-us/windows/ai/release-notes) | [API](https://docs.microsoft.com/en-us/windows/ai/api-reference)<br/>Tutorials - [C++ Desktop App](https://docs.microsoft.com/en-us/windows/ai/get-started-desktop), [C# UWP App](https://docs.microsoft.com/en-us/windows/ai/get-started-uwp)<br/>[Examples](https://docs.microsoft.com/en-us/windows/ai/tools-and-samples) | 58 | | [Vespa.ai](https://vespa.ai) | [Vespa Getting Started Guide](https://docs.vespa.ai/en/getting-started.html) | [Real Time ONNX Inference](https://github.com/vespa-engine/sample-apps/tree/master/model-inference)<br/>Distributed Real Time ONNX Inference for [Search and Passage Ranking](https://github.com/vespa-engine/sample-apps/blob/master/msmarco-ranking/README.md) | 59 | 60 | 61 | ## End-to-End Tutorials 62 | Tutorials demonstrating how to use ONNX in practice for varied scenarios across frameworks, platforms, and device types. 63 | 64 | ### General 65 | * [AI-Serving](https://github.com/autodeployai/ai-serving/blob/master/examples/AIServingMnistOnnxModel.ipynb) 66 | * [AWS Lambda](https://github.com/michaelulin/pytorch-caffe2-aws-lambda) 67 | * [Cortex](https://towardsdatascience.com/how-to-deploy-onnx-models-in-production-60bd6abfd3ae) 68 | * MXNet 69 | * [MXNet Model Server](tutorials/ONNXMXNetServer.ipynb) 70 | * [AWS SageMaker and MXNet](https://github.com/aws/amazon-sagemaker-examples/blob/v0.1.0/sagemaker-python-sdk/mxnet_onnx_eia/mxnet_onnx_eia.ipynb) 71 | * [MXNet to ONNX to ML.NET with SageMaker, ECS and ECR](https://cosminsanda.com/posts/mxnet-to-onnx-to-ml.net-with-sagemaker-ecs-and-ecr/) - external link 72 | * ONNX Runtime 73 | * [ONNX Runtime Tutorials](https://onnxruntime.ai/docs/tutorials/) 74 | * [Azure ML and ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment/onnx) 75 | 76 | 77 | ### Mobile 78 | * [Converting SuperResolution model from PyTorch to Caffe2 with ONNX and deploying on mobile device](tutorials/PytorchCaffe2SuperResolution.ipynb) 79 | * [Transferring SqueezeNet from PyTorch to Caffe2 with ONNX and to Android app](tutorials/PytorchCaffe2MobileSqueezeNet.ipynb) 80 | * [Converting Style Transfer model from PyTorch to CoreML with ONNX and deploying to an iPhone](https://github.com/onnx/tutorials/tree/master/examples/CoreML/ONNXLive) 81 | * [Deploy ONNX Runtime on Mobile/Edge devices](https://onnxruntime.ai/docs/tutorials/mobile/) 82 | 83 | 84 | 85 | ### ONNX Quantization 86 | * [HuggingFace Bert Quantization with ONNX Runtime](https://github.com/microsoft/onnxruntime-inference-examples/blob/main/quantization/notebooks/bert/Bert-GLUE_OnnxRuntime_quantization.ipynb) 87 | 88 | 89 | ### ONNX as an intermediary format 90 | * [Convert a PyTorch model to TensorFlow using ONNX](tutorials/PytorchTensorflowMnist.ipynb) 91 | 92 | ### ONNX Custom Operators 93 | * [How to export a PyTorch model with a custom op to ONNX and run it in ONNX Runtime](PyTorchCustomOperator/README.md) 94 | 95 | ## Visualizing ONNX Models 96 | 97 | * [Netdrawer: Visualizing ONNX models](tutorials/VisualizingAModel.md) 98 | * [Netron: Viewer for ONNX models](https://github.com/lutzroeder/Netron) 99 | * [Zetane: 3D visualizer for ONNX models and internal tensors](https://github.com/zetane/viewer) 100 | 101 | ## Other ONNX tools 102 | 103 | * [Verifying correctness and comparing performance](tutorials/CorrectnessVerificationAndPerformanceComparison.ipynb) 104 | * [Example of operating on ONNX protobuf](https://github.com/onnx/onnx/blob/main/onnx/examples/Protobufs.ipynb) 105 | * [Float16 <-> Float32 converter](https://github.com/onnx/onnx-docker/blob/master/onnx-ecosystem/converter_scripts/float32_float16_onnx.ipynb) 106 | * [Version conversion](tutorials/VersionConversion.md) 107 | 108 | ## Application of ONNX 109 | * [Explainable AI for ONNX models](tutorials/XAI4ONNX_dianna_overview.ipynb) 110 | 111 | ## Contributing 112 | 113 | We welcome improvements to the converter tools and contributions of new ONNX bindings. Check out the [contributor guide](https://github.com/onnx/onnx/blob/main/CONTRIBUTING.md) to get started. 114 | 115 | Use ONNX for something cool? 
Send the tutorial to this repo by submitting a PR. 116 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Models/candy.mlmodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Models/candy.mlmodel -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Models/mosaic.mlmodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Models/mosaic.mlmodel -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Models/rain_princess.mlmodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Models/rain_princess.mlmodel -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Models/udnie.mlmodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Models/udnie.mlmodel -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/ONNXLive.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/ONNXLive.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | IDEDidComputeMac32BitWarning 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/ONNXLive.xcodeproj/xcshareddata/xcschemes/ONNXLive.xcscheme: -------------------------------------------------------------------------------- 1 | 2 | 5 | 8 | 9 | 15 | 21 | 22 | 23 | 24 | 25 | 30 | 31 | 32 | 33 | 39 | 40 | 41 | 42 | 43 | 44 | 55 | 57 | 63 | 64 | 65 | 66 | 67 | 68 | 74 | 76 | 82 | 83 | 84 | 85 | 87 | 88 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # ONNXLive Tutorial: 4 | This tutorial will show you to convert a neural style transfer model that has been exported from PyTorch and into the Apple CoreML format using ONNX. This will allow you to easily run deep learning models on Apple devices and, in this case, live stream from the camera. 5 | 6 | ## What is ONNX? 7 | ONNX (Open Neural Network Exchange) is an open format to represent deep learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools and choose the combination that is best for them. ONNX is developed and supported by a community of partners. You can learn more about ONNX and what tools are supported by going to [onnx.ai](https://onnx.ai/). 8 | 9 | ## Tutorial Overview 10 | 11 | This tutorial will walk you through 4 main steps: 12 | 13 | 1. 
[Download (or train) PyTorch style transfer models](#download-or-train-pytorch-style-transfer-models) 14 | 2. [Convert the PyTorch models to ONNX models](#convert-the-pytorch-models-to-onnx-models) 15 | 3. [Convert the ONNX models to CoreML models](#convert-the-onnx-models-to-coreml-models) 16 | 4. [Run the CoreML models in a style transfer iOS App](#run-the-coreml-models-in-a-style-transfer-ios-app) 17 | 18 | ## Preparing the Environment 19 | 20 | We will be working in a virtualenv in order to avoid conflicts with your local packages. 21 | We are also using Python 3.6 for this tutorial, but other versions should work as well. 22 | 23 | python3.6 -m venv venv 24 | source ./venv/bin/activate 25 | 26 | You need to install pytorch and the onnx->coreml converter 27 | 28 | pip install torchvision onnx-coreml 29 | 30 | You will also need to install XCode if you want to run the iOS style transfer app on your iPhone. 31 | You can also convert models in Linux, however to run the iOS app itself, you will need a Mac. 32 | 33 | ## Download (or train) PyTorch style transfer models 34 | 35 | For this tutorial, we will use the style transfer models that are published with pytorch in https://github.com/pytorch/examples/tree/master/fast_neural_style . 36 | If you would like to use a different PyTorch or ONNX model, feel free to skip this step. 37 | 38 | These models are meant for applying style transfer on still images and really not optimized to be fast enough for video. However if we reduce the resolution low enough, they can also work well on videos. 39 | 40 | Let's download the models. 41 | 42 | git clone https://github.com/pytorch/examples 43 | cd examples/fast_neural_style 44 | 45 | If you would like to train the models yourself, the pytorch/examples repository you just cloned has more information on how to do this. 46 | For now, we'll just download pre-trained models with the script provided by the repository: 47 | 48 | ./download_saved_models.py 49 | 50 | This script downloads the pre-trained PyTorch models and puts them into the `saved_models` folder. 51 | There should now be 4 files, `candy.pth`, `mosaic.pth`, `rain_princess.pth` and `udnie.pth` in your directory. 52 | 53 | ## Convert the PyTorch models to ONNX models 54 | 55 | Now that we have the pre-trained PyTorch models as `.pth` files in the `saved_models` folder, we will need to convert them to ONNX format. 56 | The model definition is in the pytorch/examples repository we cloned previously, and with a few lines of python we can export it to ONNX. 57 | In this case, instead of actually running the neural net, we will call `torch.onnx._export`, which is provided with PyTorch as an api to directly export ONNX formatted models from PyTorch. 58 | However, in this case we don't even need to do that, because a script already exists `neural_style/neural_style.py` that will do this for us. 59 | You can also take a look at that script if you would like to apply it to other models. 60 | 61 | Exporting the ONNX format from PyTorch is essentially tracing your neural network so this api call will internally run the network on 'dummy data' in order to generate the graph. 62 | For this, it needs an input image to apply the style transfer to which can simply be a blank image. 63 | However, the pixel size of this image is important, as this will be the size for the exported style transfer model. 64 | To get good performance, we'll use a resolution of 250x540. 
Feel free to take a larger resolution if you care less about 65 | FPS and more about style transfer quality. 66 | 67 | Let's create a blank image of the resolution we want 68 | 69 | convert -size 250x540 xc:white png24:dummy.jpg 70 | 71 | and use that to export the PyTorch models 72 | 73 | python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/candy.pth --cuda 0 --export_onnx ./saved_models/candy.onnx 74 | python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/udnie.pth --cuda 0 --export_onnx ./saved_models/udnie.onnx 75 | python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/rain_princess.pth --cuda 0 --export_onnx ./saved_models/rain_princess.onnx 76 | python ./neural_style/neural_style.py eval --content-image dummy.jpg --output-image dummy-out.jpg --model ./saved_models/mosaic.pth --cuda 0 --export_onnx ./saved_models/mosaic.onnx 77 | 78 | You should end up with 4 files, `candy.onnx`, `mosaic.onnx`, `rain_princess.onnx` and `udnie.onnx`, 79 | created from the corresponding `.pth` files. 80 | 81 | ## Convert the ONNX models to CoreML models 82 | 83 | Now that we have ONNX models, we can convert them to CoreML models in order to run them on Apple devices. 84 | For this, we use the onnx-coreml converter we installed previously. 85 | The converter comes with a `convert-onnx-to-coreml` script, which the installation steps above added to our path. Unfortunately that won't work for us as we need to mark the input and output of the network as an image 86 | and, while this is supported by the converter, it is only supported when calling the converter from python. 87 | 88 | Looking at the style transfer model (for example opening the .onnx file in an application like [Netron](https://github.com/lutzroeder/Netron)), 89 | we see that the input is named '0' and the output is named '186'. These are just numeric ids assigned by PyTorch. 90 | We will need to mark these as images. 91 | 92 | So let's create a small python file and call it `onnx_to_coreml.py`. This can be created by using the touch command and edited with your favorite editor to add the following lines of code. 93 | 94 | import sys 95 | from onnx import onnx_pb 96 | from onnx_coreml import convert 97 | 98 | model_in = sys.argv[1] 99 | model_out = sys.argv[2] 100 | 101 | model_file = open(model_in, 'rb') 102 | model_proto = onnx_pb.ModelProto() 103 | model_proto.ParseFromString(model_file.read()) 104 | coreml_model = convert(model_proto, image_input_names=['0'], image_output_names=['186']) 105 | coreml_model.save(model_out) 106 | 107 | we now run it 108 | 109 | python onnx_to_coreml.py ./saved_models/candy.onnx ./saved_models/candy.mlmodel 110 | python onnx_to_coreml.py ./saved_models/udnie.onnx ./saved_models/udnie.mlmodel 111 | python onnx_to_coreml.py ./saved_models/rain_princess.onnx ./saved_models/rain_princess.mlmodel 112 | python onnx_to_coreml.py ./saved_models/mosaic.onnx ./saved_models/mosaic.mlmodel 113 | 114 | Now, there should be 4 CoreML models in your `saved_models` directory: `candy.mlmodel`, `mosaic.mlmodel`, `rain_princess.mlmodel` and `udnie.mlmodel`. 115 | 116 | ## Run the CoreML models in a style transfer iOS App 117 | 118 | This repository (i.e. the one you're currently reading the README.md of) contains an iOS app able to run CoreML style transfer models on a live camera stream from your phone camera. 
Let's clone the repository 119 | 120 | git clone https://github.com/onnx/tutorials 121 | 122 | and open the `tutorials/examples/CoreML/ONNXLive/ONNXLive.xcodeproj` project in XCode. 123 | We recommend using XCode 9.3 and an iPhone X. There might be issues running on older devices or XCode versions. 124 | 125 | In the `Models/` folder, the project contains some .mlmodel files. We're going to replace them with the models we just created. 126 | 127 | You then run the app on your iPhone and you are all set. Tapping on the screen switches through the models. 128 | 129 | ## Conclusion 130 | 131 | We hope this tutorial gave you an overview of what ONNX is about and how you can use it to convert neural networks 132 | between frameworks, in this case neural style transfer models moving from PyTorch to CoreML. 133 | 134 | Feel free to experiment with these steps and test them on your own models. 135 | Please let us know if you hit any issues or want to give feedback. We'd like to hear what you think. 136 | 137 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "size" : "20x20", 5 | "idiom" : "iphone", 6 | "filename" : "icon_20pt@2x.png", 7 | "scale" : "2x" 8 | }, 9 | { 10 | "size" : "20x20", 11 | "idiom" : "iphone", 12 | "filename" : "icon_20pt@3x.png", 13 | "scale" : "3x" 14 | }, 15 | { 16 | "size" : "29x29", 17 | "idiom" : "iphone", 18 | "filename" : "icon_29pt.png", 19 | "scale" : "1x" 20 | }, 21 | { 22 | "size" : "29x29", 23 | "idiom" : "iphone", 24 | "filename" : "icon_29pt@2x.png", 25 | "scale" : "2x" 26 | }, 27 | { 28 | "size" : "29x29", 29 | "idiom" : "iphone", 30 | "filename" : "icon_29pt@3x.png", 31 | "scale" : "3x" 32 | }, 33 | { 34 | "size" : "40x40", 35 | "idiom" : "iphone", 36 | "filename" : "icon_40pt@2x.png", 37 | "scale" : "2x" 38 | }, 39 | { 40 | "size" : "40x40", 41 | "idiom" : "iphone", 42 | "filename" : "icon_40pt@3x.png", 43 | "scale" : "3x" 44 | }, 45 | { 46 | "size" : "60x60", 47 | "idiom" : "iphone", 48 | "filename" : "icon_60pt@2x.png", 49 | "scale" : "2x" 50 | }, 51 | { 52 | "size" : "60x60", 53 | "idiom" : "iphone", 54 | "filename" : "icon_60pt@3x.png", 55 | "scale" : "3x" 56 | }, 57 | { 58 | "size" : "20x20", 59 | "idiom" : "ipad", 60 | "filename" : "icon_20pt.png", 61 | "scale" : "1x" 62 | }, 63 | { 64 | "size" : "20x20", 65 | "idiom" : "ipad", 66 | "filename" : "icon_40pt.png", 67 | "scale" : "2x" 68 | }, 69 | { 70 | "size" : "29x29", 71 | "idiom" : "ipad", 72 | "filename" : "icon_29pt-1.png", 73 | "scale" : "1x" 74 | }, 75 | { 76 | "size" : "29x29", 77 | "idiom" : "ipad", 78 | "filename" : "icon_29pt@2x-1.png", 79 | "scale" : "2x" 80 | }, 81 | { 82 | "size" : "40x40", 83 | "idiom" : "ipad", 84 | "filename" : "icon_40pt-1.png", 85 | "scale" : "1x" 86 | }, 87 | { 88 | "size" : "40x40", 89 | "idiom" : "ipad", 90 | "filename" : "icon_40pt@2x-1.png", 91 | "scale" : "2x" 92 | }, 93 | { 94 | "size" : "76x76", 95 | "idiom" : "ipad", 96 | "filename" : "icon_76pt.png", 97 | "scale" : "1x" 98 | }, 99 | { 100 | "size" : "76x76", 101 | "idiom" : "ipad", 102 | "filename" : "icon_76pt@2x.png", 103 | "scale" : "2x" 104 | }, 105 | { 106 | "size" : "83.5x83.5", 107 | "idiom" : "ipad", 108 | "filename" : "icon_83.5@2x.png", 109 | "scale" : "2x" 110 | }, 111 | { 112 | "size" : "1024x1024", 113 | "idiom" : "ios-marketing", 114 | "filename" : "Icon.png", 115 | 
"scale" : "1x" 116 | } 117 | ], 118 | "info" : { 119 | "version" : 1, 120 | "author" : "xcode" 121 | } 122 | } -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/Icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/Icon.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_20pt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_20pt.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_20pt@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_20pt@2x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_20pt@3x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_20pt@3x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt-1.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt@2x-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt@2x-1.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt@2x.png 
-------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt@3x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_29pt@3x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt-1.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt@2x-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt@2x-1.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt@2x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt@3x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_40pt@3x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_60pt@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_60pt@2x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_60pt@3x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_60pt@3x.png -------------------------------------------------------------------------------- 
/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_76pt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_76pt.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_76pt@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_76pt@2x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_83.5@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/examples/CoreML/ONNXLive/Resources/Assets.xcassets/AppIcon.appiconset/icon_83.5@2x.png -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Base.lproj/LaunchScreen.storyboard: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Resources/Info.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | $(DEVELOPMENT_LANGUAGE) 7 | CFBundleDisplayName 8 | ONNXLive 9 | CFBundleExecutable 10 | $(EXECUTABLE_NAME) 11 | CFBundleIdentifier 12 | $(PRODUCT_BUNDLE_IDENTIFIER) 13 | CFBundleInfoDictionaryVersion 14 | 6.0 15 | CFBundleName 16 | $(PRODUCT_NAME) 17 | CFBundlePackageType 18 | APPL 19 | CFBundleShortVersionString 20 | 1.0 21 | CFBundleVersion 22 | 1 23 | LSRequiresIPhoneOS 24 | 25 | NSCameraUsageDescription 26 | To see ML model results. 27 | UILaunchStoryboardName 28 | LaunchScreen 29 | UIRequiredDeviceCapabilities 30 | 31 | armv7 32 | 33 | UIRequiresFullScreen 34 | 35 | UISupportedInterfaceOrientations 36 | 37 | UIInterfaceOrientationPortrait 38 | UIInterfaceOrientationLandscapeLeft 39 | UIInterfaceOrientationLandscapeRight 40 | 41 | UISupportedInterfaceOrientations~ipad 42 | 43 | UIInterfaceOrientationPortrait 44 | UIInterfaceOrientationPortraitUpsideDown 45 | UIInterfaceOrientationLandscapeLeft 46 | UIInterfaceOrientationLandscapeRight 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Source/AppDelegate.swift: -------------------------------------------------------------------------------- 1 | /** 2 | * SPDX-License-Identifier: Apache-2.0 3 | * 4 | * Copyright (c) Facebook, Inc. and Microsoft Corporation. 5 | */ 6 | 7 | import UIKit 8 | 9 | @UIApplicationMain 10 | class AppDelegate: UIResponder, UIApplicationDelegate { 11 | 12 | var window: UIWindow? 13 | 14 | func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplicationLaunchOptionsKey: Any]?) 
-> Bool { 15 | let window = UIWindow() 16 | window.rootViewController = CameraViewController() 17 | window.makeKeyAndVisible() 18 | 19 | self.window = window 20 | 21 | return true 22 | } 23 | } 24 | 25 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Source/CameraViewController.swift: -------------------------------------------------------------------------------- 1 | /** 2 | * SPDX-License-Identifier: Apache-2.0 3 | * 4 | * Copyright (c) Facebook, Inc. and Microsoft Corporation. 5 | */ 6 | 7 | import UIKit 8 | import MobileCoreServices 9 | import Vision 10 | import CoreML 11 | import AVKit 12 | 13 | class CameraViewController: UIViewController { 14 | 15 | var imageView: UIImageView! 16 | 17 | var captureSession: AVCaptureSession? 18 | let videoOutputQueue = DispatchQueue(label: "com.facebook.onnx.videoOutputQueue", 19 | qos: .userInitiated) 20 | 21 | var model = Model.Candy 22 | var modelExecutor: ModelExecutor? 23 | 24 | ///-------------------------------------- 25 | // MARK: - View 26 | ///-------------------------------------- 27 | 28 | override func loadView() { 29 | imageView = UIImageView() 30 | imageView.addGestureRecognizer(UITapGestureRecognizer(target: self, 31 | action: #selector(switchToNextModel))) 32 | imageView.isUserInteractionEnabled = true 33 | imageView.contentMode = .scaleAspectFill 34 | self.view = imageView 35 | } 36 | 37 | override func viewDidLoad() { 38 | super.viewDidLoad() 39 | 40 | setupExecutor(for: model) 41 | prepareCaptureSession() 42 | } 43 | 44 | ///-------------------------------------- 45 | // MARK: - Actions 46 | ///-------------------------------------- 47 | 48 | @objc func switchToNextModel() { 49 | // Capture model into local stack variable to make everything synchronized. 50 | let model = self.model.nextModel 51 | self.model = model 52 | 53 | // Stop the session and start it after we switch the model 54 | // All in all, this makes sure we switch fast and are not blocked by running the model. 55 | captureSession?.stopRunning() 56 | videoOutputQueue.async { 57 | self.modelExecutor = nil 58 | self.setupExecutor(for: model) 59 | 60 | DispatchQueue.main.async { 61 | self.captureSession?.startRunning() 62 | } 63 | } 64 | } 65 | 66 | ///-------------------------------------- 67 | // MARK: - Setup 68 | ///-------------------------------------- 69 | 70 | fileprivate func setupExecutor(for model: Model) { 71 | // Make sure we destroy existing executor before creating a new one. 72 | modelExecutor = nil 73 | 74 | // Create new one and store it in a var 75 | modelExecutor = try? ModelExecutor(for: model, 76 | executionHandler: (DispatchQueue.main, didGetPredictionResult)) 77 | } 78 | 79 | fileprivate func prepareCaptureSession() { 80 | guard self.captureSession == nil else { return } 81 | 82 | let captureSession = AVCaptureSession() 83 | captureSession.sessionPreset = .hd1280x720 84 | 85 | let backCamera = AVCaptureDevice.default(for: .video)! 86 | let input = try! 
AVCaptureDeviceInput(device: backCamera) 87 | 88 | captureSession.addInput(input) 89 | 90 | let videoOutput = AVCaptureVideoDataOutput() 91 | videoOutput.setSampleBufferDelegate(self, queue: videoOutputQueue) 92 | captureSession.addOutput(videoOutput) 93 | 94 | if let videoOutputConnection = videoOutput.connection(with: .video) { 95 | videoOutputConnection.videoOrientation = .portrait 96 | } 97 | 98 | captureSession.startRunning() 99 | 100 | self.captureSession = captureSession; 101 | } 102 | 103 | ///-------------------------------------- 104 | // MARK: - Prediction 105 | ///-------------------------------------- 106 | 107 | fileprivate func predict(_ pixelBuffer: CVPixelBuffer) { 108 | guard let modelExecutor = modelExecutor else { 109 | DispatchQueue.main.async { 110 | self.didGetPredictionResult(pixelBuffer: pixelBuffer, error: nil) 111 | } 112 | return 113 | } 114 | modelExecutor.execute(with: pixelBuffer) 115 | } 116 | 117 | fileprivate func didGetPredictionResult(pixelBuffer: CVPixelBuffer?, error: Error?) { 118 | guard let pixelBuffer = pixelBuffer else { 119 | print("Failed to get prediction result with error \(String(describing:error))") 120 | return 121 | } 122 | 123 | imageView.image = UIImage(ciImage: CIImage(cvPixelBuffer: pixelBuffer)) 124 | } 125 | } 126 | 127 | ///-------------------------------------- 128 | // MARK: - AVCaptureVideoDataOutputSampleBufferDelegate 129 | ///-------------------------------------- 130 | 131 | extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate { 132 | func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) { 133 | guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return } 134 | 135 | predict(pixelBuffer) 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Source/Model.swift: -------------------------------------------------------------------------------- 1 | /** 2 | * SPDX-License-Identifier: Apache-2.0 3 | * 4 | * Copyright (c) Facebook, Inc. and Microsoft Corporation. 5 | */ 6 | 7 | import Foundation 8 | import CoreML 9 | 10 | enum Model { 11 | case Candy 12 | case Mosaic 13 | case RainPrincess 14 | case Udnie 15 | 16 | var MLModel: MLModel { 17 | switch self { 18 | case .Candy: 19 | return candy().model 20 | case .Mosaic: 21 | return mosaic().model 22 | case .RainPrincess: 23 | return rain_princess().model 24 | case .Udnie: 25 | return udnie().model 26 | } 27 | } 28 | 29 | var nextModel: Model { 30 | switch self { 31 | case .Candy: 32 | return .Mosaic 33 | case .Mosaic: 34 | return .RainPrincess 35 | case .RainPrincess: 36 | return .Udnie 37 | case .Udnie: 38 | return .Candy 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /examples/CoreML/ONNXLive/Source/ModelExecutor.swift: -------------------------------------------------------------------------------- 1 | /** 2 | * SPDX-License-Identifier: Apache-2.0 3 | * 4 | * Copyright (c) Facebook, Inc. and Microsoft Corporation. 5 | */ 6 | 7 | import Foundation 8 | import CoreML 9 | import Vision 10 | 11 | final class ModelExecutor { 12 | 13 | typealias ExecutionHandler = (DispatchQueue, (CVPixelBuffer?, Error?) 
-> ()) 14 | 15 | fileprivate let queue = DispatchQueue(label: "com.facebook.onnx.modelExecutor", 16 | qos: .userInitiated) 17 | fileprivate let vnModel: VNCoreMLModel 18 | fileprivate let vnRequest: VNCoreMLRequest 19 | 20 | init(for model: Model, 21 | executionHandler: ExecutionHandler) throws { 22 | self.vnModel = try VNCoreMLModel(for: model.MLModel) 23 | self.vnRequest = VNCoreMLRequest(model: vnModel, completionHandler: executionHandler) 24 | self.vnRequest.imageCropAndScaleOption = .centerCrop 25 | } 26 | 27 | func execute(with pixelBuffer: CVPixelBuffer) { 28 | queue.sync { 29 | let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]) 30 | try? handler.perform([ self.vnRequest ]) 31 | } 32 | } 33 | 34 | func executeAsync(with pixelBuffer: CVPixelBuffer) { 35 | queue.async { 36 | let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]) 37 | try? handler.perform([ self.vnRequest ]) 38 | } 39 | } 40 | } 41 | 42 | fileprivate extension VNCoreMLRequest { 43 | convenience init(model: VNCoreMLModel, completionHandler: ModelExecutor.ExecutionHandler) { 44 | self.init(model: model) { (request, error) in 45 | if let error = error { 46 | completionHandler.0.async { 47 | completionHandler.1(nil, error) 48 | } 49 | return 50 | } 51 | 52 | guard 53 | let results = request.results as? [VNPixelBufferObservation], 54 | let result = results.first 55 | else { 56 | // TODO: Error handling here 57 | return 58 | } 59 | 60 | completionHandler.0.async { 61 | completionHandler.1(result.pixelBuffer, nil) 62 | } 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 130 -------------------------------------------------------------------------------- /tutorials/Caffe2OnnxExport.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Exporting models from Caffe2 to ONNX\n", 8 | "\n", 9 | "In this tutorial we are going to show you how to export a Caffe2 model to ONNX. You can either\n", 10 | "\n", 11 | "- Convert a Caffe2 model to an ONNX model in Python\n", 12 | "\n", 13 | "or \n", 14 | "\n", 15 | "- Convert a Caffe2 model file to an ONNX model file in the shell\n", 16 | "\n", 17 | "We are going to use the squeezenet model in Caffe2 model zoo, its model files can be downloaded by running:\n", 18 | "\n", 19 | "```shell\n", 20 | "$ python -m caffe2.python.models.download squeezenet\n", 21 | "```" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "### Installation\n", 29 | "\n", 30 | "`onnx-caffe2` is now integrated as part of `caffe2` under `caffe2/python/onnx`." 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "### Note\n", 38 | "\n", 39 | "In ONNX, the type and shape of the inputs and outpus are required to be presented in the model, while in Caffe2, they are not stored in the model files. So when doing the convertion, we need to provide these extra information to onnx-caffe2 (through a dictionary in Python/a json string in the shell interface)." 
40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "### Exporting in Python" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 0, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "import onnx\n", 56 | "import caffe2.python.onnx.frontend\n", 57 | "from caffe2.proto import caffe2_pb2\n", 58 | "\n", 59 | "# We need to provide type and shape of the model inputs, \n", 60 | "# see above Note section for explanation\n", 61 | "data_type = onnx.TensorProto.FLOAT\n", 62 | "data_shape = (1, 3, 224, 224)\n", 63 | "value_info = {\n", 64 | " 'data': (data_type, data_shape)\n", 65 | "}\n", 66 | "\n", 67 | "predict_net = caffe2_pb2.NetDef()\n", 68 | "with open('predict_net.pb', 'rb') as f:\n", 69 | " predict_net.ParseFromString(f.read())\n", 70 | "\n", 71 | "init_net = caffe2_pb2.NetDef()\n", 72 | "with open('init_net.pb', 'rb') as f:\n", 73 | " init_net.ParseFromString(f.read())\n", 74 | "\n", 75 | "onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model(\n", 76 | " predict_net,\n", 77 | " init_net,\n", 78 | " value_info,\n", 79 | ")\n", 80 | "\n", 81 | "onnx.checker.check_model(onnx_model)" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "### Exporting in shell\n", 89 | "\n", 90 | "`onnx-caffe2` has bundled a shell command `convert-caffe2-to-onnx` for exporting Caffe2 model file to ONNX model file.\n", 91 | "\n", 92 | "```shell\n", 93 | "\n", 94 | "$ convert-caffe2-to-onnx predict_net.pb --caffe2-init-net init_net.pb --value-info '{\"data\": [1, [1, 3, 224, 224]]}' -o sqeezenet.onnx\n", 95 | "\n", 96 | "```\n", 97 | "\n", 98 | "Regarding to the `--value-info` flag, see above Note section for explanation." 99 | ] 100 | } 101 | ], 102 | "metadata": { 103 | "kernelspec": { 104 | "display_name": "Python 2", 105 | "language": "python", 106 | "name": "python2" 107 | }, 108 | "language_info": { 109 | "codemirror_mode": { 110 | "name": "ipython", 111 | "version": 2 112 | }, 113 | "file_extension": ".py", 114 | "mimetype": "text/x-python", 115 | "name": "python", 116 | "nbconvert_exporter": "python", 117 | "pygments_lexer": "ipython2", 118 | "version": "2.7.12" 119 | } 120 | }, 121 | "nbformat": 4, 122 | "nbformat_minor": 2 123 | } 124 | -------------------------------------------------------------------------------- /tutorials/ChainerOnnxExport.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Exporting model from Chainer to ONNX\n", 8 | "\n", 9 | "In this tutorial, we describe how to use ONNX-Chainer to convert a model defined in Chainer into the ONNX format.\n", 10 | "\n", 11 | "ONNX export is provided as a separate package [onnx-chainer](https://github.com/chainer/onnx-chainer). You can install it via pip like this:\n", 12 | "\n", 13 | "```\n", 14 | "pip install onnx-chainer\n", 15 | "```" 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "`onnx_chainer` provides `export` function that takes a Chainer model and its expected arguments given to `__call__` method of the model. It executes a forward pass once with the given model object and arguments to construct a computational graph. Because Chainer is the first deep learning framework that proposed Define-by-Run approach, the computational graph for backward computation is constructed on-the-fly. 
`onnx-chainer` is a trace-based exporter, so it needs to run a model once before converting the structure into ONNX.\n", 23 | "\n", 24 | "`onnx_chainer.export()` function have some other options for example `filename` which is to save the converted model to a disk." 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "## Limitations\n", 32 | "\n", 33 | "The [onnx-chainer](https://github.com/chainer/onnx-chainer) currently does not support exporting dynamic models which change their behavior depending on input data, because ONNX format currently cannot represent such dynamic behavior.\n", 34 | "\n", 35 | "Additionally, some ONNX operators, for example, `Reshape`, keeps the explicit batch size in the shape of dummy input data, so that the exported ONNX model will be runnable only with the same batch size. You may want to run the model with different batch size at inference time, but it might fail due to this limitation. To modify the batch size in the `Reshape` operator of ONNX, you need to modify the value by hand after exporting." 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 1, 41 | "metadata": {}, 42 | "outputs": [ 43 | { 44 | "name": "stdout", 45 | "output_type": "stream", 46 | "text": [ 47 | "Help on function export in module onnx_chainer.export:\n", 48 | "\n", 49 | "export(model, args, filename=None, export_params=True, graph_name='Graph', save_text=False)\n", 50 | " Export function for chainer.Chain in ONNX format.\n", 51 | " \n", 52 | " This function performs a forward computation of the given\n", 53 | " :class:`~chainer.Chain`, ``model``, by passing the given argments ``args``\n", 54 | " directly. It means, the output :class:`~chainer.Variable` object ``y`` to\n", 55 | " make the computational graph will be created by:\n", 56 | " \n", 57 | " y = model(*args)\n", 58 | " \n", 59 | " Args:\n", 60 | " model (~chainer.Chain): The model object you want to export in ONNX\n", 61 | " format. It should have :meth:`__call__` method because the second\n", 62 | " argment ``args`` is directly given to the model by the ``[]``\n", 63 | " accessor.\n", 64 | " args (list or dict): The argments which are given to the model\n", 65 | " directly.\n", 66 | " filename (str or file-like object): The filename used for saving the\n", 67 | " resulting ONNX model. If None, nothing is saved to the disk.\n", 68 | " export_params (bool): If True, this function exports all the parameters\n", 69 | " included in the given model at the same time. If False, the\n", 70 | " exported ONNX model doesn't include any parameter values.\n", 71 | " graph_name (str): A string to be used for the ``name`` field of the\n", 72 | " graph in the exported ONNX model.\n", 73 | " save_text (bool): If True, the text format of the output ONNX model is\n", 74 | " also saved with ``.txt`` extention.\n", 75 | " \n", 76 | " Returns:\n", 77 | " A ONNX model object.\n", 78 | "\n" 79 | ] 80 | } 81 | ], 82 | "source": [ 83 | "import onnx_chainer\n", 84 | "help(onnx_chainer.export)" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "## Export VGG16 into ONNX\n", 92 | "\n", 93 | "Chainer provides VGG16 implementation with pre-trained weights, so let's see how the network that is a `chainer.Chain` object can be exported into ONNX." 
94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 2, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "import numpy as np\n", 103 | "import chainer\n", 104 | "import chainer.links as L\n", 105 | "import onnx_chainer\n", 106 | "\n", 107 | "model = L.VGG16Layers()\n", 108 | "\n", 109 | "# Pseudo input\n", 110 | "x = np.zeros((1, 3, 224, 224), dtype=np.float32)\n", 111 | "\n", 112 | "# Don't forget to set train flag off!\n", 113 | "chainer.config.train = False\n", 114 | "\n", 115 | "onnx_chainer.export(model, x, filename='output/VGG16.onnx')" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "**That's all.**\n", 123 | "\n", 124 | "Now you can find the exported ONNX binary named `VGG16.onnx` under the `output` dir." 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "metadata": {}, 130 | "source": [ 131 | "## Inspecting the exported model\n", 132 | "\n", 133 | "You can use ONNX tooling to check the validity of the exported ONNX model and inspect the details." 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": 4, 139 | "metadata": {}, 140 | "outputs": [ 141 | { 142 | "name": "stdout", 143 | "output_type": "stream", 144 | "text": [ 145 | "graph Graph (\n", 146 | " %4704767504[FLOAT, 1x3x224x224]\n", 147 | ") initializers (\n", 148 | " %/conv5_1/b[FLOAT, 512]\n", 149 | " %/conv5_1/W[FLOAT, 512x512x3x3]\n", 150 | " %/conv5_3/b[FLOAT, 512]\n", 151 | " %/conv5_3/W[FLOAT, 512x512x3x3]\n", 152 | " %/conv3_2/b[FLOAT, 256]\n", 153 | " %/conv3_2/W[FLOAT, 256x256x3x3]\n", 154 | " %/fc6/b[FLOAT, 4096]\n", 155 | " %/fc6/W[FLOAT, 4096x25088]\n", 156 | " %/fc8/b[FLOAT, 1000]\n", 157 | " %/fc8/W[FLOAT, 1000x4096]\n", 158 | " %/conv1_2/b[FLOAT, 64]\n", 159 | " %/conv1_2/W[FLOAT, 64x64x3x3]\n", 160 | " %/conv2_1/b[FLOAT, 128]\n", 161 | " %/conv2_1/W[FLOAT, 128x64x3x3]\n", 162 | " %/conv4_1/b[FLOAT, 512]\n", 163 | " %/conv4_1/W[FLOAT, 512x256x3x3]\n", 164 | " %/conv2_2/b[FLOAT, 128]\n", 165 | " %/conv2_2/W[FLOAT, 128x128x3x3]\n", 166 | " %/conv3_3/b[FLOAT, 256]\n", 167 | " %/conv3_3/W[FLOAT, 256x256x3x3]\n", 168 | " %/conv1_1/b[FLOAT, 64]\n", 169 | " %/conv1_1/W[FLOAT, 64x3x3x3]\n", 170 | " %/conv3_1/b[FLOAT, 256]\n", 171 | " %/conv3_1/W[FLOAT, 256x128x3x3]\n", 172 | " %/conv4_3/b[FLOAT, 512]\n", 173 | " %/conv4_3/W[FLOAT, 512x512x3x3]\n", 174 | " %/conv5_2/b[FLOAT, 512]\n", 175 | " %/conv5_2/W[FLOAT, 512x512x3x3]\n", 176 | " %/fc7/b[FLOAT, 4096]\n", 177 | " %/fc7/W[FLOAT, 4096x4096]\n", 178 | " %/conv4_2/b[FLOAT, 512]\n", 179 | " %/conv4_2/W[FLOAT, 512x512x3x3]\n", 180 | ") {\n", 181 | " %4704767392 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704767504, %/conv1_1/W, %/conv1_1/b)\n", 182 | " %4704767840 = Relu(%4704767392)\n", 183 | " %4704767728 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704767840, %/conv1_2/W, %/conv1_2/b)\n", 184 | " %4704767952 = Relu(%4704767728)\n", 185 | " %4533214120 = MaxPool[kernel_shape = [2, 2], pads = [0, 0], strides = [2, 2]](%4704767952)\n", 186 | " %4555989232 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4533214120, %/conv2_1/W, %/conv2_1/b)\n", 187 | " %4697163368 = Relu(%4555989232)\n", 188 | " %4704818960 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4697163368, %/conv2_2/W, %/conv2_2/b)\n", 189 | " %4704819184 = Relu(%4704818960)\n", 190 | " %4704819408 = 
MaxPool[kernel_shape = [2, 2], pads = [0, 0], strides = [2, 2]](%4704819184)\n", 191 | " %4704819520 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704819408, %/conv3_1/W, %/conv3_1/b)\n", 192 | " %4704819632 = Relu(%4704819520)\n", 193 | " %4704819744 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704819632, %/conv3_2/W, %/conv3_2/b)\n", 194 | " %4704819856 = Relu(%4704819744)\n", 195 | " %4704819968 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704819856, %/conv3_3/W, %/conv3_3/b)\n", 196 | " %4704820080 = Relu(%4704819968)\n", 197 | " %4704820192 = MaxPool[kernel_shape = [2, 2], pads = [0, 0], strides = [2, 2]](%4704820080)\n", 198 | " %4704820304 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704820192, %/conv4_1/W, %/conv4_1/b)\n", 199 | " %4704820416 = Relu(%4704820304)\n", 200 | " %4704820528 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704820416, %/conv4_2/W, %/conv4_2/b)\n", 201 | " %4704820640 = Relu(%4704820528)\n", 202 | " %4704820752 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704820640, %/conv4_3/W, %/conv4_3/b)\n", 203 | " %4704820864 = Relu(%4704820752)\n", 204 | " %4704820976 = MaxPool[kernel_shape = [2, 2], pads = [0, 0], strides = [2, 2]](%4704820864)\n", 205 | " %4704821088 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704820976, %/conv5_1/W, %/conv5_1/b)\n", 206 | " %4704821200 = Relu(%4704821088)\n", 207 | " %4704895216 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704821200, %/conv5_2/W, %/conv5_2/b)\n", 208 | " %4704895440 = Relu(%4704895216)\n", 209 | " %4704895664 = Conv[dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%4704895440, %/conv5_3/W, %/conv5_3/b)\n", 210 | " %4704895888 = Relu(%4704895664)\n", 211 | " %4704896168 = MaxPool[kernel_shape = [2, 2], pads = [0, 0], strides = [2, 2]](%4704895888)\n", 212 | " %4704896448 = Reshape[shape = [1, -1]](%4704896168)\n", 213 | " %4704896672 = FC[axis = 1, axis_w = 1](%4704896448, %/fc6/W, %/fc6/b)\n", 214 | " %4704896784 = Relu(%4704896672)\n", 215 | " %4704897008 = FC[axis = 1, axis_w = 1](%4704896784, %/fc7/W, %/fc7/b)\n", 216 | " %4704897232 = Relu(%4704897008)\n", 217 | " %4704897456 = FC[axis = 1, axis_w = 1](%4704897232, %/fc8/W, %/fc8/b)\n", 218 | " %4704897680 = Softmax[axis = 1](%4704897456)\n", 219 | " return %4704897680\n", 220 | "}\n" 221 | ] 222 | } 223 | ], 224 | "source": [ 225 | "import onnx\n", 226 | "\n", 227 | "# Load the ONNX model\n", 228 | "model = onnx.load(\"output/VGG16.onnx\")\n", 229 | "\n", 230 | "# Check that the IR is well formed\n", 231 | "onnx.checker.check_model(model)\n", 232 | "\n", 233 | "# Print a human readable representation of the graph\n", 234 | "print(onnx.helper.printable_graph(model.graph))" 235 | ] 236 | }, 237 | { 238 | "cell_type": "markdown", 239 | "metadata": {}, 240 | "source": [ 241 | "You can see all the parameters and layers in the VGG16 model here. The actual values those parameters have are stored in `model.graph.initializers`." 
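As a quick illustration of how to read those stored parameter values, here is a minimal sketch using the `onnx` package. Note that the underlying protobuf field is spelled `initializer`; the file path is the one used for the export above.

```python
import onnx
from onnx import numpy_helper

model = onnx.load("output/VGG16.onnx")

# Each entry of graph.initializer is a TensorProto holding one trained parameter;
# numpy_helper converts it into a regular NumPy array.
for tensor in model.graph.initializer[:3]:
    array = numpy_helper.to_array(tensor)
    print(tensor.name, array.shape, array.dtype)
```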
242 | ] 243 | } 244 | ], 245 | "metadata": { 246 | "kernelspec": { 247 | "display_name": "Python 3", 248 | "language": "python", 249 | "name": "python3" 250 | }, 251 | "language_info": { 252 | "codemirror_mode": { 253 | "name": "ipython", 254 | "version": 3 255 | }, 256 | "file_extension": ".py", 257 | "mimetype": "text/x-python", 258 | "name": "python", 259 | "nbconvert_exporter": "python", 260 | "pygments_lexer": "ipython3", 261 | "version": "3.6.4" 262 | } 263 | }, 264 | "nbformat": 4, 265 | "nbformat_minor": 2 266 | } 267 | -------------------------------------------------------------------------------- /tutorials/CntkOnnxExport.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Exporting models from CNTK to ONNX" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "In this tutorial, we will demonstrate how to export a CNTK model to the ONNX format." 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": {}, 20 | "source": [ 21 | "## Installation" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "To export to ONNX, simply make sure you have CNTK 2.3.1 or higher installed.
\n", 29 | "Follow CNTK installation instructions __[here](https://docs.microsoft.com/en-us/cognitive-toolkit/Setup-CNTK-on-your-machine)__." 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## API Usage" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": {}, 42 | "source": [ 43 | "To save a CNTK model to the ONNX format, specify the ONNX format in the format parameter of the save function." 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "** Using Python API ** " 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "```python\n", 58 | "import cntk as C\n", 59 | "\n", 60 | "x = C.input_variable()\n", 61 | "z = create_model(x) #your create model function\n", 62 | "z.save(, format=C.ModelFormat.ONNX)\n", 63 | "```" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "** Exporting in C# **" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "```csharp\n", 78 | "var x = CNTKLib.InputVariable();\n", 79 | "Function z = CreateModel(x); //your create model function\n", 80 | "z.Save(, ModelFormat.ONNX);\n", 81 | "```\n" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "## Trying it out with ResNet-20" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "Let's go through an example of exporting a pretrained CNTK model to ONNX." 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": {}, 101 | "source": [ 102 | "### Step 1: Prepare a CNTK model to export" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "For this tutorial, we will be using a pretrained ResNet-20 model (trained on the CIFAR-10 dataset) from the collection of pretrained CNTK models found [here](https://github.com/Microsoft/CNTK/blob/master/PretrainedModels/Image.md). Download the model to your working directory. (Note that not all of the models found here are exportable to the ONNX format yet.) \n", 110 | "Download link: https://www.cntk.ai/Models/CNTK_Pretrained/ResNet20_CIFAR10_CNTK.model" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "### Step 2: Load the model into CNTK" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": 10, 123 | "metadata": { 124 | "collapsed": true 125 | }, 126 | "outputs": [], 127 | "source": [ 128 | "import cntk as C" 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 11, 134 | "metadata": { 135 | "collapsed": false 136 | }, 137 | "outputs": [], 138 | "source": [ 139 | "model_path = \"ResNet20_CIFAR10_CNTK.model\"\n", 140 | "z = C.Function.load(model_path, device=C.device.cpu())" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": {}, 146 | "source": [ 147 | "### Step 3: Export the model to ONNX" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "Next, export the CNTK model by saving it out to the ONNX format." 
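Once the cell below has written the file, it is a good idea to load it back with the `onnx` package and run the checker, as the other export tutorials in this repository do. This is a minimal sketch; `model.onnx` is simply the filename used in the next cell.

```python
import onnx

# Load the exported file and validate it; check_model raises if the model is malformed.
model = onnx.load("model.onnx")
onnx.checker.check_model(model)
```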
155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 12, 160 | "metadata": { 161 | "collapsed": true 162 | }, 163 | "outputs": [], 164 | "source": [ 165 | "z.save(\"model.onnx\", format=C.ModelFormat.ONNX)" 166 | ] 167 | } 168 | ], 169 | "metadata": { 170 | "anaconda-cloud": {}, 171 | "kernelspec": { 172 | "display_name": "Python [Root]", 173 | "language": "python", 174 | "name": "Python [Root]" 175 | }, 176 | "language_info": { 177 | "codemirror_mode": { 178 | "name": "ipython", 179 | "version": 3 180 | }, 181 | "file_extension": ".py", 182 | "mimetype": "text/x-python", 183 | "name": "python", 184 | "nbconvert_exporter": "python", 185 | "pygments_lexer": "ipython3", 186 | "version": "3.5.2" 187 | } 188 | }, 189 | "nbformat": 4, 190 | "nbformat_minor": 0 191 | } 192 | -------------------------------------------------------------------------------- /tutorials/CorrectnessVerificationAndPerformanceComparison.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Verify the Correctness of Exported Model and Compare the Performance\n", 8 | "\n", 9 | "In this tutorial, we are going to show:\n", 10 | "- how to verify the correctness of the exported model\n", 11 | "- how to compare the performance with the original model\n", 12 | "\n", 13 | "We choose PyTorch to export the ONNX model, and use Caffe2 as the backend.\n", 14 | "After that, the outputs and performance of two models are compared.\n", 15 | "\n", 16 | "To run this tutorial, please make sure that `caffe2`, `pytorch` and `onnx` are already installed.\n", 17 | "\n", 18 | "First, let's create a PyTorch model and prepare the inputs of the model." 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 1, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "from __future__ import absolute_import\n", 28 | "from __future__ import division\n", 29 | "from __future__ import print_function\n", 30 | "from __future__ import unicode_literals\n", 31 | "\n", 32 | "import io\n", 33 | "import time\n", 34 | "import numpy as np\n", 35 | "import torch\n", 36 | "import onnx\n", 37 | "import torch.nn as nn\n", 38 | "import torch.nn.functional as F\n", 39 | "\n", 40 | "from caffe2.proto import caffe2_pb2\n", 41 | "from caffe2.python import core\n", 42 | "from torch.autograd import Variable\n", 43 | "from caffe2.python.onnx.backend import Caffe2Backend\n", 44 | "from caffe2.python.onnx.helper import c2_native_run_net, save_caffe2_net, load_caffe2_net,benchmark_pytorch_model\n", 45 | "\n", 46 | "\n", 47 | "class MNIST(nn.Module):\n", 48 | "\n", 49 | " def __init__(self):\n", 50 | " super(MNIST, self).__init__()\n", 51 | " self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n", 52 | " self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n", 53 | " self.conv2_drop = nn.Dropout2d()\n", 54 | " self.fc1 = nn.Linear(320, 50)\n", 55 | " self.fc2 = nn.Linear(50, 10)\n", 56 | "\n", 57 | " def forward(self, x):\n", 58 | " x = F.relu(F.max_pool2d(self.conv1(x), 2))\n", 59 | " x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n", 60 | " x = x.view(-1, 320)\n", 61 | " x = F.relu(self.fc1(x))\n", 62 | " x = F.dropout(x, training=self.training)\n", 63 | " x = self.fc2(x)\n", 64 | " return F.log_softmax(x)\n", 65 | "\n", 66 | "\n", 67 | "# Create a pytorch model.\n", 68 | "pytorch_model = MNIST()\n", 69 | "pytorch_model.train(False)\n", 70 | "\n", 71 | "# Make the inputs in tuple format.\n", 72 | 
"inputs = (Variable(torch.randn(3, 1, 28, 28), requires_grad=True), )" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "Run the PyTorch exporter to generate an ONNX model." 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 2, 85 | "metadata": {}, 86 | "outputs": [ 87 | { 88 | "name": "stdout", 89 | "output_type": "stream", 90 | "text": [ 91 | "graph(%input.1 : Float(3, 1, 28, 28),\n", 92 | " %conv1.weight : Float(10, 1, 5, 5),\n", 93 | " %conv1.bias : Float(10),\n", 94 | " %conv2.weight : Float(20, 10, 5, 5),\n", 95 | " %conv2.bias : Float(20),\n", 96 | " %fc1.weight : Float(50, 320),\n", 97 | " %fc1.bias : Float(50),\n", 98 | " %fc2.weight : Float(10, 50),\n", 99 | " %fc2.bias : Float(10)):\n", 100 | " %9 : Float(3, 10, 24, 24) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[0, 0, 0, 0], strides=[1, 1]](%input.1, %conv1.weight, %conv1.bias), scope: MNIST/Conv2d[conv1] # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/modules/conv.py:340:0\n", 101 | " %10 : Float(3, 10, 12, 12) = onnx::MaxPool[kernel_shape=[2, 2], pads=[0, 0, 0, 0], strides=[2, 2]](%9), scope: MNIST # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:487:0\n", 102 | " %11 : Float(3, 10, 12, 12) = onnx::Relu(%10), scope: MNIST # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:913:0\n", 103 | " %12 : Float(3, 20, 8, 8) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[0, 0, 0, 0], strides=[1, 1]](%11, %conv2.weight, %conv2.bias), scope: MNIST/Dropout2d[conv2_drop] # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:844:0\n", 104 | " %13 : Float(3, 20, 4, 4) = onnx::MaxPool[kernel_shape=[2, 2], pads=[0, 0, 0, 0], strides=[2, 2]](%12), scope: MNIST # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:487:0\n", 105 | " %14 : Float(3, 20, 4, 4) = onnx::Relu(%13), scope: MNIST # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:913:0\n", 106 | " %15 : Tensor = onnx::Constant[value= -1 320 [ Variable[CPULongType]{2} ]](), scope: MNIST\n", 107 | " %16 : Float(3, 320) = onnx::Reshape(%14, %15), scope: MNIST # :34:0\n", 108 | " %17 : Float(3, 50) = onnx::Gemm[alpha=1, beta=1, transB=1](%16, %fc1.weight, %fc1.bias), scope: MNIST/Linear[fc1] # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:1369:0\n", 109 | " %18 : Float(3, 50) = onnx::Relu(%17), scope: MNIST # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:806:0\n", 110 | " %19 : Float(3, 10) = onnx::Gemm[alpha=1, beta=1, transB=1](%18, %fc2.weight, %fc2.bias), scope: MNIST/Linear[fc2] # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:1369:0\n", 111 | " %20 : Float(3, 10) = onnx::LogSoftmax[axis=1](%19), scope: MNIST # /home/marouenez/anaconda3/envs/masterthesis/lib/python3.7/site-packages/torch/nn/functional.py:1316:0\n", 112 | " return (%20)\n", 113 | "\n", 114 | "Check the ONNX model.\n" 115 | ] 116 | } 117 | ], 118 | "source": [ 119 | "# Export an ONNX model.\n", 120 | "f = io.BytesIO()\n", 121 | "torch.onnx.export(pytorch_model, inputs, f, verbose=True)\n", 122 | "onnx_model = onnx.ModelProto.FromString(f.getvalue())\n", 123 | "\n", 124 | "# Check whether the onnx_model is valid or 
not.\n", 125 | "print(\"Check the ONNX model.\")\n", 126 | "onnx.checker.check_model(onnx_model)" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "Now, we have an ONNX model, let's turn it into a Caffe2 one." 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": 3, 139 | "metadata": {}, 140 | "outputs": [ 141 | { 142 | "name": "stdout", 143 | "output_type": "stream", 144 | "text": [ 145 | "Convert the model to a Caffe2 model.\n" 146 | ] 147 | } 148 | ], 149 | "source": [ 150 | "# Convert the ONNX model to a Caffe2 model.\n", 151 | "print(\"Convert the model to a Caffe2 model.\")\n", 152 | "init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model, device=\"CPU\")" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "metadata": {}, 158 | "source": [ 159 | "Caffe2 takes a list of numpy array as inputs. So we need to change the format." 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 4, 165 | "metadata": {}, 166 | "outputs": [], 167 | "source": [ 168 | "# Prepare the inputs for Caffe2.\n", 169 | "caffe2_inputs = [var.data.numpy() for var in inputs]" 170 | ] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "The following code shows how to save and load a Caffe2 model. It is purely for demonstration purpose here." 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": 5, 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "# Save the converted Caffe2 model in the protobuf files. (Optional)\n", 186 | "init_file = \"./output/mymodel_init.pb\"\n", 187 | "predict_file = \"./output/mymodel_predict.pb\"\n", 188 | "save_caffe2_net(init_net, init_file, output_txt=False)\n", 189 | "save_caffe2_net(predict_net, predict_file, output_txt=True)\n", 190 | "\n", 191 | "# Load the Caffe2 model.\n", 192 | "init_net = load_caffe2_net(init_file)\n", 193 | "predict_net = load_caffe2_net(predict_file)" 194 | ] 195 | }, 196 | { 197 | "cell_type": "markdown", 198 | "metadata": {}, 199 | "source": [ 200 | "Run PyTorch and Caffe2 models separately, and get the results." 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 6, 206 | "metadata": { 207 | "collapsed": true 208 | }, 209 | "outputs": [], 210 | "source": [ 211 | "# Compute the results using the PyTorch model.\n", 212 | "pytorch_results = pytorch_model(*inputs)\n", 213 | "\n", 214 | "# Compute the results using the Caffe2 model.\n", 215 | "_, caffe2_results = c2_native_run_net(init_net, predict_net, caffe2_inputs)" 216 | ] 217 | }, 218 | { 219 | "cell_type": "markdown", 220 | "metadata": {}, 221 | "source": [ 222 | "Now we have the results, let's check the correctness of the exported model.\n", 223 | "If no assertion fails, our model has achieved expected precision." 
224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": 7, 229 | "metadata": {}, 230 | "outputs": [ 231 | { 232 | "name": "stdout", 233 | "output_type": "stream", 234 | "text": [ 235 | "The exported model achieves 5-decimal precision.\n" 236 | ] 237 | } 238 | ], 239 | "source": [ 240 | "# Check the decimal precision of the exported Caffe2.\n", 241 | "expected_decimal = 5\n", 242 | "for p, c in zip([pytorch_results], caffe2_results):\n", 243 | " np.testing.assert_almost_equal(p.data.cpu().numpy(), c, decimal=expected_decimal)\n", 244 | "print(\"The exported model achieves {}-decimal precision.\".format(expected_decimal))" 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": 8, 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [ 253 | "def benchmark_caffe2_model(init_net, predict_net,inputs, warmup_iters=3, main_iters=10):\n", 254 | " '''\n", 255 | " Run the model several times, and measure the execution time.\n", 256 | " Print the execution time per iteration (millisecond) and the number of iterations per second.\n", 257 | " '''\n", 258 | " for _i in range(warmup_iters):\n", 259 | " ws, caffe2_results = c2_native_run_net(init_net, predict_net, inputs) \n", 260 | "\n", 261 | " total_time = 0.0\n", 262 | " for _i in range(main_iters):\n", 263 | " ts = time.time()\n", 264 | " ws, caffe2_results = c2_native_run_net(init_net, predict_net, inputs)\n", 265 | " te = time.time()\n", 266 | " total_time += te - ts\n", 267 | " \n", 268 | " return total_time / main_iters * 1000" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "metadata": {}, 274 | "source": [ 275 | "The following code measures the performance of PyTorch and Caffe2 models.\n", 276 | "We report:\n", 277 | "- Execution time per iteration\n", 278 | "- Iterations per second" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": 9, 284 | "metadata": {}, 285 | "outputs": [ 286 | { 287 | "name": "stdout", 288 | "output_type": "stream", 289 | "text": [ 290 | "PyTorch model's execution time is 0.6218433380126953 milliseconds/ iteration, 1608.1220765278736 iterations per second.\n", 291 | "Caffe2 model's execution time is 3.189969062805176 milliseconds / iteration, 313.48266403581545 iterations per second\n" 292 | ] 293 | } 294 | ], 295 | "source": [ 296 | "pytorch_time = benchmark_pytorch_model(pytorch_model, inputs)\n", 297 | "caffe2_time = benchmark_caffe2_model(init_net, predict_net, caffe2_inputs)\n", 298 | "\n", 299 | "print(\"PyTorch model's execution time is {} milliseconds/ iteration, {} iterations per second.\".format(\n", 300 | " pytorch_time, 1000 / pytorch_time))\n", 301 | "print(\"Caffe2 model's execution time is {} milliseconds / iteration, {} iterations per second\".format(\n", 302 | " caffe2_time, 1000 / caffe2_time))" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": null, 308 | "metadata": {}, 309 | "outputs": [], 310 | "source": [] 311 | } 312 | ], 313 | "metadata": { 314 | "kernelspec": { 315 | "display_name": "Python 3", 316 | "language": "python", 317 | "name": "python3" 318 | }, 319 | "language_info": { 320 | "codemirror_mode": { 321 | "name": "ipython", 322 | "version": 3 323 | }, 324 | "file_extension": ".py", 325 | "mimetype": "text/x-python", 326 | "name": "python", 327 | "nbconvert_exporter": "python", 328 | "pygments_lexer": "ipython3", 329 | "version": "3.7.4" 330 | } 331 | }, 332 | "nbformat": 4, 333 | "nbformat_minor": 2 334 | } 335 | 
-------------------------------------------------------------------------------- /tutorials/ExportModelFromPyTorchForWinML.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Export PyTorch models for Windows ML 4 | 5 | [Windows Machine Learning](https://docs.microsoft.com/windows/ai/windows-ml/) makes it easy to integrate AI into your Windows applications using ONNX models. 6 | 7 | ## Step 1: Determine the ONNX version your model needs to be in 8 | This depends on which releases of Windows you are targeting. Newer releases of Windows support newer versions of ONNX. This [page](https://docs.microsoft.com/windows/ai/windows-ml/onnx-versions) lists the opset versions supported by different releases of Windows. ONNX 1.2 (opset 7) is the lowest one supported and will work on all versions of Windows ML. Newer versions of ONNX support more types of models. 9 | 10 | ## Step 2: Export your PyTorch model to that ONNX version 11 | PyTorch's ONNX export support is documented [here](https://pytorch.org/docs/stable/onnx.html). As of PyTorch 1.2, the `torch.onnx.export` function takes a parameter that lets you specify the ONNX opset version. 12 | 13 | ```python 14 | import torch 15 | import torchvision 16 | 17 | dummy_in = torch.randn(10, 3, 224, 224) 18 | model = torchvision.models.resnet18(pretrained=True) 19 | 20 | in_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(16) ] 21 | out_names = [ "output1" ] 22 | 23 | torch.onnx.export(model, dummy_in, "resnet18.onnx", input_names=in_names, output_names=out_names, opset_version=7, verbose=True) 24 | ``` 25 | 26 | ## Step 3: Integrate the ONNX model into your Windows app 27 | Follow the [tutorials and documentation](https://docs.microsoft.com/windows/ai/windows-ml/) to start using the model in your application. You can code directly against the [Windows ML APIs](https://docs.microsoft.com/windows/ai/windows-ml/integrate-model) or use the [mlgen tool](https://docs.microsoft.com/windows/ai/windows-ml/mlgen) to automatically generate wrapper classes for you. 28 | -------------------------------------------------------------------------------- /tutorials/ONNXMXNetServer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Serving ONNX model with MXNet Model Server\n", 8 | "\n", 9 | "This tutorial dmeonstrates how to serve an ONNX model with MXNet Model Server. We'll use a pre-trained SqueezeNet model from ONNX model zoo. 
The same example can be easily applied to other ONNX models.\n", 10 | "\n", 11 | "Frameworks used in this tutorial:\n", 12 | "* [MXNet Model Server](https://github.com/awslabs/mxnet-model-server) that uses [MXNet](https://mxnet.io)\n", 13 | "* [ONNX](https://onnx.ai)" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "## Installing pre-requisites:\n", 21 | "Next we'll install the pre-requisites:\n", 22 | "* [ONNX](https://github.com/onnx/onnx)\n", 23 | "* [MXNetModelServer](https://github.com/awslabs/mxnet-model-server)" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "!conda install -y -c conda-forge onnx\n", 33 | "!pip install mxnet-model-server" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "## Downloading a pre-trained ONNX model\n", 41 | "\n", 42 | "Let's go ahead and download a aqueezenet onnx model into a new directory." 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": { 49 | "scrolled": true 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "!mkdir squeezenet\n", 54 | "%cd squeezenet\n", 55 | "!curl -O https://s3.amazonaws.com/model-server/models/onnx-squeezenet/squeezenet.onnx" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": {}, 61 | "source": [ 62 | "## Inspecting the ONNX model\n", 63 | "\n", 64 | "Let's make sure the exported ONNX model is well formed" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": null, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "import onnx\n", 74 | "\n", 75 | "# Load the ONNX model\n", 76 | "model = onnx.load(\"squeezenet.onnx\")\n", 77 | "\n", 78 | "# Check that the IR is well formed, identified issues will error out\n", 79 | "onnx.checker.check_model(model)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "metadata": { 85 | "collapsed": true 86 | }, 87 | "source": [ 88 | "## Packaging the ONNX model for serving with MXNet Model Server (MMS)\n", 89 | "\n", 90 | "To serve the ONNX model with MMS, we will first need to prepare some files that will be bundled into a **Model Archive**. \n", 91 | "The Model Archive containes everything MMS needs to setup endpoints and serve the model. 
\n", 92 | "\n", 93 | "The files needed are:\n", 94 | "* squeezenet.onnx - the ONNX model file\n", 95 | "* signature.json - defining the input and output of the model\n", 96 | "* synset.txt - defining the set of classes the model was trained on, and returned by the model" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "# Let's go ahead and download the files we need:\n", 106 | "!curl -O https://s3.amazonaws.com/model-server/models/onnx-squeezenet/signature.json\n", 107 | "!curl -O https://s3.amazonaws.com/model-server/models/onnx-squeezenet/synset.txt" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "# Let's take a peek into the **signature.json** file:\n", 117 | "!cat signature.json" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "# Let's take a peek into the synset.txt file:\n", 127 | "!head -n 10 synset.txt" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "# Let's package everything up into a Model Archive bundle\n", 137 | "!mxnet-model-export --model-name squeezenet --model-path .\n", 138 | "!ls -l squeezenet.model" 139 | ] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "metadata": {}, 144 | "source": [ 145 | "## Serving the Model Archive with MXNet Model Server\n", 146 | "Now that we have the **Model Archive**, we can start the server and ask it to setup HTTP endpoints to serve the model, emit real-time operational metrics and more.\n", 147 | "\n", 148 | "We'll also test the server's endpoints." 
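The cells below exercise these endpoints with `curl`. If you prefer to stay in Python, the same checks can be sketched with the `requests` library; this assumes `requests` is installed, the server is already running and reachable over plain HTTP on the default port 8080, and the test image has been downloaded as in the curl example below (the endpoint path and the `input_0` field name are the ones used there).

```python
import requests

# Health check against the model server.
print(requests.get("http://127.0.0.1:8080/ping").text)

# Send the downloaded test image to the SqueezeNet prediction endpoint.
with open("kitten.jpg", "rb") as f:
    response = requests.post("http://127.0.0.1:8080/squeezenet/predict",
                             files={"input_0": f})
print(response.json())
```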
149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": null, 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [ 157 | "# Spawning a new process to run the server\n", 158 | "import subprocess as sp\n", 159 | "server = sp.Popen(\"mxnet-model-server --models squeezenet=squeezenet.model\", shell=True)" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": null, 165 | "metadata": {}, 166 | "outputs": [], 167 | "source": [ 168 | "# Check out the health endpoint\n", 169 | "!curl https://127.0.0.1:8080/ping" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [ 178 | "# Download an image Trying out image classification\n", 179 | "!curl -O https://s3.amazonaws.com/model-server/inputs/kitten.jpg\n", 180 | "!curl -X POST https://127.0.0.1:8080/squeezenet/predict -F \"input_0=@kitten.jpg\"" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "metadata": {}, 187 | "outputs": [], 188 | "source": [ 189 | "# Lastly, we'll terminate the server\n", 190 | "server.terminate()" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [] 199 | } 200 | ], 201 | "metadata": { 202 | "kernelspec": { 203 | "display_name": "Python 3", 204 | "language": "python", 205 | "name": "python3" 206 | }, 207 | "language_info": { 208 | "codemirror_mode": { 209 | "name": "ipython", 210 | "version": 3 211 | }, 212 | "file_extension": ".py", 213 | "mimetype": "text/x-python", 214 | "name": "python", 215 | "nbconvert_exporter": "python", 216 | "pygments_lexer": "ipython3", 217 | "version": "3.6.4" 218 | } 219 | }, 220 | "nbformat": 4, 221 | "nbformat_minor": 2 222 | } 223 | -------------------------------------------------------------------------------- /tutorials/OnnxCaffe2Import.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Importing models from ONNX to Caffe2\n", 8 | "\n", 9 | "In this tutorial we are going to show you how to import ONNX models to Caffe2. You can either\n", 10 | "\n", 11 | "- Directly run an ONNX model with Caffe2 in Python\n", 12 | "\n", 13 | "or\n", 14 | "\n", 15 | "- Convert an ONNX model file to a Caffe2 model file, and then later run the converted Caffe2 model in any environment that Caffe2 supports, e.g. on mobile phones." 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "### Installation\n", 23 | "\n", 24 | "`onnx-caffe2` is now integrated as part of `caffe2` under `caffe2/python/onnx`." 
25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "### Run an ONNX model with Caffe2" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 0, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "import onnx\n", 41 | "import caffe2.python.onnx.backend\n", 42 | "\n", 43 | "# Prepare the inputs, here we use numpy to generate some random inputs for demo purpose\n", 44 | "import numpy as np\n", 45 | "img = np.random.randn(1, 3, 224, 224).astype(np.float32)\n", 46 | "\n", 47 | "# Load the ONNX model\n", 48 | "model = onnx.load('assets/squeezenet.onnx')\n", 49 | "# Run the ONNX model with Caffe2\n", 50 | "outputs = caffe2.python.onnx.backend.run_model(model, [img])" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "### Convert ONNX model file to Caffe2 model file\n", 58 | "`onnx-caffe2` has bundled a shell command `convert-onnx-to-caffe2` for converting ONNX model file to Caffe2 model file. \n", 59 | "\n", 60 | "\n", 61 | "```shell\n", 62 | "\n", 63 | "$ convert-onnx-to-caffe2 assets/squeezenet.onnx --output predict_net.pb --init-net-output init_net.pb\n", 64 | "\n", 65 | "```\n", 66 | "\n", 67 | "Note in ONNX model file, parameters and network structure are all stored in one model file, while in Caffe2, they are normally stored in separated `init_net.pb` (parameters) and `predict_net.pb` (network structure) files." 68 | ] 69 | } 70 | ], 71 | "metadata": { 72 | "kernelspec": { 73 | "display_name": "Python 2", 74 | "language": "python", 75 | "name": "python2" 76 | }, 77 | "language_info": { 78 | "codemirror_mode": { 79 | "name": "ipython", 80 | "version": 2 81 | }, 82 | "file_extension": ".py", 83 | "mimetype": "text/x-python", 84 | "name": "python", 85 | "nbconvert_exporter": "python", 86 | "pygments_lexer": "ipython2", 87 | "version": "2.7.12" 88 | } 89 | }, 90 | "nbformat": 4, 91 | "nbformat_minor": 2 92 | } 93 | -------------------------------------------------------------------------------- /tutorials/OnnxTensorflowExport.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "deletable": true, 7 | "editable": true 8 | }, 9 | "source": [ 10 | "# Train in Tensorflow, Export to ONNX\n", 11 | "In this tutorial, we will demonstrate the complete process of training a MNIST model in Tensorflow and exporting the trained model to ONNX.\n", 12 | "\n", 13 | "### Training\n", 14 | "\n", 15 | "Firstly, we can initiate the [training script](./assets/tf-train-mnist.py) by issuing the command `python tf-train-mnist.py` on your terminal. Shortly, we should obtain a trained MNIST model. The training process needs no special instrumentation. However, to successfully convert the trained model, onnx-tensorflow requires three pieces of information, all of which can be obtained after training is complete:\n", 16 | "\n", 17 | " - *Graph definition*: You need to obtain information about the graph definition in the form of GraphProto. 
The easiest way to achieve this is to use the following snippet of code as shown in the example training script:\n", 18 | "```\n", 19 | " with open(\"graph.proto\", \"wb\") as file:\n", 20 | " graph = tf.get_default_graph().as_graph_def(add_shapes=True)\n", 21 | " file.write(graph.SerializeToString())\n", 22 | "```\n", 23 | " - *Shape information*: By default, `as_graph_def` does not serialize any information about the shapes of the intermediate tensor and such information is required by onnx-tensorflow. Thus we request Tensorflow to serialize the shape information by adding the keyword argument `add_shapes=True` as demonstrated above.\n", 24 | " - *Checkpoint*: Tensorflow checkpoint files contain information about the obtained weight; thus they are needed to convert the trained model to ONNX format.\n", 25 | "\n", 26 | "### Graph Freezing\n", 27 | "\n", 28 | "Secondly, we freeze the graph. Here, we include quotes from Tensorflow documentation about what graph freezing is:\n", 29 | "> One confusing part about this is that the weights usually aren't stored inside the file format during training. Instead, they're held in separate checkpoint files, and there are Variable ops in the graph that load the latest values when they're initialized. It's often not very convenient to have separate files when you're deploying to production, so there's the freeze_graph.py script that takes a graph definition and a set of checkpoints and freezes them together into a single file.\n", 30 | "\n", 31 | "Thus here we build the freeze_graph tool in the Tensorflow source folder and execute it with the information about where the GraphProto is, where the checkpoint file is and where to put the frozen graph. One caveat is that you need to supply the name of the output node to this utility. If you are having trouble finding the name of the output node, please refer to [this article](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md#inspecting-graphs) for help.\n", 32 | "```\n", 33 | "bazel build tensorflow/python/tools:freeze_graph\n", 34 | "bazel-bin/tensorflow/python/tools/freeze_graph \\\n", 35 | " --input_graph=/home/mnist-tf/graph.proto \\\n", 36 | " --input_checkpoint=/home/mnist-tf/ckpt/model.ckpt \\\n", 37 | " --output_graph=/tmp/frozen_graph.pb \\\n", 38 | " --output_node_names=fc2/add \\\n", 39 | " --input_binary=True\n", 40 | "```\n", 41 | "\n", 42 | "Note that now we have obtained the `frozen_graph.pb` with graph definition as well as weight information in one file.\n", 43 | "\n", 44 | "### Model Conversion\n", 45 | "\n", 46 | "Thirdly, we convert the model to ONNX format using onnx-tensorflow. Using `tensorflow_graph_to_onnx_model` from onnx-tensorflow API (documentation available at https://github.com/onnx/onnx-tensorflow/blob/master/doc/API.md)." 
47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 4, 52 | "metadata": { 53 | "collapsed": false, 54 | "deletable": true, 55 | "editable": true 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "import tensorflow as tf\n", 60 | "from onnx_tf.frontend import tensorflow_graph_to_onnx_model\n", 61 | "\n", 62 | "with tf.gfile.GFile(\"frozen_graph.pb\", \"rb\") as f:\n", 63 | " graph_def = tf.GraphDef()\n", 64 | " graph_def.ParseFromString(f.read())\n", 65 | " onnx_model = tensorflow_graph_to_onnx_model(graph_def,\n", 66 | " \"fc2/add\",\n", 67 | " opset=6)\n", 68 | "\n", 69 | " file = open(\"mnist.onnx\", \"wb\")\n", 70 | " file.write(onnx_model.SerializeToString())\n", 71 | " file.close()" 72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "metadata": { 77 | "deletable": true, 78 | "editable": true 79 | }, 80 | "source": [ 81 | "Performing a simple sanity check to ensure that we have obtained the correct model, we print out the first node of the ONNX model graph converted, which corresponds to the reshape operation performed to convert the 1D serial input to a 2D image tensor:" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 11, 87 | "metadata": { 88 | "collapsed": false, 89 | "deletable": true, 90 | "editable": true 91 | }, 92 | "outputs": [ 93 | { 94 | "name": "stdout", 95 | "output_type": "stream", 96 | "text": [ 97 | "input: \"Placeholder\"\n", 98 | "input: \"reshape/Reshape/shape\"\n", 99 | "output: \"reshape/Reshape\"\n", 100 | "op_type: \"Reshape\"\n", 101 | "\n" 102 | ] 103 | } 104 | ], 105 | "source": [ 106 | "print(onnx_model.graph.node[0])" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": { 112 | "deletable": true, 113 | "editable": true 114 | }, 115 | "source": [ 116 | "### Inference using Backend\n", 117 | "\n", 118 | "In this tutorial, we continue our demonstration by performing inference using this obtained ONNX model. Here, we exported an image representing a handwritten 7 and stored the numpy array as image.npz. Using our backend, we will classify this image using the converted ONNX model." 
119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 5, 124 | "metadata": { 125 | "collapsed": false, 126 | "deletable": true, 127 | "editable": true 128 | }, 129 | "outputs": [ 130 | { 131 | "name": "stdout", 132 | "output_type": "stream", 133 | "text": [ 134 | "The digit is classified as 7\n" 135 | ] 136 | } 137 | ], 138 | "source": [ 139 | "import onnx\n", 140 | "import numpy as np\n", 141 | "from onnx_tf.backend import prepare\n", 142 | "\n", 143 | "model = onnx.load('mnist.onnx')\n", 144 | "tf_rep = prepare(model)\n", 145 | "\n", 146 | "img = np.load(\"./assets/image.npz\")\n", 147 | "output = tf_rep.run(img.reshape([1, 784]))\n", 148 | "print \"The digit is classified as \", np.argmax(output)\n" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": null, 154 | "metadata": { 155 | "collapsed": true, 156 | "deletable": true, 157 | "editable": true 158 | }, 159 | "outputs": [], 160 | "source": [] 161 | } 162 | ], 163 | "metadata": { 164 | "kernelspec": { 165 | "display_name": "Python 2", 166 | "language": "python", 167 | "name": "python2" 168 | }, 169 | "language_info": { 170 | "codemirror_mode": { 171 | "name": "ipython", 172 | "version": 2 173 | }, 174 | "file_extension": ".py", 175 | "mimetype": "text/x-python", 176 | "name": "python", 177 | "nbconvert_exporter": "python", 178 | "pygments_lexer": "ipython2", 179 | "version": "2.7.5" 180 | } 181 | }, 182 | "nbformat": 4, 183 | "nbformat_minor": 2 184 | } 185 | -------------------------------------------------------------------------------- /tutorials/PytorchAddExportSupport.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Fail to export the model in PyTorch 4 | When you try to export a model, you may receive a message similar to the following: 5 | ``` 6 | UserWarning: ONNX export failed on elu because torch.onnx.symbolic.elu does not exist 7 | RuntimeError: ONNX export failed: Couldn't export operator elu 8 | ``` 9 | The export fails because PyTorch does not support exporting the `elu` operator. If you've already reached out to the ONNX team but haven't received a response, you can add support for this yourself. The difficulty of doing this depends on your answers to the following questions: 10 | 11 | ### Determine how difficult it is to add support for the operator 12 | #### Question 1: Is the operator you want standardized in ONNX? 13 | Answer: 14 | - **Yes.** Great! It will be straightforward to add support for the missing operator. 15 | - **No.** In this case, it may be difficult to do the work by yourself. 16 | Check the [Standardization Section](#standardize_op). 17 | 18 | #### Question 2: Can the ONNX operator be imported by the backend framework, such as Caffe2? 19 | Answer: 20 | - **Yes.** Terrific. We are able to run the exported ONNX model. 21 | - **No.** In this situation, you can only export model. Please contact the 22 | importer (such as onnx-caffe2) developers, as additional work is required. 23 | 24 | ### How to add support to export an operator in PyTorch 25 | #### Condition 1: If the operator in PyTorch is an ATen operator... 26 | To determine whether the operator is an ATen operator or not, check 27 | [`torch/csrc/autograd/generated/VariableType.h`](https://codebrowser.bddppq.com/pytorch/pytorch/torch/csrc/autograd/generated/VariableType.h.html) (available within generated code in the PyTorch install dir). If you find the corresponding function in this header file, it's most likely an ATen operator. 
28 | 29 | **Define symbolic functions.** In this case, you should obey the following rules. 30 | - Define the symbolic function in [`torch/onnx/symbolic.py`](https://github.com/pytorch/pytorch/blob/master/torch/onnx/symbolic_helper.py). Make sure the 31 | function has the same name as the ATen operator/function defined in 32 | [`VariableType.h`](https://codebrowser.bddppq.com/pytorch/pytorch/torch/csrc/autograd/generated/VariableType.h.html). 33 | - The first parameter is always the exported ONNX graph. 34 | - Parameter names must match the names in `VariableType.h` EXACTLY, because 35 | dispatch is done with keyword arguments. 36 | - Parameter ordering does NOT necessarily match what is in `VariableType.h`. 37 | Tensors (inputs) are always first, followed by non-tensor arguments. 38 | - In the symbolic function, if the target op is already standarized in ONNX, 39 | we just need to create a node to represent the ONNX operator in the graph. 40 | Here is an example to create a node for the `Elu` ONNX operator: 41 | `g.op("Elu", input, alpha_f=_scalar(alpha))`. More details are included in 42 | [API section](#api). 43 | - If the input argument is a tensor, but ONNX asks for a scalar, we have to 44 | explicitly do the conversion. The helper function, `_scalar`, can convert a 45 | scalar tensor into a Python scalar, and `_if_scalar_type_as` can turn a 46 | Python scalar into a PyTorch tensor. 47 | 48 | In the case of adding support for the operator `elu`, we can find the following declaration in `VariableType.h`: 49 | ```cpp 50 | virtual Tensor elu(const Tensor & input, Scalar alpha, bool inplace) const override; 51 | ``` 52 | From the above, it can be determined that `elu` is implemented in the ATen library. So we can define a symbolic 53 | function called `elu` in `torch/onnx/symbolic.py`, similar to the following: 54 | ```python 55 | def elu(g, input, alpha, inplace=False): 56 | return g.op("Elu", input, alpha_f=_scalar(alpha)) 57 | ``` 58 | 59 | #### Condition 2: If the operator in PyTorch is not an ATen operator... 60 | If you cannot find the corresponding function in `VariableType.h`, 61 | this means you need to define the symbolic function in the PyTorch 62 | Function class. For example, you need to create a `symbolic` function 63 | for operator `Dropout` in [torch/nn/_functions/dropout.py](https://github.com/pytorch/pytorch/blob/99037d627da68cdf53d3d0315deceddfadf03bba/torch/nn/_functions/dropout.py#L14). 64 | 65 | **Define symbolic functions.** To define the symbolic functions for 66 | non-ATen operators, the following rules should be obeyed. 67 | - Create a symbolic function, named `symbolic`, in the corresponding Function 68 | class. 69 | - The first parameter is always the exported ONNX graph. 70 | - Parameter names except the first must match the names in `forward` EXACTLY. 71 | - The output tuple size must match the outputs of `forward`. 72 | - In the symbolic function, if the target op is already standarized, 73 | we only need to create a node in the graph to represent the ONNX operator. 74 | Check the [API Section](#api) for more details. 75 | 76 | 77 | ## Export related APIs in PyTorch 78 | Symbolic functions should be implemented in Python. All of these functions interact with Python methods which are implemented via C++-Python bindings. 
The interface they provide looks like this: 79 | 80 | ```python 81 | def operator/symbolic(g, *inputs): 82 | """ 83 | Modifies Graph (e.g., using "op"), adding the ONNX operations representing 84 | this PyTorch function, and returning a Value or tuple of Values specifying the 85 | ONNX outputs whose values correspond to the original PyTorch return values 86 | of the autograd Function (or None if an output is not supported by ONNX). 87 | 88 | Arguments: 89 | g (Graph): graph to write the ONNX representation into 90 | inputs (Value...): list of values representing the variables which contain 91 | the inputs for this function 92 | """ 93 | 94 | class Value(object): 95 | """Represents an intermediate tensor value computed in ONNX.""" 96 | def type(self): 97 | """Returns the Type of the value.""" 98 | 99 | class Type(object): 100 | def sizes(self): 101 | """Returns a tuple of ints representing the shape of a tensor this describes.""" 102 | 103 | class Graph(object): 104 | def op(self, opname, *inputs, **attrs): 105 | """ 106 | Create an ONNX operator 'opname', taking 'args' as inputs 107 | and attributes 'kwargs' and add it to the current graph, 108 | returning the value representing the single output of this 109 | operator (see the `outputs` keyword argument for multi-return 110 | operators). 111 | 112 | The set of operators and the inputs/attributes they take 113 | is documented at https://github.com/onnx/onnx/blob/main/docs/Operators.md 114 | 115 | Arguments: 116 | opname (string): The ONNX operator name, e.g., `Abs` or `Add`. 117 | args (Value...): The inputs to the operator; usually provided 118 | as arguments to the `symbolic` definition. 119 | kwargs: The attributes of the ONNX operator, with keys named 120 | according to the following convention: `alpha_f` indicates 121 | the `alpha` attribute with type `f`. The valid type specifiers are 122 | `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute 123 | specified with type float accepts either a single float, or a 124 | list of floats (e.g., you would say `dims_i` for a `dims` attribute 125 | that takes a list of integers). 126 | outputs (int, optional): The number of outputs this operator returns; 127 | by default an operator is assumed to return a single output. 128 | If `outputs` is greater than one, this functions returns a tuple 129 | of output `Value`, representing each output of the ONNX operator 130 | in positional. 131 | """ 132 | ``` 133 | 134 | ## Standardize the operator in ONNX 135 | If there is no appropriate operator in ONNX to translate to, you will have to 136 | add it to ONNX (ONNX will reject any operators that it does not understand). 137 | 138 | **Experimental.** If you just need export to work in a one-off case, without 139 | getting it into ONNX proper, you can add your operator as an *experimental* 140 | operator. 
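To tie the `Graph.op` conventions from the [API section](#api) back to the symbolic-function rules, here is a small illustrative sketch (not taken from PyTorch's sources; the parameter lists are assumptions) showing the attribute-suffix convention (`alpha_f`, `ratio_f`) and the `outputs` keyword for multi-output ONNX operators:

```python
# Hedged sketch only: assumes the symbolic-function rules described above.
def leaky_relu(g, input, negative_slope, inplace=False):
    # `alpha_f` sets the float attribute `alpha` on the ONNX LeakyRelu operator.
    return g.op("LeakyRelu", input, alpha_f=float(negative_slope))

def dropout(g, input, p, train=False, inplace=False):
    # ONNX Dropout produces two outputs (result, mask), so `outputs=2` makes
    # g.op return a 2-tuple; only the result is handed back to the exporter.
    r, _ = g.op("Dropout", input, ratio_f=float(p), outputs=2)
    return r
```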
141 | 142 | ## More ONNX symbolic examples 143 | - ATen operators in symbolic.py 144 | - [symbolic_opset10.py](https://github.com/pytorch/pytorch/blob/master/torch/onnx/symbolic_opset10.py) 145 | - [symbolic_opset9.py](https://github.com/pytorch/pytorch/blob/master/torch/onnx/symbolic_opset9.py) 146 | - [symbolic_helper.py](https://github.com/pytorch/pytorch/blob/master/torch/onnx/symbolic_helper.py) 147 | - [Index](https://github.com/pytorch/pytorch/blob/99037d627da68cdf53d3d0315deceddfadf03bba/torch/autograd/_functions/tensor.py#L24) 148 | - [Negate](https://github.com/pytorch/pytorch/blob/99037d627da68cdf53d3d0315deceddfadf03bba/torch/autograd/_functions/basic_ops.py#L50) 149 | - [ConstantPadNd](https://github.com/pytorch/pytorch/blob/99037d627da68cdf53d3d0315deceddfadf03bba/torch/nn/_functions/padding.py#L8) 150 | -------------------------------------------------------------------------------- /tutorials/PytorchOnnxExport.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Exporting model from PyTorch to ONNX\n", 8 | "\n", 9 | "In this tutorial, we describe how to use ONNX to convert a model defined\n", 10 | "in PyTorch into the ONNX format.\n", 11 | "\n", 12 | "ONNX exporter is part of the [PyTorch repository](https://pytorch.org/docs/master/onnx.html).\n", 13 | "\n", 14 | "For working with this tutorial, you will need to install [onnx](https://github.com/onnx/onnx). You can get binary builds of onnx with\n", 15 | "``conda install -c conda-forge onnx``.\n", 16 | "\n", 17 | "``NOTE``: ONNX is under active development so for the best support consider building PyTorch master branch which can be installed by following\n", 18 | "[the instructions here](https://github.com/pytorch/pytorch#from-source)" 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": {}, 24 | "source": [ 25 | "## Invoking exporter\n", 26 | "\n", 27 | "Pretty much it's a matter of replacing `my_model(input)` with `torch.onnx.export(my_model, input, \"my_model.onnx\")` in your script.\n", 28 | "\n", 29 | "### Limitations\n", 30 | "\n", 31 | "The ONNX exporter is a trace-based exporter, which means that it operates by executing your model once, and exporting the operators which were actually run during this run. This means that if your model is dynamic, e.g., changes behavior depending on input data, the export won’t be accurate.\n", 32 | "\n", 33 | "Similarly, a trace might be valid only for a specific input size (which is one reason why we require explicit inputs on tracing). Most of the operators export size-agnostic versions and should work on different batch sizes or input sizes. We recommend examining the model trace and making sure the traced operators look reasonable." 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 1, 39 | "metadata": {}, 40 | "outputs": [ 41 | { 42 | "name": "stdout", 43 | "output_type": "stream", 44 | "text": [ 45 | "Help on function export in module torch.onnx:\n", 46 | "\n", 47 | "export(model, args, f, export_params=True, verbose=False, training=False)\n", 48 | " Export a model into ONNX format. 
This exporter runs your model\n", 49 | " once in order to get a trace of its execution to be exported; at the\n", 50 | " moment, it does not support dynamic models (e.g., RNNs.)\n", 51 | " \n", 52 | " See also: :ref:`onnx-export`\n", 53 | " \n", 54 | " Arguments:\n", 55 | " model (torch.nn.Module): the model to be exported.\n", 56 | " args (tuple of arguments): the inputs to\n", 57 | " the model, e.g., such that ``model(*args)`` is a valid\n", 58 | " invocation of the model. Any non-Variable arguments will\n", 59 | " be hard-coded into the exported model; any Variable arguments\n", 60 | " will become inputs of the exported model, in the order they\n", 61 | " occur in args. If args is a Variable, this is equivalent\n", 62 | " to having called it with a 1-ary tuple of that Variable.\n", 63 | " (Note: passing keyword arguments to the model is not currently\n", 64 | " supported. Give us a shout if you need it.)\n", 65 | " f: a file-like object (has to implement fileno that returns a file descriptor)\n", 66 | " or a string containing a file name. A binary Protobuf will be written\n", 67 | " to this file.\n", 68 | " export_params (bool, default True): if specified, all parameters will\n", 69 | " be exported. Set this to False if you want to export an untrained model.\n", 70 | " In this case, the exported model will first take all of its parameters\n", 71 | " as arguments, the ordering as specified by ``model.state_dict().values()``\n", 72 | " verbose (bool, default False): if specified, we will print out a debug\n", 73 | " description of the trace being exported.\n", 74 | " training (bool, default False): export the model in training mode. At\n", 75 | " the moment, ONNX is oriented towards exporting models for inference\n", 76 | " only, so you will generally not need to set this to True.\n", 77 | "\n" 78 | ] 79 | } 80 | ], 81 | "source": [ 82 | "import torch.onnx\n", 83 | "help(torch.onnx.export)" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": {}, 89 | "source": [ 90 | "## Trying it out on AlexNet\n", 91 | "\n", 92 | "If you already have your model built, it's just a few lines:" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 2, 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [ 101 | "import torch.onnx\n", 102 | "import torchvision\n", 103 | "\n", 104 | "# Standard ImageNet input - 3 channels, 224x224,\n", 105 | "# values don't matter as we care about network structure.\n", 106 | "# But they can also be real inputs.\n", 107 | "dummy_input = torch.randn(1, 3, 224, 224)\n", 108 | "# Obtain your model, it can be also constructed in your script explicitly\n", 109 | "model = torchvision.models.alexnet(pretrained=True)\n", 110 | "# Invoke export\n", 111 | "torch.onnx.export(model, dummy_input, \"alexnet.onnx\")" 112 | ] 113 | }, 114 | { 115 | "cell_type": "markdown", 116 | "metadata": {}, 117 | "source": [ 118 | "**That's it!**\n", 119 | "\n", 120 | "## Inspecting model\n", 121 | "\n", 122 | "You can also use ONNX tooling to check the validity of the resulting model or inspect the details" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": 3, 128 | "metadata": {}, 129 | "outputs": [ 130 | { 131 | "name": "stdout", 132 | "output_type": "stream", 133 | "text": [ 134 | "graph torch-jit-export (\n", 135 | " %0[FLOAT, 1x3x224x224]\n", 136 | ") initializers (\n", 137 | " %1[FLOAT, 64x3x11x11]\n", 138 | " %2[FLOAT, 64]\n", 139 | " %3[FLOAT, 192x64x5x5]\n", 140 | " %4[FLOAT, 192]\n", 141 | " %5[FLOAT, 384x192x3x3]\n", 142 | " 
%6[FLOAT, 384]\n", 143 | " %7[FLOAT, 256x384x3x3]\n", 144 | " %8[FLOAT, 256]\n", 145 | " %9[FLOAT, 256x256x3x3]\n", 146 | " %10[FLOAT, 256]\n", 147 | " %11[FLOAT, 4096x9216]\n", 148 | " %12[FLOAT, 4096]\n", 149 | " %13[FLOAT, 4096x4096]\n", 150 | " %14[FLOAT, 4096]\n", 151 | " %15[FLOAT, 1000x4096]\n", 152 | " %16[FLOAT, 1000]\n", 153 | ") {\n", 154 | " %17 = Conv[dilations = [1, 1], group = 1, kernel_shape = [11, 11], pads = [2, 2, 2, 2], strides = [4, 4]](%0, %1)\n", 155 | " %18 = Add[axis = 1, broadcast = 1](%17, %2)\n", 156 | " %19 = Relu(%18)\n", 157 | " %20 = MaxPool[kernel_shape = [3, 3], pads = [0, 0], strides = [2, 2]](%19)\n", 158 | " %21 = Conv[dilations = [1, 1], group = 1, kernel_shape = [5, 5], pads = [2, 2, 2, 2], strides = [1, 1]](%20, %3)\n", 159 | " %22 = Add[axis = 1, broadcast = 1](%21, %4)\n", 160 | " %23 = Relu(%22)\n", 161 | " %24 = MaxPool[kernel_shape = [3, 3], pads = [0, 0], strides = [2, 2]](%23)\n", 162 | " %25 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%24, %5)\n", 163 | " %26 = Add[axis = 1, broadcast = 1](%25, %6)\n", 164 | " %27 = Relu(%26)\n", 165 | " %28 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%27, %7)\n", 166 | " %29 = Add[axis = 1, broadcast = 1](%28, %8)\n", 167 | " %30 = Relu(%29)\n", 168 | " %31 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%30, %9)\n", 169 | " %32 = Add[axis = 1, broadcast = 1](%31, %10)\n", 170 | " %33 = Relu(%32)\n", 171 | " %34 = MaxPool[kernel_shape = [3, 3], pads = [0, 0], strides = [2, 2]](%33)\n", 172 | " %35 = Reshape[shape = [1, 9216]](%34)\n", 173 | " %36, %37 = Dropout[is_test = 1, ratio = 0.5](%35)\n", 174 | " %38 = Transpose[perm = [1, 0]](%11)\n", 175 | " %40 = Gemm[alpha = 1, beta = 1, broadcast = 1](%36, %38, %12)\n", 176 | " %41 = Relu(%40)\n", 177 | " %42, %43 = Dropout[is_test = 1, ratio = 0.5](%41)\n", 178 | " %44 = Transpose[perm = [1, 0]](%13)\n", 179 | " %46 = Gemm[alpha = 1, beta = 1, broadcast = 1](%42, %44, %14)\n", 180 | " %47 = Relu(%46)\n", 181 | " %48 = Transpose[perm = [1, 0]](%15)\n", 182 | " %50 = Gemm[alpha = 1, beta = 1, broadcast = 1](%47, %48, %16)\n", 183 | " return %50\n", 184 | "}\n" 185 | ] 186 | } 187 | ], 188 | "source": [ 189 | "import onnx\n", 190 | "\n", 191 | "# Load the ONNX model\n", 192 | "model = onnx.load(\"alexnet.onnx\")\n", 193 | "\n", 194 | "# Check that the IR is well formed\n", 195 | "onnx.checker.check_model(model)\n", 196 | "\n", 197 | "# Print a human readable representation of the graph\n", 198 | "print(onnx.helper.printable_graph(model.graph))" 199 | ] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": {}, 204 | "source": [ 205 | "Notice that all parameters are listed as graph's inputs but they also have stored values initialized in `model.graph.initializers`." 
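As a quick hedged sketch (not part of the original notebook), the stored parameter values mentioned above can be listed from the loaded model; the protobuf field is the repeated `initializer` on `GraphProto`, and the file name assumes the `alexnet.onnx` exported earlier:

```python
# Sketch: list a few of the stored parameters backing the graph inputs.
import onnx

model = onnx.load("alexnet.onnx")  # assumes the file exported above
for init in model.graph.initializer[:3]:
    print(init.name, list(init.dims))  # parameter name and its shape
```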
206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": null, 211 | "metadata": { 212 | "collapsed": true 213 | }, 214 | "outputs": [], 215 | "source": [ 216 | "## What's next\n", 217 | "\n", 218 | "Check [PyTorch documentation on onnx file](https://pytorch.org/docs/master/onnx.html)\n", 219 | "Take a look at [other tutorials, including importing of ONNX models to other frameworks](https://github.com/onnx/tutorials/tree/master/tutorials)" 220 | ] 221 | } 222 | ], 223 | "metadata": { 224 | "kernelspec": { 225 | "display_name": "Python 2", 226 | "language": "python", 227 | "name": "python2" 228 | }, 229 | "language_info": { 230 | "codemirror_mode": { 231 | "name": "ipython", 232 | "version": 2 233 | }, 234 | "file_extension": ".py", 235 | "mimetype": "text/x-python", 236 | "name": "python", 237 | "nbconvert_exporter": "python", 238 | "pygments_lexer": "ipython2", 239 | "version": "2.7.11" 240 | } 241 | }, 242 | "nbformat": 4, 243 | "nbformat_minor": 2 244 | } 245 | -------------------------------------------------------------------------------- /tutorials/PytorchTensorflowMnist.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Convert a PyTorch model to Tensorflow using ONNX\n", 8 | "\n", 9 | "In this tutorial, we will show you how to export a model defined in PyTorch to ONNX and then import the ONNX model into Tensorflow to run it. We will also show you how to save this Tensorflow model into a file for later use. " 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "## Installations\n", 17 | "\n", 18 | "First let's install [ONNX](https://github.com/onnx/onnx), [PyTorch](https://github.com/pytorch/pytorch), and [Tensorflow](https://github.com/tensorflow/tensorflow) by following the instructions on each of their repository.\n", 19 | "\n", 20 | "Then Install torchvision by the following command:\n", 21 | "```\n", 22 | "pip install torchvision\n", 23 | "```\n", 24 | "\n", 25 | "Next install [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) by the following commands:\n", 26 | "```\n", 27 | "git clone git@github.com:onnx/onnx-tensorflow.git && cd onnx-tensorflow\n", 28 | "pip install -e .\n", 29 | "```" 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## Define model\n", 37 | "\n", 38 | "In this tutorial we are going to use the [MNIST model](https://github.com/pytorch/examples/tree/master/mnist) from PyTorch examples. 
" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "import torch\n", 48 | "import torch.nn as nn\n", 49 | "import torch.nn.functional as F\n", 50 | "\n", 51 | "\n", 52 | "class Net(nn.Module):\n", 53 | " def __init__(self):\n", 54 | " super(Net, self).__init__()\n", 55 | " self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n", 56 | " self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n", 57 | " self.conv2_drop = nn.Dropout2d()\n", 58 | " self.fc1 = nn.Linear(320, 50)\n", 59 | " self.fc2 = nn.Linear(50, 10)\n", 60 | "\n", 61 | " def forward(self, x):\n", 62 | " x = F.relu(F.max_pool2d(self.conv1(x), 2))\n", 63 | " x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n", 64 | " x = x.view(-1, 320)\n", 65 | " x = F.relu(self.fc1(x))\n", 66 | " x = F.dropout(x, training=self.training)\n", 67 | " x = self.fc2(x)\n", 68 | " return F.log_softmax(x, dim=1)" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "## Train and test model\n", 76 | "Now let's train this model. By default if GPU is availalbe on your environment, it will use GPU instead of CPU to run the training. In this tutorial we will train this model with 60 epochs. It will takes about 15 minutes on an environment with 1 GPU to complete this training. You can always adjust the number of epoch base on how well you want your model to be trained." 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "import argparse\n", 86 | "import torch.optim as optim\n", 87 | "from torchvision import datasets, transforms\n", 88 | "\n", 89 | "def train(args, model, device, train_loader, optimizer, epoch):\n", 90 | " model.train()\n", 91 | " for batch_idx, (data, target) in enumerate(train_loader):\n", 92 | " data, target = data.to(device), target.to(device)\n", 93 | " optimizer.zero_grad()\n", 94 | " output = model(data)\n", 95 | " loss = F.nll_loss(output, target)\n", 96 | " loss.backward()\n", 97 | " optimizer.step()\n", 98 | " if batch_idx % args.log_interval == 0:\n", 99 | " print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n", 100 | " epoch, batch_idx * len(data), len(train_loader.dataset),\n", 101 | " 100. * batch_idx / len(train_loader), loss.item()))\n", 102 | "\n", 103 | "def test(args, model, device, test_loader):\n", 104 | " model.eval()\n", 105 | " test_loss = 0\n", 106 | " correct = 0\n", 107 | " with torch.no_grad():\n", 108 | " for data, target in test_loader:\n", 109 | " data, target = data.to(device), target.to(device)\n", 110 | " output = model(data)\n", 111 | " test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n", 112 | " pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n", 113 | " correct += pred.eq(target.view_as(pred)).sum().item()\n", 114 | "\n", 115 | " test_loss /= len(test_loader.dataset)\n", 116 | " print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", 117 | " test_loss, correct, len(test_loader.dataset),\n", 118 | " 100. 
* correct / len(test_loader.dataset)))\n", 119 | " \n", 120 | "# Training settings\n", 121 | "parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n", 122 | "parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n", 123 | " help='input batch size for training (default: 64)')\n", 124 | "parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n", 125 | " help='input batch size for testing (default: 1000)')\n", 126 | "parser.add_argument('--epochs', type=int, default=10, metavar='N',\n", 127 | " help='number of epochs to train (default: 10)')\n", 128 | "parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n", 129 | " help='learning rate (default: 0.01)')\n", 130 | "parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n", 131 | " help='SGD momentum (default: 0.5)')\n", 132 | "parser.add_argument('--no-cuda', action='store_true', default=False,\n", 133 | " help='disables CUDA training')\n", 134 | "parser.add_argument('--seed', type=int, default=1, metavar='S',\n", 135 | " help='random seed (default: 1)')\n", 136 | "parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n", 137 | " help='how many batches to wait before logging training status')\n", 138 | "\n", 139 | "# Train this model with 60 epochs and after process every 300 batches log the train status \n", 140 | "args = parser.parse_args(['--epochs', '60', '--log-interval', '300'])\n", 141 | "\n", 142 | "use_cuda = not args.no_cuda and torch.cuda.is_available()\n", 143 | "\n", 144 | "torch.manual_seed(args.seed)\n", 145 | "\n", 146 | "device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n", 147 | "\n", 148 | "kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n", 149 | "train_loader = torch.utils.data.DataLoader(\n", 150 | " datasets.MNIST('../data', train=True, download=True,\n", 151 | " transform=transforms.Compose([\n", 152 | " transforms.ToTensor(),\n", 153 | " transforms.Normalize((0.1307,), (0.3081,))\n", 154 | " ])),\n", 155 | " batch_size=args.batch_size, shuffle=True, **kwargs)\n", 156 | "test_loader = torch.utils.data.DataLoader(\n", 157 | " datasets.MNIST('../data', train=False, transform=transforms.Compose([\n", 158 | " transforms.ToTensor(),\n", 159 | " transforms.Normalize((0.1307,), (0.3081,))\n", 160 | " ])),\n", 161 | " batch_size=args.test_batch_size, shuffle=True, **kwargs)\n", 162 | "\n", 163 | "\n", 164 | "model = Net().to(device)\n", 165 | "optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n", 166 | "\n", 167 | "for epoch in range(1, args.epochs + 1):\n", 168 | " train(args, model, device, train_loader, optimizer, epoch)\n", 169 | " test(args, model, device, test_loader)" 170 | ] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "## Save the trained model" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "# Save the trained model to a file\n", 186 | "torch.save(model.state_dict(), 'output/mnist.pth')" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "## Export the trained model to ONNX \n", 194 | "In order to export the model, Pytorch exporter needs to run the model once and save this resulting traced model to a ONNX file. Therefore, we need to provide the input for our MNIST model. 
Here we would expect to get a black and white 28 x 28 picture as an input to run this model in inference phase." 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": {}, 201 | "outputs": [], 202 | "source": [ 203 | "from torch.autograd import Variable\n", 204 | "\n", 205 | "# Load the trained model from file\n", 206 | "trained_model = Net()\n", 207 | "trained_model.load_state_dict(torch.load('output/mnist.pth'))\n", 208 | "\n", 209 | "# Export the trained model to ONNX\n", 210 | "dummy_input = Variable(torch.randn(1, 1, 28, 28)) # one black and white 28 x 28 picture will be the input to the model\n", 211 | "torch.onnx.export(trained_model, dummy_input, \"output/mnist.onnx\")" 212 | ] 213 | }, 214 | { 215 | "cell_type": "markdown", 216 | "metadata": {}, 217 | "source": [ 218 | "PS. You can examine the graph of this mnist.onnx file using an ONNX viewer call [Netron](https://github.com/lutzroeder/Netron)" 219 | ] 220 | }, 221 | { 222 | "cell_type": "markdown", 223 | "metadata": {}, 224 | "source": [ 225 | "## Import the ONNX model to Tensorflow\n", 226 | "We will use onnx_tf.backend.prepare to import the ONNX model into Tensorflow." 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": null, 232 | "metadata": {}, 233 | "outputs": [], 234 | "source": [ 235 | "import onnx\n", 236 | "from onnx_tf.backend import prepare\n", 237 | "\n", 238 | "# Load the ONNX file\n", 239 | "model = onnx.load('output/mnist.onnx')\n", 240 | "\n", 241 | "# Import the ONNX model to Tensorflow\n", 242 | "tf_rep = prepare(model)" 243 | ] 244 | }, 245 | { 246 | "cell_type": "markdown", 247 | "metadata": {}, 248 | "source": [ 249 | "Let's explore the tf_rep object return from onnx.tf.backend.prepare" 250 | ] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "execution_count": null, 255 | "metadata": {}, 256 | "outputs": [], 257 | "source": [ 258 | "# Input nodes to the model\n", 259 | "print('inputs:', tf_rep.inputs)\n", 260 | "\n", 261 | "# Output nodes from the model\n", 262 | "print('outputs:', tf_rep.outputs)\n", 263 | "\n", 264 | "# All nodes in the model\n", 265 | "print('tensor_dict:')\n", 266 | "print(tf_rep.tensor_dict)" 267 | ] 268 | }, 269 | { 270 | "cell_type": "markdown", 271 | "metadata": {}, 272 | "source": [ 273 | "## Run the model in Tensorflow" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": null, 279 | "metadata": {}, 280 | "outputs": [], 281 | "source": [ 282 | "import numpy as np\n", 283 | "from IPython.display import display\n", 284 | "from PIL import Image\n", 285 | "\n", 286 | "print('Image 1:')\n", 287 | "img = Image.open('assets/two.png').resize((28, 28)).convert('L')\n", 288 | "display(img)\n", 289 | "output = tf_rep.run(np.asarray(img, dtype=np.float32)[np.newaxis, np.newaxis, :, :])\n", 290 | "print('The digit is classified as ', np.argmax(output))\n", 291 | "\n", 292 | "print('Image 2:')\n", 293 | "img = Image.open('assets/three.png').resize((28, 28)).convert('L')\n", 294 | "display(img)\n", 295 | "output = tf_rep.run(np.asarray(img, dtype=np.float32)[np.newaxis, np.newaxis, :, :])\n", 296 | "print('The digit is classified as ', np.argmax(output))" 297 | ] 298 | }, 299 | { 300 | "cell_type": "markdown", 301 | "metadata": {}, 302 | "source": [ 303 | "## Save the Tensorflow model into a file" 304 | ] 305 | }, 306 | { 307 | "cell_type": "code", 308 | "execution_count": null, 309 | "metadata": {}, 310 | "outputs": [], 311 | "source": [ 312 | "tf_rep.export_graph('output/mnist.pb')" 313 | ] 314 | } 
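As a hedged follow-up (not part of the original notebook), the exported `output/mnist.pb` can be read back with the TF 1.x `GraphDef` API to confirm it is a loadable graph:

```python
# Sketch (TF 1.x, matching this notebook): re-load the exported GraphDef.
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('output/mnist.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_def, name='')

# Print a few operation names to verify the import worked.
print([op.name for op in g.get_operations()][:5])
```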
315 | ], 316 | "metadata": { 317 | "celltoolbar": "Raw Cell Format", 318 | "kernelspec": { 319 | "display_name": "Python 2", 320 | "language": "python", 321 | "name": "python2" 322 | }, 323 | "language_info": { 324 | "codemirror_mode": { 325 | "name": "ipython", 326 | "version": 3 327 | }, 328 | "file_extension": ".py", 329 | "mimetype": "text/x-python", 330 | "name": "python", 331 | "nbconvert_exporter": "python", 332 | "pygments_lexer": "ipython3", 333 | "version": "3.5.2" 334 | } 335 | }, 336 | "nbformat": 4, 337 | "nbformat_minor": 2 338 | } 339 | -------------------------------------------------------------------------------- /tutorials/TensorflowToOnnx-1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Convert Tensorflow model to ONNX\n", 8 | "Tensorflow and ONNX each define their own graph format to represent a model. You can use [tensorflow-onnx](https://github.com/onnx/tensorflow-onnx \"Title\") to export a Tensorflow model to ONNX.\n", 9 | "\n", 10 | "We divide the guide into 2 parts: part 1 covers basic conversion and part 2 advanced topics. The following content will be covered in order:\n", 11 | "1. Procedures to convert tensorflow model\n", 12 | "    - get tensorflow model\n", 13 | "    - convert to ONNX\n", 14 | "    - validate\n", 15 | "2. Key concepts\n", 16 | "    - opset\n", 17 | "    - data format" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "## Step 1 - Get Tensorflow model\n", 25 | "Tensorflow uses several file formats to represent a model, such as checkpoint files, a graph with weights (called a `frozen graph` below), and saved_model, and it has APIs to generate these files; you can find the corresponding code snippets in the script [tensorflow_to_onnx_example.py](./assets/tensorflow_to_onnx_example.py).\n", 26 | "\n", 27 | "`tensorflow-onnx` accepts all three formats, and **the \"saved_model\" format should be preferred** since it doesn't require the user to specify the input and output names of the graph.\n", 28 | "We will cover it in this section and cover the other two in part 2. You can also find more detail in `tensorflow-onnx`'s [README](https://github.com/onnx/tensorflow-onnx/blob/master/README.md \"Title\") file. 
29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 12, 34 | "metadata": { 35 | "scrolled": false 36 | }, 37 | "outputs": [ 38 | { 39 | "name": "stdout", 40 | "output_type": "stream", 41 | "text": [ 42 | "please wait for a while, because the script will train MNIST from scratch\n", 43 | "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n", 44 | "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n", 45 | "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n", 46 | "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n", 47 | "step 0, training accuracy 0.18\n", 48 | "step 1000, training accuracy 0.98\n", 49 | "step 2000, training accuracy 0.94\n", 50 | "step 3000, training accuracy 1\n", 51 | "step 4000, training accuracy 1\n", 52 | "test accuracy 0.976\n", 53 | "save tensorflow in format \"saved_model\"\n" 54 | ] 55 | } 56 | ], 57 | "source": [ 58 | "import os\n", 59 | "import shutil\n", 60 | "import tensorflow as tf\n", 61 | "from assets.tensorflow_to_onnx_example import create_and_train_mnist\n", 62 | "def save_model_to_saved_model(sess, input_tensor, output_tensor):\n", 63 | " from tensorflow.saved_model import simple_save\n", 64 | " save_path = r\"./output/saved_model\"\n", 65 | " if os.path.exists(save_path):\n", 66 | " shutil.rmtree(save_path)\n", 67 | " simple_save(sess, save_path, {input_tensor.name: input_tensor}, {output_tensor.name: output_tensor})\n", 68 | "\n", 69 | "print(\"please wait for a while, because the script will train MNIST from scratch\")\n", 70 | "tf.reset_default_graph()\n", 71 | "sess_tf, saver, input_tensor, output_tensor = create_and_train_mnist()\n", 72 | "print(\"save tensorflow in format \\\"saved_model\\\"\")\n", 73 | "save_model_to_saved_model(sess_tf, input_tensor, output_tensor)" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": {}, 79 | "source": [ 80 | "## Step 2 - Convert to ONNX\n", 81 | "`tensorflow-onnx` has several entries to convert tensorflow model with different tensorflow formats, this section will cover \"saved_model\" only, \"frozen graph\" and \"checkpoint\" will be covered in [part 2](./TensorflowToOnnx-2.ipynb).\n", 82 | "\n", 83 | "Also, `tensorflow-onnx` has exported related python APIs, so users can call them directly on their script instead of command line, also the detail will be covered in [part 2](./TensorflowToOnnx-2.ipynb)." 
84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 13, 89 | "metadata": {}, 90 | "outputs": [ 91 | { 92 | "name": "stdout", 93 | "output_type": "stream", 94 | "text": [ 95 | "2019-06-17 07:22:03,871 - INFO - Using tensorflow=1.12.0, onnx=1.5.0, tf2onnx=1.5.1/0c735a\n", 96 | "2019-06-17 07:22:03,871 - INFO - Using opset \n", 97 | "2019-06-17 07:22:03,989 - INFO - \n", 98 | "2019-06-17 07:22:04,012 - INFO - Optimizing ONNX model\n", 99 | "2019-06-17 07:22:04,029 - INFO - After optimization: Add -2 (4->2), Identity -3 (3->0), Transpose -8 (9->1)\n", 100 | "2019-06-17 07:22:04,031 - INFO - \n", 101 | "2019-06-17 07:22:04,032 - INFO - Successfully converted TensorFlow model ./output/saved_model to ONNX\n", 102 | "2019-06-17 07:22:04,044 - INFO - ONNX model is saved at ./output/mnist1.onnx\n" 103 | ] 104 | } 105 | ], 106 | "source": [ 107 | "# generating mnist.onnx using saved_model\n", 108 | "!python -m tf2onnx.convert \\\n", 109 | "        --saved-model ./output/saved_model \\\n", 110 | "        --output ./output/mnist1.onnx \\\n", 111 | "        --opset 7" 112 | ] 113 | }, 114 | { 115 | "cell_type": "markdown", 116 | "metadata": {}, 117 | "source": [ 118 | "## Step 3 - Validate\n", 119 | "Several frameworks can run models in ONNX format; here [ONNXRuntime](https://github.com/microsoft/onnxruntime \"Title\"), open-sourced by `Microsoft`, is used to make sure the generated ONNX graph behaves well.\n", 120 | "The input \"image.npz\" is an image of a handwritten \"7\", so the expected classification result of the model is \"7\". " 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 14, 126 | "metadata": { 127 | "scrolled": false 128 | }, 129 | "outputs": [ 130 | { 131 | "name": "stdout", 132 | "output_type": "stream", 133 | "text": [ 134 | "the expected result is \"7\"\n", 135 | "the digit is classified as \"7\" in ONNXRuntime\n" 136 | ] 137 | } 138 | ], 139 | "source": [ 140 | "import numpy as np\n", 141 | "import onnxruntime as ort\n", 142 | "\n", 143 | "img = np.load(\"./assets/image.npz\").reshape([1, 784])  \n", 144 | "sess_ort = ort.InferenceSession(\"./output/mnist1.onnx\")\n", 145 | "res = sess_ort.run(output_names=[output_tensor.name], input_feed={input_tensor.name: img})\n", 146 | "print(\"the expected result is \\\"7\\\"\")\n", 147 | "print(\"the digit is classified as \\\"%s\\\" in ONNXRuntime\"%np.argmax(res))" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "## Key concepts\n", 155 | "This command line should work for most tensorflow models if they are available as a saved_model. In some cases you might encounter issues that require extra options.\n", 156 | "The most important concept is the \"**opset** version\": ONNX is an evolving standard that keeps adding new operations and enhancing existing ones, so different opset versions contain different operations, and an operation may behave differently across versions. The default opset version used by \"tensorflow-onnx\" is 7 and ONNX now supports version 10, so if the conversion fails, you may try a different version via the command line option \"--opset\" to see if it works (the sketch below shows how to check which opset an exported model declares).\n", 157 | "\n", 158 | "Continue with [part 2](./TensorflowToOnnx-2.ipynb) that explains advanced topics." 
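The sketch referenced above (not part of the original notebook) checks which opset the exported file actually declares, which is handy when experimenting with `--opset`:

```python
# Sketch: inspect the opset imports recorded in the exported ONNX model.
import onnx

m = onnx.load("./output/mnist1.onnx")
for imp in m.opset_import:
    # An empty domain string means the default "ai.onnx" operator set.
    print(imp.domain or "ai.onnx", imp.version)
```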
160 | ] 161 | } 162 | ], 163 | "metadata": { 164 | "kernelspec": { 165 | "display_name": "Python 3", 166 | "language": "python", 167 | "name": "python3" 168 | }, 169 | "language_info": { 170 | "codemirror_mode": { 171 | "name": "ipython", 172 | "version": 3 173 | }, 174 | "file_extension": ".py", 175 | "mimetype": "text/x-python", 176 | "name": "python", 177 | "nbconvert_exporter": "python", 178 | "pygments_lexer": "ipython3", 179 | "version": "3.5.2" 180 | } 181 | }, 182 | "nbformat": 4, 183 | "nbformat_minor": 2 184 | } 185 | -------------------------------------------------------------------------------- /tutorials/TensorflowToOnnx-2.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Convert Tensorflow model to ONNX\n", 8 | "The general procedures to convert a tensorflow to ONNX is covered [part 1](./TensorflowToOnnx-1.ipynb).\n", 9 | "\n", 10 | "In this tutorial, we will cover the following contents in order:\n", 11 | "1. convert tensorflow model with other formats\n", 12 | " - convert with frozen graph\n", 13 | " - convert with checkpoint\n", 14 | "2. convert in python script\n", 15 | "3. useful command line options of `tensorflow-onnx`" 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "## Convert with frozen graph\n", 23 | "Tensorflow has API to get model's frozen graph and `tensorflow-onnx` can accept it as an input.\n", 24 | "\n", 25 | "While besides the frozen graph, the input and output tensors' names are also needed. Those names typically end with \":0\", you could either get them by tensorflow [summarized tool](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/graph_transforms), or specify their names using tf.identity in your tensorflow script." 
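As a hedged illustration of the naming advice above (the placeholder and layer are assumptions, not code from this notebook), `tf.identity` can pin down predictable tensor names; remember that tensor names carry a trailing ":0":

```python
# Sketch (TF 1.x): give the graph well-known input/output tensor names.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784], name="input")
logits = tf.layers.dense(x, 10)
output = tf.identity(logits, name="output")

print(x.name, output.name)  # -> input:0 output:0
```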
26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 11, 31 | "metadata": {}, 32 | "outputs": [ 33 | { 34 | "name": "stdout", 35 | "output_type": "stream", 36 | "text": [ 37 | "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n", 38 | "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n", 39 | "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n", 40 | "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n", 41 | "step 0, training accuracy 0.08\n", 42 | "step 1000, training accuracy 0.98\n", 43 | "step 2000, training accuracy 1\n", 44 | "step 3000, training accuracy 0.98\n", 45 | "step 4000, training accuracy 1\n", 46 | "test accuracy 0.984\n" 47 | ] 48 | } 49 | ], 50 | "source": [ 51 | "import tensorflow as tf\n", 52 | "from assets.tensorflow_to_onnx_example import create_and_train_mnist\n", 53 | "\n", 54 | "def save_model_to_frozen_proto(sess):\n", 55 | " frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, [output_tensor.name[:-2]])\n", 56 | " with open(\"./output/mnist_frozen.pb\", \"wb\") as file:\n", 57 | " file.write(frozen_graph.SerializeToString())\n", 58 | "\n", 59 | "sess_tf, saver, input_tensor, output_tensor = create_and_train_mnist() \n", 60 | "save_model_to_frozen_proto(sess_tf)" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 12, 66 | "metadata": {}, 67 | "outputs": [ 68 | { 69 | "name": "stdout", 70 | "output_type": "stream", 71 | "text": [ 72 | "2019-06-17 07:19:39,492 - INFO - Using tensorflow=1.12.0, onnx=1.5.0, tf2onnx=1.5.1/0c735a\n", 73 | "2019-06-17 07:19:39,492 - INFO - Using opset \n", 74 | "2019-06-17 07:19:39,606 - INFO - \n", 75 | "2019-06-17 07:19:39,635 - INFO - Optimizing ONNX model\n", 76 | "2019-06-17 07:19:39,652 - INFO - After optimization: Add -2 (4->2), Identity -3 (3->0), Transpose -8 (9->1)\n", 77 | "2019-06-17 07:19:39,654 - INFO - \n", 78 | "2019-06-17 07:19:39,654 - INFO - Successfully converted TensorFlow model ./output/mnist_frozen.pb to ONNX\n", 79 | "2019-06-17 07:19:39,667 - INFO - ONNX model is saved at ./output/mnist2.onnx\n" 80 | ] 81 | } 82 | ], 83 | "source": [ 84 | "# generating mnist.onnx using frozen_graph\n", 85 | "!python -m tf2onnx.convert \\\n", 86 | " --input ./output/mnist_frozen.pb \\\n", 87 | " --inputs {input_tensor.name} \\\n", 88 | " --outputs {output_tensor.name} \\\n", 89 | " --output ./output/mnist2.onnx \\\n", 90 | " --opset 7" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "metadata": {}, 96 | "source": [ 97 | "## Convert with checkpoint\n", 98 | "Same as frozen graph, you need to specify the path to checkpoint file and model's input and output names." 
99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 13, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "def save_model_to_checkpoint(saver, sess):\n", 108 | " save_path = saver.save(sess, \"./output/ckpt/model.ckpt\")\n", 109 | "\n", 110 | "save_model_to_checkpoint(saver, sess_tf)" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 14, 116 | "metadata": {}, 117 | "outputs": [ 118 | { 119 | "name": "stdout", 120 | "output_type": "stream", 121 | "text": [ 122 | "2019-06-17 07:19:42,533 - INFO - Using tensorflow=1.12.0, onnx=1.5.0, tf2onnx=1.5.1/0c735a\n", 123 | "2019-06-17 07:19:42,534 - INFO - Using opset \n", 124 | "2019-06-17 07:19:42,660 - INFO - \n", 125 | "2019-06-17 07:19:42,684 - INFO - Optimizing ONNX model\n", 126 | "2019-06-17 07:19:42,700 - INFO - After optimization: Add -2 (4->2), Identity -3 (3->0), Transpose -8 (9->1)\n", 127 | "2019-06-17 07:19:42,705 - INFO - \n", 128 | "2019-06-17 07:19:42,705 - INFO - Successfully converted TensorFlow model ./output/ckpt/model.ckpt.meta to ONNX\n", 129 | "2019-06-17 07:19:42,718 - INFO - ONNX model is saved at ./output/mnist3.onnx\n" 130 | ] 131 | } 132 | ], 133 | "source": [ 134 | "# generating mnist.onnx using checkpoint\n", 135 | "!python -m tf2onnx.convert \\\n", 136 | " --checkpoint ./output/ckpt/model.ckpt.meta \\\n", 137 | " --inputs {input_tensor.name}\\\n", 138 | " --outputs {output_tensor.name} \\\n", 139 | " --output ./output/mnist3.onnx \\\n", 140 | " --opset 7" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": {}, 146 | "source": [ 147 | "## Convert in python script\n", 148 | "`tensorflow-onnx` exports conversion APIs so that users can convert tensorflow model into ONNX directly in their script, the following code is an example." 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 15, 154 | "metadata": { 155 | "scrolled": true 156 | }, 157 | "outputs": [ 158 | { 159 | "name": "stdout", 160 | "output_type": "stream", 161 | "text": [ 162 | "generating mnist.onnx in python script\n", 163 | "ONNX model is saved at ./output/mnist4.onnx\n" 164 | ] 165 | } 166 | ], 167 | "source": [ 168 | "from tf2onnx.tfonnx import process_tf_graph, tf_optimize\n", 169 | "import tensorflow as tf\n", 170 | "from tensorflow.graph_util import convert_variables_to_constants as freeze_graph\n", 171 | "\n", 172 | "print(\"generating mnist.onnx in python script\")\n", 173 | "graph_def = freeze_graph(sess_tf, sess_tf.graph_def, [output_tensor.name[:-2]])\n", 174 | "with tf.Graph().as_default() as graph:\n", 175 | " tf.import_graph_def(graph_def, name='')\n", 176 | " onnx_graph = process_tf_graph(graph, opset=7, input_names=[input_tensor.name], output_names=[output_tensor.name])\n", 177 | "model_proto = onnx_graph.make_model(\"test\")\n", 178 | "print(\"ONNX model is saved at ./output/mnist4.onnx\")\n", 179 | "with open(\"./output/mnist4.onnx\", \"wb\") as f:\n", 180 | " f.write(model_proto.SerializeToString())" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "## Useful command line options\n", 188 | "The first useful option is \"**opset**\" which has been covered in [part 1](./TensorflowToOnnx-1.ipynb).\n", 189 | "\n", 190 | "Then second option is \"**inputs-as-nchw**\". Tensorflow supports NCHW and NHWC while ONNX only supports NCHW for now, so if your model uses NHWC then the tool will insert extra transpose nodes to convert the model. 
And though \"tensroflow-onnx\" has optimizers to remove the transpose nodes as much as possible, it's suggested to use NCHW directly if possible. And if model with NCHW is impossible, this option will tell the tool that the real input format will be NCHW and it can remove more inserted transpose nodes now. For example --inputs input0:0,input1:0 --inputs-as-nchw input0:0 assumes that images are passed into input0:0 as nchw while the TensorFlow model given uses nhwc.\n", 191 | "\n", 192 | "As said in part 1, ONNX defines its own operations set to represent machine learning computation operations and the set is different with tensorflow's. And two main difference will make the conversion fail, unsupported input dtype and unsupported operations, so `tensorflow-onnx` has two options to fix the gap if possible. The option \"**target**\" may insert cast operation to convert unsupported dtype into float in some target platform, please see the detail [here](https://github.com/onnx/tensorflow-onnx/wiki/target). The option \"**custom-ops**\" is useful when the runtime used supports custom ops that are not defined in onnx. For example: --custom-ops Print will insert a op Print in the onnx domain ai.onnx.converters.tensorflow into the graph." 193 | ] 194 | }, 195 | { 196 | "cell_type": "markdown", 197 | "metadata": {}, 198 | "source": [ 199 | "More detail on `tensorflow-onnx` can be got from its [README](https://github.com/onnx/tensorflow-onnx/blob/master/README.md \"Title\") file, for example the internal procedures in `tensorflow-onnx` to convert a tensorflow model." 200 | ] 201 | } 202 | ], 203 | "metadata": { 204 | "kernelspec": { 205 | "display_name": "Python 3", 206 | "language": "python", 207 | "name": "python3" 208 | }, 209 | "language_info": { 210 | "codemirror_mode": { 211 | "name": "ipython", 212 | "version": 3 213 | }, 214 | "file_extension": ".py", 215 | "mimetype": "text/x-python", 216 | "name": "python", 217 | "nbconvert_exporter": "python", 218 | "pygments_lexer": "ipython3", 219 | "version": "3.5.2" 220 | } 221 | }, 222 | "nbformat": 4, 223 | "nbformat_minor": 2 224 | } 225 | -------------------------------------------------------------------------------- /tutorials/VersionConversion.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Version Conversion 4 | 5 | The ONNX [Version Converter](https://github.com/onnx/onnx/blob/main/docs/VersionConverter.md) helps convert ONNX models to the version needed by the runtime you are using. 
6 | 7 | Version Conversion for BatchNormalization from opset 8 to 9: 8 | 9 | 10 | 11 | 12 | ### Downgrade Version Conversion from 9 to 8 13 | 14 | ```python 15 | import onnx 16 | 17 | # Load the model 18 | model = onnx.load("path_to/resnet18.onnx") 19 | 20 | # Check that the IR is well formed 21 | onnx.checker.check_model(model) 22 | 23 | from onnx import version_converter 24 | 25 | # Convert to version 8 26 | converted_model = version_converter.convert_version(model, 8) 27 | 28 | # Save model 29 | onnx.save(converted_model, "path_to/resnet18_v8.onnx") 30 | ``` 31 | 32 | ### Upgrade Version Conversion from 8 to 9 33 | 34 | ```python 35 | # Convert to version 9 36 | converted_model9 = version_converter.convert_version(converted_model, 9) 37 | 38 | # Save model 39 | onnx.save(converted_model9, "path_to/resnet18_v9.onnx") 40 | ``` 41 | 42 | ### Downgrade Version Conversion from 8 to 7 43 | 44 | ```python 45 | # Convert to version 7 46 | converted_model7 = version_converter.convert_version(converted_model, 7) 47 | 48 | # Save model 49 | onnx.save(converted_model7, "path_to/resnet18_v7.onnx") 50 | ``` 51 | 52 | ### Upgrade Version Conversion from 7 to 9 53 | 54 | ```python 55 | # Convert to version 9 56 | converted_model79 = version_converter.convert_version(converted_model7, 9) 57 | 58 | # Save model 59 | onnx.save(converted_model79, "path_to/resnet18_v79.onnx") 60 | ``` 61 | -------------------------------------------------------------------------------- /tutorials/VisualizingAModel.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | Visualizing an ONNX Model 4 | ========================= 5 | 6 | To visualize an ONNX model, we can use the [net drawer tool](https://github.com/onnx/onnx/blob/main/onnx/tools/net_drawer.py). This tool takes in a serialized ONNX model and produces a directed graph representation. The graph contains the following information: 7 | 8 | * Tensors 9 | * Input/output tensors 10 | * Intermediate tensors 11 | * Operators (ops) 12 | * Op type 13 | * Op number 14 | * Input tensor names 15 | * Output tensor names 16 | * Docstrings (PyTorch exports stack traces, so this is a good way to become familiarized with the network topology) 17 | 18 | ## SqueezeNet Example 19 | 20 | Let's walk through an example visualizing a [SqueezeNet](https://arxiv.org/abs/1602.07360) model exported from [PyTorch](https://github.com/bwasti/AICamera/blob/master/Exporting%20Squeezenet%20to%20mobile.ipynb). Here's an example visualization: 21 | 22 | ![SqueezeNet Visualization](assets/squeezenet.png) 23 | 24 | **Prerequisites** 25 | * You will need [Graphviz](https://www.graphviz.org/) – specifically, the `dot` command-line utility. 26 | * You'll need the `pydot` Python package. 27 | * For the net drawer, you will need [ONNX](https://github.com/onnx/onnx), both installed and cloned somewhere (so that you have access to the `net_drawer.py` file). 28 | * For the optional part (i.e., experimentation), you'll need PyTorch and NumPy. 29 | 30 | ### Convert an exported ONNX model to a Graphviz representation 31 | 32 | In the `assets` folder, you should find a file named `squeezenet.onnx`. This is a serialized SqueezeNet model that was exported to ONNX from PyTorch. 
Go into your ONNX repository and run the following: 33 | 34 | python onnx/tools/net_drawer.py --input <path to squeezenet.onnx> --output squeezenet.dot --embed_docstring 35 | 36 | The command line flags are described below: 37 | 38 | - `input` specifies the input filename (i.e., the serialized ONNX model you would like to visualize). 39 | - `output` specifies where to write the Graphviz `.dot` file. 40 | - `embed_docstring` specifies that you'd like to embed the doc_string for each node in the graph visualization. This is implemented as a JavaScript alert() that occurs when you click on the node. 41 | 42 | Now, we have a Graphviz file `squeezenet.dot`. We need to convert it into a viewable format. Let's convert this into an `svg` file like so: 43 | 44 | dot -Tsvg squeezenet.dot -o squeezenet.svg 45 | 46 | You should now have an `svg` file named `squeezenet.svg`. Open this file in a web browser. 47 | 48 | ### Interpreting the graph 49 | 50 | Within the graph, white hexagons represent tensors and green rectangles represent ops. Within the op nodes, inputs and outputs are listed in order. Note that the position of the hexagons with respect to the ops does NOT represent input order. Clicking on each op node will bring up an alert that contains the doc string (stack trace for PyTorch), and may have useful information about each node. 51 | 52 | ### (Optional) Exporting the ONNX model 53 | 54 | To create the exported model, you can put this into a Python script: 55 | 56 | ```python 57 | # Some standard imports 58 | import io 59 | import numpy as np 60 | import torch.onnx 61 | 62 | import math 63 | import torch 64 | import torch.nn as nn 65 | import torch.nn.init as init 66 | import torch.utils.model_zoo as model_zoo 67 | 68 | 69 | __all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1'] 70 | 71 | 72 | model_urls = { 73 | 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', 74 | 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', 75 | } 76 | 77 | 78 | class Fire(nn.Module): 79 | 80 | def __init__(self, inplanes, squeeze_planes, 81 | expand1x1_planes, expand3x3_planes): 82 | super(Fire, self).__init__() 83 | self.inplanes = inplanes 84 | self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) 85 | self.squeeze_activation = nn.ReLU(inplace=True) 86 | self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, 87 | kernel_size=1) 88 | self.expand1x1_activation = nn.ReLU(inplace=True) 89 | self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, 90 | kernel_size=3, padding=1) 91 | self.expand3x3_activation = nn.ReLU(inplace=True) 92 | 93 | def forward(self, x): 94 | x = self.squeeze_activation(self.squeeze(x)) 95 | return torch.cat([ 96 | self.expand1x1_activation(self.expand1x1(x)), 97 | self.expand3x3_activation(self.expand3x3(x)) 98 | ], 1) 99 | 100 | 101 | class SqueezeNet(nn.Module): 102 | 103 | def __init__(self, version=1.0, num_classes=1000): 104 | super(SqueezeNet, self).__init__() 105 | if version not in [1.0, 1.1]: 106 | raise ValueError("Unsupported SqueezeNet version {version}:" 107 | "1.0 or 1.1 expected".format(version=version)) 108 | self.num_classes = num_classes 109 | if version == 1.0: 110 | self.features = nn.Sequential( 111 | nn.Conv2d(3, 96, kernel_size=7, stride=2), 112 | nn.ReLU(inplace=True), 113 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False), 114 | Fire(96, 16, 64, 64), 115 | Fire(128, 16, 64, 64), 116 | Fire(128, 32, 128, 128), 117 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False), 118 | Fire(256, 32, 128, 
128), 119 | Fire(256, 48, 192, 192), 120 | Fire(384, 48, 192, 192), 121 | Fire(384, 64, 256, 256), 122 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False), 123 | Fire(512, 64, 256, 256), 124 | ) 125 | else: 126 | self.features = nn.Sequential( 127 | nn.Conv2d(3, 64, kernel_size=3, stride=2), 128 | nn.ReLU(inplace=True), 129 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False), 130 | Fire(64, 16, 64, 64), 131 | Fire(128, 16, 64, 64), 132 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False), 133 | Fire(128, 32, 128, 128), 134 | Fire(256, 32, 128, 128), 135 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False), 136 | Fire(256, 48, 192, 192), 137 | Fire(384, 48, 192, 192), 138 | Fire(384, 64, 256, 256), 139 | Fire(512, 64, 256, 256), 140 | ) 141 | # Final convolution is initialized differently from the rest 142 | final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) 143 | self.classifier = nn.Sequential( 144 | nn.Dropout(p=0.5), 145 | final_conv, 146 | nn.ReLU(inplace=True), 147 | nn.AvgPool2d(13) 148 | ) 149 | 150 | for m in self.modules(): 151 | if isinstance(m, nn.Conv2d): 152 | if m is final_conv: 153 | init.normal(m.weight.data, mean=0.0, std=0.01) 154 | else: 155 | init.kaiming_uniform(m.weight.data) 156 | if m.bias is not None: 157 | m.bias.data.zero_() 158 | 159 | def forward(self, x): 160 | x = self.features(x) 161 | x = self.classifier(x) 162 | return x.view(x.size(0), self.num_classes) 163 | 164 | 165 | def squeezenet1_0(pretrained=False, **kwargs): 166 | r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level 167 | accuracy with 50x fewer parameters and <0.5MB model size" 168 | <https://arxiv.org/abs/1602.07360>`_ paper. 169 | Args: 170 | pretrained (bool): If True, returns a model pre-trained on ImageNet 171 | """ 172 | model = SqueezeNet(version=1.0, **kwargs) 173 | if pretrained: 174 | model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0'])) 175 | return model 176 | 177 | 178 | def squeezenet1_1(pretrained=False, **kwargs): 179 | r"""SqueezeNet 1.1 model from the `official SqueezeNet repo 180 | <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_. 181 | SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters 182 | than SqueezeNet 1.0, without sacrificing accuracy. 
183 | Args: 184 | pretrained (bool): If True, returns a model pre-trained on ImageNet 185 | """ 186 | model = SqueezeNet(version=1.1, **kwargs) 187 | if pretrained: 188 | model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1'])) 189 | return model 190 | 191 | torch_model = squeezenet1_1(True) 192 | 193 | from torch.autograd import Variable 194 | batch_size = 1 # just a random number 195 | 196 | # Input to the model 197 | x = Variable(torch.randn(batch_size, 3, 224, 224), requires_grad=True) 198 | 199 | # Export the model 200 | torch_out = torch.onnx._export(torch_model, # model being run 201 | x, # model input (or a tuple for multiple inputs) 202 | "squeezenet.onnx", # where to save the model (can be a file or file-like object) 203 | export_params=True) # store the trained parameter weights inside the model file 204 | ``` 205 | -------------------------------------------------------------------------------- /tutorials/assets/batchnorm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/batchnorm.png -------------------------------------------------------------------------------- /tutorials/assets/blueangels.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/blueangels.jpg -------------------------------------------------------------------------------- /tutorials/assets/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/cat.jpg -------------------------------------------------------------------------------- /tutorials/assets/coco_classes.txt: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /tutorials/assets/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/dog.jpg -------------------------------------------------------------------------------- /tutorials/assets/image.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/image.npz -------------------------------------------------------------------------------- /tutorials/assets/predict.proto: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | syntax = "proto3"; 4 | 5 | import "onnx-ml.proto"; 6 | 7 | package onnxruntime.server; 8 | 9 | // PredictRequest specifies how inputs are mapped to tensors 10 | // and how outputs are filtered before returning to user. 11 | message PredictRequest { 12 | reserved 1; 13 | 14 | // Input Tensors. 15 | // This is a mapping between output name and tensor. 16 | map inputs = 2; 17 | 18 | // Output Filters. 19 | // This field is to specify which output fields need to be returned. 20 | // If the list is empty, all outputs will be included. 21 | repeated string output_filter = 3; 22 | } 23 | 24 | // Response for PredictRequest on successful run. 25 | message PredictResponse { 26 | // Output Tensors. 27 | // This is a mapping between output name and tensor. 28 | map outputs = 1; 29 | } 30 | -------------------------------------------------------------------------------- /tutorials/assets/predict_pb2.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | # Generated by the protocol buffer compiler. DO NOT EDIT! 4 | # source: predict.proto 5 | 6 | import assets.onnx_ml_pb2 as onnx__ml__pb2 7 | from google.protobuf import symbol_database as _symbol_database 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import message as _message 10 | from google.protobuf import descriptor as _descriptor 11 | import sys 12 | _b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')) 13 | # @@protoc_insertion_point(imports) 14 | 15 | _sym_db = _symbol_database.Default() 16 | 17 | 18 | DESCRIPTOR = _descriptor.FileDescriptor( 19 | name='predict.proto', 20 | package='onnxruntime.server', 21 | syntax='proto3', 22 | serialized_options=None, 23 | serialized_pb=_b('\n\rpredict.proto\x12\x12onnxruntime.server\x1a\ronnx-ml.proto\"\xaf\x01\n\x0ePredictRequest\x12>\n\x06inputs\x18\x02 \x03(\x0b\x32..onnxruntime.server.PredictRequest.InputsEntry\x12\x15\n\routput_filter\x18\x03 \x03(\t\x1a@\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.onnx.TensorProto:\x02\x38\x01J\x04\x08\x01\x10\x02\"\x97\x01\n\x0fPredictResponse\x12\x41\n\x07outputs\x18\x01 \x03(\x0b\x32\x30.onnxruntime.server.PredictResponse.OutputsEntry\x1a\x41\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.onnx.TensorProto:\x02\x38\x01\x62\x06proto3'), # noqa: E501 24 | dependencies=[ 25 | onnx__ml__pb2.DESCRIPTOR, 26 | ]) 27 | 28 | 29 | _PREDICTREQUEST_INPUTSENTRY = _descriptor.Descriptor( 30 | name='InputsEntry', 31 | full_name='onnxruntime.server.PredictRequest.InputsEntry', 32 | filename=None, 33 | file=DESCRIPTOR, 34 | containing_type=None, 35 | fields=[ 36 | _descriptor.FieldDescriptor( 37 | name='key', full_name='onnxruntime.server.PredictRequest.InputsEntry.key', index=0, 38 | number=1, type=9, cpp_type=9, label=1, 39 | has_default_value=False, default_value=_b("").decode('utf-8'), 40 | message_type=None, enum_type=None, containing_type=None, 41 | is_extension=False, extension_scope=None, 42 | serialized_options=None, file=DESCRIPTOR), 43 | _descriptor.FieldDescriptor( 44 | 
name='value', full_name='onnxruntime.server.PredictRequest.InputsEntry.value', index=1, 45 | number=2, type=11, cpp_type=10, label=1, 46 | has_default_value=False, default_value=None, 47 | message_type=None, enum_type=None, containing_type=None, 48 | is_extension=False, extension_scope=None, 49 | serialized_options=None, file=DESCRIPTOR), 50 | ], 51 | extensions=[ 52 | ], 53 | nested_types=[], 54 | enum_types=[ 55 | ], 56 | serialized_options=_b('8\001'), 57 | is_extendable=False, 58 | syntax='proto3', 59 | extension_ranges=[], 60 | oneofs=[ 61 | ], 62 | serialized_start=158, 63 | serialized_end=222, 64 | ) 65 | 66 | _PREDICTREQUEST = _descriptor.Descriptor( 67 | name='PredictRequest', 68 | full_name='onnxruntime.server.PredictRequest', 69 | filename=None, 70 | file=DESCRIPTOR, 71 | containing_type=None, 72 | fields=[ 73 | _descriptor.FieldDescriptor( 74 | name='inputs', full_name='onnxruntime.server.PredictRequest.inputs', index=0, 75 | number=2, type=11, cpp_type=10, label=3, 76 | has_default_value=False, default_value=[], 77 | message_type=None, enum_type=None, containing_type=None, 78 | is_extension=False, extension_scope=None, 79 | serialized_options=None, file=DESCRIPTOR), 80 | _descriptor.FieldDescriptor( 81 | name='output_filter', full_name='onnxruntime.server.PredictRequest.output_filter', index=1, 82 | number=3, type=9, cpp_type=9, label=3, 83 | has_default_value=False, default_value=[], 84 | message_type=None, enum_type=None, containing_type=None, 85 | is_extension=False, extension_scope=None, 86 | serialized_options=None, file=DESCRIPTOR), 87 | ], 88 | extensions=[ 89 | ], 90 | nested_types=[_PREDICTREQUEST_INPUTSENTRY, ], 91 | enum_types=[ 92 | ], 93 | serialized_options=None, 94 | is_extendable=False, 95 | syntax='proto3', 96 | extension_ranges=[], 97 | oneofs=[ 98 | ], 99 | serialized_start=53, 100 | serialized_end=228, 101 | ) 102 | 103 | 104 | _PREDICTRESPONSE_OUTPUTSENTRY = _descriptor.Descriptor( 105 | name='OutputsEntry', 106 | full_name='onnxruntime.server.PredictResponse.OutputsEntry', 107 | filename=None, 108 | file=DESCRIPTOR, 109 | containing_type=None, 110 | fields=[ 111 | _descriptor.FieldDescriptor( 112 | name='key', full_name='onnxruntime.server.PredictResponse.OutputsEntry.key', index=0, 113 | number=1, type=9, cpp_type=9, label=1, 114 | has_default_value=False, default_value=_b("").decode('utf-8'), 115 | message_type=None, enum_type=None, containing_type=None, 116 | is_extension=False, extension_scope=None, 117 | serialized_options=None, file=DESCRIPTOR), 118 | _descriptor.FieldDescriptor( 119 | name='value', full_name='onnxruntime.server.PredictResponse.OutputsEntry.value', index=1, 120 | number=2, type=11, cpp_type=10, label=1, 121 | has_default_value=False, default_value=None, 122 | message_type=None, enum_type=None, containing_type=None, 123 | is_extension=False, extension_scope=None, 124 | serialized_options=None, file=DESCRIPTOR), 125 | ], 126 | extensions=[ 127 | ], 128 | nested_types=[], 129 | enum_types=[ 130 | ], 131 | serialized_options=_b('8\001'), 132 | is_extendable=False, 133 | syntax='proto3', 134 | extension_ranges=[], 135 | oneofs=[ 136 | ], 137 | serialized_start=317, 138 | serialized_end=382, 139 | ) 140 | 141 | _PREDICTRESPONSE = _descriptor.Descriptor( 142 | name='PredictResponse', 143 | full_name='onnxruntime.server.PredictResponse', 144 | filename=None, 145 | file=DESCRIPTOR, 146 | containing_type=None, 147 | fields=[ 148 | _descriptor.FieldDescriptor( 149 | name='outputs', full_name='onnxruntime.server.PredictResponse.outputs', 
index=0, 150 | number=1, type=11, cpp_type=10, label=3, 151 | has_default_value=False, default_value=[], 152 | message_type=None, enum_type=None, containing_type=None, 153 | is_extension=False, extension_scope=None, 154 | serialized_options=None, file=DESCRIPTOR), 155 | ], 156 | extensions=[ 157 | ], 158 | nested_types=[_PREDICTRESPONSE_OUTPUTSENTRY, ], 159 | enum_types=[ 160 | ], 161 | serialized_options=None, 162 | is_extendable=False, 163 | syntax='proto3', 164 | extension_ranges=[], 165 | oneofs=[ 166 | ], 167 | serialized_start=231, 168 | serialized_end=382, 169 | ) 170 | 171 | _PREDICTREQUEST_INPUTSENTRY.fields_by_name['value'].message_type = onnx__ml__pb2._TENSORPROTO 172 | _PREDICTREQUEST_INPUTSENTRY.containing_type = _PREDICTREQUEST 173 | _PREDICTREQUEST.fields_by_name['inputs'].message_type = _PREDICTREQUEST_INPUTSENTRY 174 | _PREDICTRESPONSE_OUTPUTSENTRY.fields_by_name['value'].message_type = onnx__ml__pb2._TENSORPROTO 175 | _PREDICTRESPONSE_OUTPUTSENTRY.containing_type = _PREDICTRESPONSE 176 | _PREDICTRESPONSE.fields_by_name['outputs'].message_type = _PREDICTRESPONSE_OUTPUTSENTRY 177 | DESCRIPTOR.message_types_by_name['PredictRequest'] = _PREDICTREQUEST 178 | DESCRIPTOR.message_types_by_name['PredictResponse'] = _PREDICTRESPONSE 179 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 180 | 181 | PredictRequest = _reflection.GeneratedProtocolMessageType('PredictRequest', (_message.Message,), dict( 182 | 183 | InputsEntry=_reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), dict( 184 | DESCRIPTOR=_PREDICTREQUEST_INPUTSENTRY, 185 | __module__='predict_pb2' 186 | # @@protoc_insertion_point(class_scope:onnxruntime.server.PredictRequest.InputsEntry) 187 | )), 188 | DESCRIPTOR=_PREDICTREQUEST, 189 | __module__='predict_pb2' 190 | # @@protoc_insertion_point(class_scope:onnxruntime.server.PredictRequest) 191 | )) 192 | _sym_db.RegisterMessage(PredictRequest) 193 | _sym_db.RegisterMessage(PredictRequest.InputsEntry) 194 | 195 | PredictResponse = _reflection.GeneratedProtocolMessageType('PredictResponse', (_message.Message,), dict( 196 | 197 | OutputsEntry=_reflection.GeneratedProtocolMessageType('OutputsEntry', (_message.Message,), dict( 198 | DESCRIPTOR=_PREDICTRESPONSE_OUTPUTSENTRY, 199 | __module__='predict_pb2' 200 | # @@protoc_insertion_point(class_scope:onnxruntime.server.PredictResponse.OutputsEntry) 201 | )), 202 | DESCRIPTOR=_PREDICTRESPONSE, 203 | __module__='predict_pb2' 204 | # @@protoc_insertion_point(class_scope:onnxruntime.server.PredictResponse) 205 | )) 206 | _sym_db.RegisterMessage(PredictResponse) 207 | _sym_db.RegisterMessage(PredictResponse.OutputsEntry) 208 | 209 | 210 | _PREDICTREQUEST_INPUTSENTRY._options = None 211 | _PREDICTRESPONSE_OUTPUTSENTRY._options = None 212 | # @@protoc_insertion_point(module_scope) 213 | -------------------------------------------------------------------------------- /tutorials/assets/squeezenet.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/squeezenet.onnx -------------------------------------------------------------------------------- /tutorials/assets/squeezenet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/squeezenet.png -------------------------------------------------------------------------------- 
/tutorials/assets/super-res-input.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/super-res-input.jpg -------------------------------------------------------------------------------- /tutorials/assets/super_resolution.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/super_resolution.onnx -------------------------------------------------------------------------------- /tutorials/assets/tensorflow_to_onnx_example.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # ============================================================================== 17 | 18 | """A deep MNIST classifier using convolutional layers. 19 | 20 | See extensive documentation at 21 | https://www.tensorflow.org/get_started/mnist/pros 22 | """ 23 | # Disable linter warnings to maintain consistency with tutorial. 24 | # pylint: disable=invalid-name 25 | # pylint: disable=g-bad-import-order 26 | 27 | 28 | import os 29 | import shutil 30 | import tensorflow as tf 31 | from tensorflow.examples.tutorials.mnist import input_data 32 | 33 | 34 | def add(x, y): 35 | return tf.nn.bias_add(x, y, data_format="NHWC") 36 | 37 | 38 | def deepnn(x): 39 | """deepnn builds the graph for a deep net for classifying digits. 40 | 41 | Args: 42 | x: an input tensor with the dimensions (N_examples, 784), where 784 is the 43 | number of pixels in a standard MNIST image. 44 | 45 | Returns: 46 | A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values 47 | equal to the logits of classifying the digit into one of 10 classes (the 48 | digits 0-9). keep_prob is a scalar placeholder for the probability of dropout. 49 | """ 50 | # Reshape to use within a convolutional neural net. 51 | # Last dimension is for "features" - there is only one here, since images are 52 | # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. 53 | with tf.name_scope('reshape'): 54 | x_image = tf.reshape(x, [-1, 1, 28, 28]) 55 | x_image = tf.transpose(x_image, [0, 2, 3, 1]) 56 | 57 | # First convolutional layer - maps one grayscale image to 32 feature maps. 58 | with tf.name_scope('conv1'): 59 | w_conv1 = weight_variable([5, 5, 1, 32]) 60 | b_conv1 = bias_variable([32]) 61 | h_conv1 = tf.nn.relu(add(conv2d(x_image, w_conv1), b_conv1)) 62 | 63 | # Pooling layer - downsamples by 2X. 64 | with tf.name_scope('pool1'): 65 | h_pool1 = max_pool_2x2(h_conv1) 66 | 67 | # Second convolutional layer -- maps 32 feature maps to 64. 
68 | with tf.name_scope('conv2'): 69 | w_conv2 = weight_variable([5, 5, 32, 64]) 70 | b_conv2 = bias_variable([64]) 71 | h_conv2 = tf.nn.relu(add(conv2d(h_pool1, w_conv2), b_conv2)) 72 | 73 | # Second pooling layer. 74 | with tf.name_scope('pool2'): 75 | h_pool2 = max_pool_2x2(h_conv2) 76 | 77 | # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image 78 | # is down to 7x7x64 feature maps -- maps this to 1024 features. 79 | with tf.name_scope('fc1'): 80 | w_fc1 = weight_variable([7 * 7 * 64, 1024]) 81 | b_fc1 = bias_variable([1024]) 82 | 83 | h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) 84 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) 85 | 86 | # Map the 1024 features to 10 classes, one for each digit 87 | with tf.name_scope('fc2'): 88 | w_fc2 = weight_variable([1024, 10]) 89 | b_fc2 = bias_variable([10]) 90 | 91 | y_conv = tf.matmul(h_fc1, w_fc2) + b_fc2 92 | 93 | return y_conv 94 | 95 | 96 | def conv2d(x, w): 97 | """conv2d returns a 2d convolution layer with full stride.""" 98 | return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME', data_format="NHWC") 99 | 100 | 101 | def max_pool_2x2(x): 102 | """max_pool_2x2 downsamples a feature map by 2X.""" 103 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], 104 | strides=[1, 2, 2, 1], padding='SAME', data_format="NHWC") 105 | 106 | 107 | def weight_variable(shape): 108 | """weight_variable generates a weight variable of a given shape.""" 109 | initial = tf.truncated_normal(shape, stddev=0.1) 110 | return tf.Variable(initial) 111 | 112 | 113 | def bias_variable(shape): 114 | """bias_variable generates a bias variable of a given shape.""" 115 | initial = tf.constant(0.1, shape=shape) 116 | return tf.Variable(initial) 117 | 118 | 119 | def create_and_train_mnist(): 120 | tf.logging.set_verbosity(tf.logging.ERROR) 121 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 122 | # Import data 123 | data_dir = r"/tmp/tensorflow/mnist/input_data" 124 | mnist = input_data.read_data_sets(data_dir) 125 | # Create the model 126 | tf.reset_default_graph() 127 | input_tensor = tf.placeholder(tf.float32, [None, 784], name="input") 128 | # Build the graph for the deep net 129 | y_conv = deepnn(input_tensor) 130 | output_tensor = tf.identity(y_conv, "result") 131 | with open("./output/graph.proto", "wb") as file: 132 | graph = tf.get_default_graph().as_graph_def(add_shapes=True) 133 | file.write(graph.SerializeToString()) 134 | # Define loss and optimizer 135 | y_ = tf.placeholder(tf.int64, [None]) 136 | with tf.name_scope('loss'): 137 | cross_entropy = tf.losses.sparse_softmax_cross_entropy( 138 | labels=y_, logits=y_conv) 139 | cross_entropy = tf.reduce_mean(cross_entropy) 140 | with tf.name_scope('adam_optimizer'): 141 | train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 142 | with tf.name_scope('accuracy'): 143 | correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_) 144 | correct_prediction = tf.cast(correct_prediction, tf.float32) 145 | accuracy = tf.reduce_mean(correct_prediction) 146 | 147 | saver = tf.train.Saver() 148 | 149 | config = tf.ConfigProto() 150 | config.gpu_options.allow_growth = True 151 | sess = tf.Session(config=config) 152 | sess.run(tf.global_variables_initializer()) 153 | for i in range(5000): 154 | batch = mnist.train.next_batch(50) 155 | if i % 1000 == 0: 156 | train_accuracy = accuracy.eval( 157 | session=sess, feed_dict={ 158 | input_tensor: batch[0], y_: batch[1]}) 159 | print('step %d, training accuracy %g' % (i, train_accuracy)) 160 | train_step.run(session=sess, 
feed_dict={input_tensor: batch[0], y_: batch[1]}) 161 | 162 | print('test accuracy %g' % accuracy.eval(session=sess, feed_dict={ 163 | input_tensor: mnist.test.images[:1000], y_: mnist.test.labels[:1000]})) 164 | return sess, saver, input_tensor, output_tensor 165 | 166 | 167 | def save_model_to_checkpoint(saver, sess): 168 | print("save model to checkpoint") 169 | saver.save(sess, "./output/ckpt/model.ckpt") 170 | 171 | 172 | def save_model_to_frozen_proto(sess): 173 | print('save model to frozen graph') 174 | frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["result"]) 175 | with open("./output/mnist_frozen.pb", "wb") as file: 176 | file.write(frozen_graph.SerializeToString()) 177 | 178 | 179 | def save_model_to_saved_model(sess, input_tensor, output_tensor): 180 | print('save model to saved_model') 181 | from tensorflow.saved_model import simple_save 182 | save_path = r"./output/saved_model" 183 | if os.path.exists(save_path): 184 | shutil.rmtree(save_path) 185 | simple_save(sess, save_path, {input_tensor.name: input_tensor}, {output_tensor.name: output_tensor}) 186 | -------------------------------------------------------------------------------- /tutorials/assets/tf-train-mnist.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # https://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # ============================================================================== 17 | 18 | """A deep MNIST classifier using convolutional layers. 19 | 20 | See extensive documentation at 21 | https://www.tensorflow.org/get_started/mnist/pros 22 | """ 23 | # Disable linter warnings to maintain consistency with tutorial. 24 | # pylint: disable=invalid-name 25 | # pylint: disable=g-bad-import-order 26 | 27 | from __future__ import absolute_import 28 | from __future__ import division 29 | from __future__ import print_function 30 | 31 | import argparse 32 | import sys 33 | import tempfile 34 | 35 | from tensorflow.examples.tutorials.mnist import input_data 36 | 37 | import tensorflow as tf 38 | 39 | FLAGS = None 40 | 41 | 42 | def add(x, y): 43 | return tf.nn.bias_add(x, y, data_format="NCHW") 44 | 45 | 46 | def deepnn(x): 47 | """deepnn builds the graph for a deep net for classifying digits. 48 | 49 | Args: 50 | x: an input tensor with the dimensions (N_examples, 784), where 784 is the 51 | number of pixels in a standard MNIST image. 52 | 53 | Returns: 54 | A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values 55 | equal to the logits of classifying the digit into one of 10 classes (the 56 | digits 0-9). keep_prob is a scalar placeholder for the probability of 57 | dropout. 58 | """ 59 | # Reshape to use within a convolutional neural net. 
60 | # Last dimension is for "features" - there is only one here, since images are 61 | # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. 62 | with tf.name_scope('reshape'): 63 | x_image = tf.reshape(x, [-1, 1, 28, 28]) 64 | 65 | # First convolutional layer - maps one grayscale image to 32 feature maps. 66 | with tf.name_scope('conv1'): 67 | W_conv1 = weight_variable([5, 5, 1, 32]) 68 | b_conv1 = bias_variable([32]) 69 | h_conv1 = tf.nn.relu(add(conv2d(x_image, W_conv1), b_conv1)) 70 | 71 | # Pooling layer - downsamples by 2X. 72 | with tf.name_scope('pool1'): 73 | h_pool1 = max_pool_2x2(h_conv1) 74 | 75 | # Second convolutional layer -- maps 32 feature maps to 64. 76 | with tf.name_scope('conv2'): 77 | W_conv2 = weight_variable([5, 5, 32, 64]) 78 | b_conv2 = bias_variable([64]) 79 | h_conv2 = tf.nn.relu(add(conv2d(h_pool1, W_conv2), b_conv2)) 80 | 81 | # Second pooling layer. 82 | with tf.name_scope('pool2'): 83 | h_pool2 = max_pool_2x2(h_conv2) 84 | 85 | # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image 86 | # is down to 7x7x64 feature maps -- maps this to 1024 features. 87 | with tf.name_scope('fc1'): 88 | W_fc1 = weight_variable([7 * 7 * 64, 1024]) 89 | b_fc1 = bias_variable([1024]) 90 | 91 | h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) 92 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 93 | 94 | # Map the 1024 features to 10 classes, one for each digit 95 | with tf.name_scope('fc2'): 96 | W_fc2 = weight_variable([1024, 10]) 97 | b_fc2 = bias_variable([10]) 98 | 99 | y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2 100 | 101 | return y_conv 102 | 103 | 104 | def conv2d(x, W): 105 | """conv2d returns a 2d convolution layer with full stride.""" 106 | return tf.nn.conv2d( 107 | x, 108 | W, 109 | strides=[ 110 | 1, 111 | 1, 112 | 1, 113 | 1], 114 | padding='SAME', 115 | data_format="NCHW") 116 | 117 | 118 | def max_pool_2x2(x): 119 | """max_pool_2x2 downsamples a feature map by 2X.""" 120 | return tf.nn.max_pool( 121 | x, ksize=[ 122 | 1, 1, 2, 2], strides=[ 123 | 1, 1, 2, 2], padding='SAME', data_format="NCHW") 124 | 125 | 126 | def weight_variable(shape): 127 | """weight_variable generates a weight variable of a given shape.""" 128 | initial = tf.truncated_normal(shape, stddev=0.1) 129 | return tf.Variable(initial) 130 | 131 | 132 | def bias_variable(shape): 133 | """bias_variable generates a bias variable of a given shape.""" 134 | initial = tf.constant(0.1, shape=shape) 135 | return tf.Variable(initial) 136 | 137 | 138 | def main(_): 139 | # Import data 140 | mnist = input_data.read_data_sets(FLAGS.data_dir) 141 | 142 | # Create the model 143 | x = tf.placeholder(tf.float32, [None, 784]) 144 | 145 | # Build the graph for the deep net 146 | y_conv = deepnn(x) 147 | 148 | with open("graph.proto", "wb") as file: 149 | graph = tf.get_default_graph().as_graph_def(add_shapes=True) 150 | file.write(graph.SerializeToString()) 151 | 152 | # Define loss and optimizer 153 | y_ = tf.placeholder(tf.int64, [None]) 154 | 155 | with tf.name_scope('loss'): 156 | cross_entropy = tf.losses.sparse_softmax_cross_entropy( 157 | labels=y_, logits=y_conv) 158 | cross_entropy = tf.reduce_mean(cross_entropy) 159 | 160 | with tf.name_scope('adam_optimizer'): 161 | train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 162 | 163 | with tf.name_scope('accuracy'): 164 | correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_) 165 | correct_prediction = tf.cast(correct_prediction, tf.float32) 166 | accuracy = tf.reduce_mean(correct_prediction) 167 | 168 
| graph_location = tempfile.mkdtemp() 169 | print('Saving graph to: %s' % graph_location) 170 | train_writer = tf.summary.FileWriter(graph_location) 171 | train_writer.add_graph(tf.get_default_graph()) 172 | 173 | saver = tf.train.Saver() 174 | 175 | with tf.Session() as sess: 176 | sess.run(tf.global_variables_initializer()) 177 | for i in range(20000): 178 | batch = mnist.train.next_batch(50) 179 | 180 | if i % 1000 == 0: 181 | train_accuracy = accuracy.eval(feed_dict={ 182 | x: batch[0], y_: batch[1]}) 183 | print('step %d, training accuracy %g' % (i, train_accuracy)) 184 | 185 | save_path = saver.save(sess, "./ckpt/model.ckpt") 186 | print("Model saved in path: %s" % save_path) 187 | train_step.run(feed_dict={x: batch[0], y_: batch[1]}) 188 | 189 | print('test accuracy %g' % accuracy.eval(feed_dict={ 190 | x: mnist.test.images, y_: mnist.test.labels})) 191 | 192 | 193 | if __name__ == '__main__': 194 | parser = argparse.ArgumentParser() 195 | parser.add_argument('--data_dir', type=str, 196 | default='/tmp/tensorflow/mnist/input_data', 197 | help='Directory for storing input data') 198 | FLAGS, unparsed = parser.parse_known_args() 199 | tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) 200 | -------------------------------------------------------------------------------- /tutorials/assets/three.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/three.png -------------------------------------------------------------------------------- /tutorials/assets/two.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onnx/tutorials/3a0d50a0c983baa2d99dcd8c1d8bc10c56c01b66/tutorials/assets/two.png -------------------------------------------------------------------------------- /tutorials/output/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ipython notebooks from enclosing directory generate their outputs here. Folder is created for convenience, we don't check in those files 4 | -------------------------------------------------------------------------------- /workflow_scripts/url_validator.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | from re import findall 4 | from urllib.request import Request, urlopen 5 | 6 | 7 | SKIP_URLS_LIST = ["https://127.0.0", 8 | # Used for server demo code 9 | "https://www.cntk.ai/Models/CNTK_Pretrained/ResNet20_CIFAR10_CNTK.model" 10 | # [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: Hostname mismatch, certificate is not valid 11 | ] 12 | 13 | 14 | def validate_url(url): 15 | for skip_url in SKIP_URLS_LIST: 16 | if skip_url in url: 17 | return True 18 | try: 19 | headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'} 20 | request = Request(url, headers=headers) 21 | with urlopen(request) as response: 22 | status_code = response.getcode() 23 | # if the request succeeds 24 | if status_code == 200: 25 | return True 26 | else: 27 | print(f"{url}: is Not reachable, status_code: {status_code}.") 28 | return False 29 | except Exception as e: 30 | print(f"{url}: is Not reachable, Exception: {e}") 31 | return False 32 | 33 | 34 | def polish_url(url): 35 | """ Trim , \n . 
) 's in the end """ 36 | url = url.replace("\\n", "") 37 | url = url.replace("'s", "") 38 | for i in range(len(url)): 39 | if url[len(url) - i - 1].isalpha() or url[len(url) - i - 1].isdigit(): 40 | return url[:len(url) - i] 41 | return url 42 | 43 | 44 | def validate_file(file_path): 45 | has_invalid_url = False 46 | if file_path.endswith(".ipynb") or file_path.endswith(".md"): 47 | with open(file_path, "r") as f: 48 | for line in f: 49 | url_list = findall( 50 | r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", 51 | line) 52 | for url in url_list: 53 | url = polish_url(url) 54 | if not validate_url(url): 55 | has_invalid_url = True 56 | 57 | if "http://" in line: 58 | print(f"File {file_path} contains an insecure url: {line}") 59 | has_invalid_url = True 60 | 61 | return not has_invalid_url 62 | -------------------------------------------------------------------------------- /workflow_scripts/validate_all_urls.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | import os 4 | from sys import exit 5 | from url_validator import validate_file 6 | 7 | 8 | def validate_urls_under_directory(directory): 9 | total_count = 0 10 | invalid_url_count = 0 11 | 12 | for root, _, files in os.walk(directory): 13 | for file_name in files: 14 | file_path = os.path.join(root, file_name) 15 | total_count += 1 16 | print(f"-----------validate {file_path}") 17 | if not validate_file(file_path): 18 | invalid_url_count += 1 19 | 20 | if invalid_url_count == 0: 21 | print(f"{total_count} files passed. ") 22 | else: 23 | print(f"{invalid_url_count} files failed in {total_count} files. ") 24 | exit(1) 25 | 26 | 27 | if __name__ == '__main__': 28 | validate_urls_under_directory('.') 29 | -------------------------------------------------------------------------------- /workflow_scripts/validate_changed_urls.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: Apache-2.0 2 | 3 | from os.path import exists 4 | from url_validator import validate_file 5 | import subprocess 6 | 7 | 8 | def get_changed_files(): 9 | try: 10 | files = subprocess.check_output(["git", "diff", "--name-only", "origin/main", "--"]) 11 | except subprocess.CalledProcessError as ex: 12 | return ex.output 13 | return files.decode("utf-8").split("\n") 14 | 15 | 16 | def validate_changed_urls(): 17 | files = get_changed_files() 18 | total_count = 0 19 | invalid_url_count = 0 20 | 21 | for file_path in files: 22 | total_count += 1 23 | if not exists(file_path): 24 | print(f"Skip because {file_path} does not exist. ") 25 | break 26 | print(f"-----------validate {file_path}") 27 | if not validate_file(file_path): 28 | invalid_url_count += 1 29 | 30 | if invalid_url_count == 0: 31 | print(f"{total_count} updated files passed. ") 32 | else: 33 | print(f"{invalid_url_count} files failed in updated {total_count} files. ") 34 | exit(1) 35 | 36 | 37 | if __name__ == '__main__': 38 | validate_changed_urls() 39 | --------------------------------------------------------------------------------