├── .dockerignore
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   └── issue_template.yml
│   └── workflows
│       ├── codeql-analysis.yml
│       └── python-publish.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── bashrc
├── flatbuffers
│   └── 1.12.0
│       └── download.sh
├── sample_json
│   └── download.sh
├── sample_npy
│   └── calibration_data_img_sample.npy
├── schema
│   ├── schema.fbs
│   ├── schema_v0.fbs
│   ├── schema_v1.fbs
│   ├── schema_v2.fbs
│   ├── schema_v3.fbs
│   └── schema_v3a.fbs
├── scripts
│   ├── tflite2tensorflow
│   └── view_npy
├── setup.py
└── tflite2tensorflow
    ├── __init__.py
    ├── mediapipeCustomOp.py
    ├── tflite2tensorflow.py
    └── view_npy.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | tflite2tensorflow
2 | packages
3 | sample_npy
4 | scripts
5 | weights_sample
6 | LICENSE
7 | README.md
8 | .vscode
9 |
10 | *.sh
11 | *.json
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: PINTO0309 # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
13 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/issue_template.yml:
--------------------------------------------------------------------------------
1 | name: Issue
2 | description: Miscellaneous issues are closed immediately. If you have no intention of returning useful information to the community, you have no right to post an issue here. Please withdraw.
3 | body:
4 | - type: markdown
5 | attributes:
6 | value: |
7 | Please provide as much detail as possible to convey the history of your problem. Sloppy issues will be closed immediately.
8 | - type: dropdown
9 | id: issue-type
10 | attributes:
11 | label: Issue Type
12 | description: What type of issue would you like to report?
13 | multiple: true
14 | options:
15 | - Bug
16 | - Performance
17 | - Feature Request
18 | - Documentation Feature Request
19 | - Documentation Bug
20 | - Others
21 | validations:
22 | required: true
23 | - type: dropdown
24 | id: Operating-System
25 | attributes:
26 | label: OS
27 | description: What OS are you seeing the issue in? If you don't see your OS listed, please provide more details in the "Description" section below.
28 | multiple: true
29 | options:
30 | - Windows
31 | - Ubuntu
32 | - Mac OS
33 | - Other
34 | validations:
35 | required: true
36 | - type: dropdown
37 | id: Operating-System-Architecture
38 | attributes:
39 | label: OS architecture
40 | description: If you don't see your device listed, please provide more details in the "Description" section below.
41 | multiple: true
42 | options:
43 | - x86_64
44 | - aarch64
45 | - armv7
46 | - armv6
47 | - Other
48 | validations:
49 | required: true
50 | - type: dropdown
51 | id: version
52 | attributes:
53 | label: Programming Language
54 | description: What programming language are you using? If "Other", please provide more details in the "Description" section below.
55 | multiple: true
56 | options:
57 | - C++
58 | - Python
59 | - Rust
60 | - Go
61 | - Other
62 | validations:
63 | required: true
64 | - type: dropdown
65 | id: Framework
66 | attributes:
67 | label: Framework
68 | description: What framework are you using? If "Other", please provide more details in the "Description" section below.
69 | multiple: true
70 | options:
71 | - OpenVINO
72 | - PyTorch
73 | - ONNX
74 | - TensorFlow
75 | - TensorFlowLite
76 | - TensorFlow.js
77 | - TensorRT
78 | - TF-TRT
79 | - CoreML
80 | - Myriad Inference Engine
81 | - Other
82 | validations:
83 | required: true
84 | - type: textarea
85 | id: Download-URL-for-tflite
86 | attributes:
87 | label: Download URL for tflite file
88 | description: Please include the URL from which you obtained the tflite file.
89 | validations:
90 | required: true
91 | - type: textarea
92 | id: Convert-script
93 | attributes:
94 | label: Convert Script
95 | description: Please include the convert script.
96 | validations:
97 | required: true
98 | - type: textarea
99 | id: what-happened
100 | attributes:
101 | label: Description
102 | description: Please describe the current and expected behaviour, and attach all files/info needed to reproduce the issue if applicable.
103 | validations:
104 | required: true
105 | - type: textarea
106 | id: logs
107 | attributes:
108 | label: Relevant Log Output
109 | description: Please copy and paste any relevant log output. This will be automatically formatted into code.
110 | render: shell
111 | validations:
112 | required: true
113 | - type: textarea
114 | id: url-or-source-code-for-simple-inference-testing-code
115 | attributes:
116 | label: Source code for simple inference testing code
117 | description: Please copy and paste source code for simple inference testing code.
118 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ main ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ main ]
20 | schedule:
21 | - cron: '17 16 * * 5'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 | permissions:
28 | actions: read
29 | contents: read
30 | security-events: write
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | language: [ 'python' ]
36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
37 | # Learn more about CodeQL language support at https://git.io/codeql-language-support
38 |
39 | steps:
40 | - name: Checkout repository
41 | uses: actions/checkout@v3
42 |
43 | # Initializes the CodeQL tools for scanning.
44 | - name: Initialize CodeQL
45 | uses: github/codeql-action/init@v2
46 | with:
47 | languages: ${{ matrix.language }}
48 | # If you wish to specify custom queries, you can do so here or in a config file.
49 | # By default, queries listed here will override any specified in a config file.
50 | # Prefix the list here with "+" to use these queries and those in the config file.
51 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
52 |
53 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
54 | # If this step fails, then you should remove it and run the build manually (see below)
55 | - name: Autobuild
56 | uses: github/codeql-action/autobuild@v2
57 |
58 | # ℹ️ Command-line programs to run using the OS shell.
59 | # 📚 https://git.io/JvXDl
60 |
61 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
62 | # and modify them (or add more) to build your code if your project
63 | # uses a compiled language
64 |
65 | #- run: |
66 | # make bootstrap
67 | # make release
68 |
69 | - name: Perform CodeQL Analysis
70 | uses: github/codeql-action/analyze@v2
71 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | name: Upload Python Package
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | jobs:
8 | deploy:
9 |
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v3
14 | - name: Set up Python
15 | uses: actions/setup-python@v3
16 | with:
17 | python-version: '3.x'
18 | - name: Install dependencies
19 | run: |
20 | python -m pip install --upgrade pip
21 | pip install setuptools wheel pipenv
22 | - name: Build
23 | run: |
24 | python setup.py sdist bdist_wheel
25 | - name: Publish a Python distribution to PyPI
26 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
27 | with:
28 | user: __token__
29 | password: ${{ secrets.PYPI_API_TOKEN }}
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 | dist/
3 | tflite2tensorflow.egg-info/
4 | tflite2tensorflow/debug/
5 | tflite2tensorflow/saved_model/
6 | .vscode
7 |
8 | *.bin
9 | *.xml
10 | saved_model/
11 | packages/
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/pinto0309/openvino2tensorflow:base.11.6.2-cudnn8-tf2.9.0-trt8.4.0-openvino2022.1.0
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive
4 | ARG APPVER
5 | ARG WKDIR=/home/user
6 | WORKDIR ${WKDIR}
7 |
8 | # Install dependencies
9 | RUN pip install --upgrade openvino2tensorflow \
10 | && pip install --upgrade tflite2tensorflow \
11 | && sudo ldconfig \
12 | && sudo pip cache purge
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Katsuya Hyodo
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # tflite2tensorflow
2 |
3 |
4 |
5 |
6 |
7 | Generate saved_model, tfjs, tf-trt, EdgeTPU, CoreML, quantized tflite, ONNX, OpenVINO, Myriad Inference Engine blob, and .pb from .tflite. Building the environment with Docker is supported, and the host PC's GUI and camera can be accessed directly from the container to verify operation. NVIDIA GPUs (dGPU) and Intel iHD GPUs (iGPU) are supported, as is inverse quantization of INT8-quantized models.
8 |
9 | [Special custom TensorFlow binaries](https://github.com/PINTO0309/Tensorflow-bin) and [special custom TensorFlow Lite binaries](https://github.com/PINTO0309/TensorflowLite-bin) are used.
10 |
11 | [](https://pepy.tech/project/tflite2tensorflow)  [](https://pypi.org/project/tflite2tensorflow/) [](https://github.com/PINTO0309/tflite2tensorflow/actions?query=workflow%3ACodeQL)
12 |
13 |
14 |
15 |
16 | ## 1. Supported Layers
17 |
18 | **Supported Layers**
19 |
20 | |No.|TFLite Layer|TF Layer|Remarks|
21 | |:--:|:--|:--|:--|
22 | |1|CONV_2D|tf.nn.conv2d||
23 | |2|DEPTHWISE_CONV_2D|tf.nn.depthwise_conv2d||
24 | |3|MAX_POOL_2D|tf.nn.max_pool||
25 | |4|PAD|tf.pad||
26 | |5|MIRROR_PAD|tf.raw_ops.MirrorPad||
27 | |6|RELU|tf.nn.relu||
28 | |7|PRELU|tf.keras.layers.PReLU||
29 | |8|RELU6|tf.nn.relu6||
30 | |9|RESHAPE|tf.reshape||
31 | |10|ADD|tf.add||
32 | |11|SUB|tf.math.subtract||
33 | |12|CONCATENATION|tf.concat||
34 | |13|LOGISTIC|tf.math.sigmoid||
35 | |14|TRANSPOSE_CONV|tf.nn.conv2d_transpose||
36 | |15|MUL|tf.multiply||
37 | |16|HARD_SWISH|x\*tf.nn.relu6(x+3)\*0.16666667 Or x\*tf.nn.relu6(x+3)\*0.16666666||
38 | |17|AVERAGE_POOL_2D|tf.keras.layers.AveragePooling2D||
39 | |18|FULLY_CONNECTED|tf.keras.layers.Dense||
40 | |19|RESIZE_BILINEAR|tf.image.resize Or tf.image.resize_bilinear|The behavior differs depending on the optimization options of openvino and edgetpu.|
41 | |20|RESIZE_NEAREST_NEIGHBOR|tf.image.resize Or tf.image.resize_nearest_neighbor|The behavior differs depending on the optimization options of openvino and edgetpu.|
42 | |21|MEAN|tf.math.reduce_mean||
43 | |22|SQUARED_DIFFERENCE|tf.math.squared_difference||
44 | |23|RSQRT|tf.math.rsqrt||
45 | |24|DEQUANTIZE|(const)||
46 | |25|FLOOR|tf.math.floor||
47 | |26|TANH|tf.math.tanh||
48 | |27|DIV|tf.math.divide||
49 | |28|FLOOR_DIV|tf.math.floordiv||
50 | |29|SUM|tf.math.reduce_sum||
51 | |30|POW|tf.math.pow||
52 | |31|SPLIT|tf.split||
53 | |32|SOFTMAX|tf.nn.softmax||
54 | |33|STRIDED_SLICE|tf.strided_slice||
55 | |34|TRANSPOSE|tf.transpose||
56 | |35|SPACE_TO_DEPTH|tf.nn.space_to_depth||
57 | |36|DEPTH_TO_SPACE|tf.nn.depth_to_space||
58 | |37|REDUCE_MAX|tf.math.reduce_max||
59 | |38|Convolution2DTransposeBias|tf.nn.conv2d_transpose, tf.math.add|CUSTOM, MediaPipe|
60 | |39|LEAKY_RELU|tf.keras.layers.LeakyReLU||
61 | |40|MAXIMUM|tf.math.maximum||
62 | |41|MINIMUM|tf.math.minimum||
63 | |42|MaxPoolingWithArgmax2D|tf.raw_ops.MaxPoolWithArgmax|CUSTOM, MediaPipe|
64 | |43|MaxUnpooling2D|tf.cast, tf.shape, tf.math.floordiv, tf.math.floormod, tf.ones_like, tf.shape, tf.concat, tf.reshape, tf.transpose, tf.scatter_nd|CUSTOM, MediaPipe|
65 | |44|GATHER|tf.gather||
66 | |45|CAST|tf.cast||
67 | |46|SLICE|tf.slice||
68 | |47|PACK|tf.stack||
69 | |48|UNPACK|tf.unstack||
70 | |49|ARG_MAX|tf.math.argmax Or tf.math.reduce_max, tf.subtract, tf.math.minimum, tf.multiply|The behavior differs depending on the optimization options of edgetpu.|
71 | |50|EXP|tf.exp||
72 | |51|TOPK_V2|tf.math.top_k||
73 | |52|LOG_SOFTMAX|tf.nn.log_softmax||
74 | |53|L2_NORMALIZATION|tf.math.l2_normalize||
75 | |54|LESS|tf.math.less||
76 | |55|LESS_EQUAL|tf.math.less_equal||
77 | |56|GREATER|tf.math.greater||
78 | |57|GREATER_EQUAL|tf.math.greater_equal||
79 | |58|NEG|tf.math.negative||
80 | |59|WHERE|tf.where||
81 | |60|SELECT|tf.where||
82 | |61|SELECT_V2|tf.where||
83 | |62|PADV2|tf.raw_ops.PadV2||
84 | |63|SIN|tf.math.sin||
85 | |64|TILE|tf.tile||
86 | |65|EQUAL|tf.math.equal||
87 | |66|NOT_EQUAL|tf.math.not_equal||
88 | |67|LOG|tf.math.log||
89 | |68|SQRT|tf.math.sqrt||
90 | |69|ARG_MIN|tf.math.argmin or tf.math.negative,tf.math.argmax||
91 | |70|REDUCE_PROD|tf.math.reduce_prod||
92 | |71|LOGICAL_OR|tf.math.logical_or||
93 | |72|LOGICAL_AND|tf.math.logical_and||
94 | |73|LOGICAL_NOT|tf.math.logical_not||
95 | |74|REDUCE_MIN|tf.math.reduce_min or tf.math.negative,tf.math.reduce_max||
96 | |75|REDUCE_ANY|tf.math.reduce_any||
97 | |76|SQUARE|tf.math.square||
98 | |77|ZEROS_LIKE|tf.zeros_like||
99 | |78|FILL|tf.fill||
100 | |79|FLOOR_MOD|tf.math.floormod||
101 | |80|RANGE|tf.range||
102 | |81|ABS|tf.math.abs||
103 | |82|UNIQUE|tf.unique||
104 | |83|CEIL|tf.math.ceil||
105 | |84|REVERSE_V2|tf.reverse||
106 | |85|ADD_N|tf.math.add_n||
107 | |86|GATHER_ND|tf.gather_nd||
108 | |87|COS|tf.math.cos||
109 | |88|RANK|tf.math.rank||
110 | |89|ELU|tf.nn.elu||
111 | |90|WHILE|tf.while_loop||
112 | |91|REVERSE_SEQUENCE|tf.reverse_sequence||
113 | |92|MATRIX_DIAG|tf.linalg.diag||
114 | |93|ROUND|tf.math.round||
115 | |94|NON_MAX_SUPPRESSION_V4|tf.raw_ops.NonMaxSuppressionV4||
116 | |95|NON_MAX_SUPPRESSION_V5|tf.raw_ops.NonMaxSuppressionV5, tf.raw_ops.NonMaxSuppressionV4, tf.raw_ops.NonMaxSuppressionV3||
117 | |96|SCATTER_ND|tf.scatter_nd||
118 | |97|SEGMENT_SUM|tf.math.segment_sum||
119 | |98|CUMSUM|tf.math.cumsum||
120 | |99|BROADCAST_TO|tf.broadcast_to||
121 | |100|RFFT2D|tf.signal.rfft2d||
122 | |101|L2_POOL_2D|tf.square, tf.keras.layers.AveragePooling2D, tf.sqrt||
123 | |102|LOCAL_RESPONSE_NORMALIZATION|tf.nn.local_response_normalization||
124 | |103|RELU_N1_TO_1|tf.minimum, tf.maximum||
125 | |104|SPLIT_V|tf.raw_ops.SplitV||
126 | |105|MATRIX_SET_DIAG|tf.linalg.set_diag||
127 | |106|SHAPE|tf.shape||
128 | |107|EXPAND_DIMS|tf.expand_dims||
129 | |108|SQUEEZE|tf.squeeze||
130 | |109|FlexRFFT|tf.signal.rfft|Flex OP|
131 | |110|FlexImag|tf.math.imag|Flex OP|
132 | |111|FlexReal|tf.math.real|Flex OP|
133 | |112|FlexRFFT2D|tf.signal.rfft2d|Flex OP|
134 | |113|FlexComplexAbs|tf.raw_ops.ComplexAbs|Flex OP|
135 | |114|IMAG|tf.math.imag||
136 | |115|REAL|tf.math.real||
137 | |116|COMPLEX_ABS|tf.raw_ops.ComplexAbs||
138 | |117|TFLite_Detection_PostProcess|tf.divide, tf.strided_slice, tf.math.argmax, tf.math.reduce_max, tf.math.multiply, tf.math.add, tf.math.exp, tf.math.subtract, tf.expand_dims, tf.gather, tf.reshape, tf.identity, tf.raw_ops.NonMaxSuppressionV5|CUSTOM|
139 | |118|ONE_HOT|tf.one_hot||
140 | |119|FlexMultinomial|tf.random.categorical|Flex OP|
141 | |120|FlexAll|tf.math.reduce_all|Flex OP|
142 | |121|FlexErf|tf.math.erf|Flex OP|
143 | |122|FlexRoll|tf.roll|Flex OP|
144 | |123|CONV_3D|tf.keras.layers.Conv3D||
145 | |124|CONV_3D_TRANSPOSE|tf.nn.conv3d_transpose||
146 | |125|Densify|(const)||
147 | |126|SPACE_TO_BATCH_ND|tf.space_to_batch_nd||
148 | |127|BATCH_TO_SPACE_ND|tf.compat.v1.batch_to_space_nd||
149 | |128|TransformLandmarks|tf.reshape, tf.linalg.matmul, tf.math.add|CUSTOM, MediaPipe|
150 | |129|TransformTensorBilinear|tf.reshape, tf.linalg.matmul, tf.math.add, tf.tile, tf.math.floor, tf.math.subtract, tf.math.multiply, tf.math.reduce_prod, tf.cast, tf.math.maximum, tf.math.maximum, tf.concat, tf.gather_nd|CUSTOM, MediaPipe|
151 | |130|Landmarks2TransformMatrix|tf.constant, tf.math.subtract, tf.math.norm, tf.math.divide, tf.linalg.matmul, tf.concat, tf.transpose, tf.gather, tf.math.reduce_min, tf.math.reduce_max, tf.math.multiply, tf.zeros, tf.math.add, tf.tile|CUSTOM, MediaPipe|
152 |
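
As a concrete illustration of how these mappings work, the HARD_SWISH row above can be reproduced with plain TensorFlow ops. This is a minimal sketch of the formula listed in the table, not the converter's internal code:

```python
import numpy as np
import tensorflow as tf

# HARD_SWISH as mapped in the table: x * relu6(x + 3) * 0.16666667,
# a float32 approximation of x * relu6(x + 3) / 6.
x = tf.constant(np.linspace(-6.0, 6.0, 13), dtype=tf.float32)
hard_swish = x * tf.nn.relu6(x + 3.0) * 0.16666667

# Compare against the exact 1/6 formulation.
reference = x * tf.nn.relu6(x + 3.0) / 6.0
print(np.max(np.abs(hard_swish.numpy() - reference.numpy())))  # ~1e-7
```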
153 |
154 |
155 | ## 2. Environment
156 | - Python3.8+
157 | - TensorFlow v2.9.0+
158 | - TensorFlow Lite v2.9.0 with MediaPipe Custom OP, FlexDelegate and XNNPACK enabled
159 | - **[Add a custom OP to the TFLite runtime to build the whl installer (for Python)](https://zenn.dev/pinto0309/articles/a0e40c2817f2ee)**, **`MaxPoolingWithArgmax2D`**, **`MaxUnpooling2D`**, **`Convolution2DTransposeBias`**, **`TransformLandmarks`**, **`TransformTensorBilinear`**, **`Landmarks2TransformMatrix`**
160 | - **https://github.com/PINTO0309/TensorflowLite-bin**
161 | - flatc v2.0.8
162 | - PyTorch v1.12.0 (with grid_sample)
163 | - TorchVision
164 | - TorchAudio
165 | - OpenVINO 2021.4.582+
166 | - TensorRT 8.4+
167 | - trtexec
168 | - pycuda 2021.1
169 | - tensorflowjs
170 | - coremltools
171 | - paddle2onnx
172 | - onnx
173 | - onnxruntime-gpu (CUDA, TensorRT, OpenVINO)
174 | - onnxruntime-extensions
175 | - onnx_graphsurgeon
176 | - onnx-simplifier
177 | - onnxconverter-common
178 | - onnxmltools
179 | - onnx-tensorrt
180 | - tf2onnx
181 | - torch2trt
182 | - onnx-tf
183 | - tensorflow-datasets
184 | - tf_slim
185 | - edgetpu_compiler
186 | - tflite2tensorflow
187 | - openvino2tensorflow
188 | - simple-onnx-processing-tools
189 | - gdown
190 | - pandas
191 | - matplotlib
192 | - paddlepaddle
193 | - paddle2onnx
194 | - pycocotools
195 | - scipy
196 | - Intel-Media-SDK
197 | - Intel iHD GPU (iGPU) support
198 | - OpenCL
199 | - gluoncv
200 | - LLVM
201 | - NNPACK
202 | - WSL2 OpenCL
203 |
204 | ## 3. Setup
205 | ### 3-1. **[Environment construction pattern 1]** Execution by Docker (`strongly recommended`)
206 | You do not need to install any packages other than Docker. It consumes about 26.7GB of host storage.
207 | ```bash
208 | $ docker pull ghcr.io/pinto0309/tflite2tensorflow:latest
209 | or
210 | $ docker build -t ghcr.io/pinto0309/tflite2tensorflow:latest .
211 |
212 | # If you don't need to access the GUI of the HostPC and the USB camera.
213 | $ docker run -it --rm \
214 | -v `pwd`:/home/user/workdir \
215 | ghcr.io/pinto0309/tflite2tensorflow:latest
216 |
217 | # If conversion to TF-TRT is not required. And if you need to access the HostPC GUI and USB camera.
218 | $ xhost +local: && \
219 | docker run -it --rm \
220 | -v `pwd`:/home/user/workdir \
221 | -v /tmp/.X11-unix/:/tmp/.X11-unix:rw \
222 | --device /dev/video0:/dev/video0:mwr \
223 | --net=host \
224 | -e XDG_RUNTIME_DIR=$XDG_RUNTIME_DIR \
225 | -e DISPLAY=$DISPLAY \
226 | --privileged \
227 | ghcr.io/pinto0309/tflite2tensorflow:latest
228 |
229 | # If you need to convert to TF-TRT. And if you need to access the HostPC GUI and USB camera.
230 | $ xhost +local: && \
231 | docker run --gpus all -it --rm \
232 | -v `pwd`:/home/user/workdir \
233 | -v /tmp/.X11-unix/:/tmp/.X11-unix:rw \
234 | --device /dev/video0:/dev/video0:mwr \
235 | --net=host \
236 | -e XDG_RUNTIME_DIR=$XDG_RUNTIME_DIR \
237 | -e DISPLAY=$DISPLAY \
238 | --privileged \
239 | ghcr.io/pinto0309/tflite2tensorflow:latest
240 |
241 | # If you are using iGPU (OpenCL). And if you need to access the HostPC GUI and USB camera.
242 | $ xhost +local: && \
243 | docker run -it --rm \
244 | -v `pwd`:/home/user/workdir \
245 | -v /tmp/.X11-unix/:/tmp/.X11-unix:rw \
246 | --device /dev/video0:/dev/video0:mwr \
247 | --net=host \
248 | -e LIBVA_DRIVER_NAME=iHD \
249 | -e XDG_RUNTIME_DIR=$XDG_RUNTIME_DIR \
250 | -e DISPLAY=$DISPLAY \
251 | --privileged \
252 | ghcr.io/pinto0309/tflite2tensorflow:latest
253 | ```
254 | ### 3-2. **[Environment construction pattern 2]** Execution by Host machine
255 | To install using the Python Package Index (PyPI), use the following command.
256 | ```
257 | $ pip3 install --user --upgrade tflite2tensorflow
258 | ```
259 | Or, to install from the latest source code on the main branch, use the following command.
260 | ```
261 | $ pip3 install --user --upgrade git+https://github.com/PINTO0309/tflite2tensorflow
262 | ```
263 | This installs a customized TensorFlow Lite runtime with support for MediaPipe Custom OP, FlexDelegate, and XNNPACK. If tflite_runtime does not install properly, follow the instructions in the article below to build it for the environment you are using: **[Add a custom OP to the TFLite runtime to build the whl installer (for Python)](https://zenn.dev/pinto0309/articles/a0e40c2817f2ee)**, **`MaxPoolingWithArgmax2D`**, **`MaxUnpooling2D`**, **`Convolution2DTransposeBias`**, **`TransformLandmarks`**, **`TransformTensorBilinear`**, **`Landmarks2TransformMatrix`**
264 | ```
265 | $ sudo pip3 uninstall -y \
266 | tensorboard-plugin-wit \
267 | tb-nightly \
268 | tensorboard \
269 | tf-estimator-nightly \
270 | tensorflow-gpu \
271 | tensorflow \
272 | tf-nightly \
273 | tensorflow_estimator \
274 | tflite_runtime
275 |
276 | $ APPVER=v1.20.7
277 | $ TENSORFLOWVER=2.8.0
278 |
279 | ### Customized version of TensorFlow Lite installation
280 | $ wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/tflite_runtime-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl \
281 | && sudo chmod +x tflite_runtime-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl \
282 | && pip3 install --user --force-reinstall tflite_runtime-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl \
283 | && rm tflite_runtime-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl
284 |
285 | ### Install the Customized Full TensorFlow package
286 | ### (MediaPipe Custom OP, FlexDelegate, XNNPACK enabled)
287 | $ wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/tensorflow-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl \
288 | && sudo chmod +x tensorflow-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl \
289 | && pip3 install --user --force-reinstall tensorflow-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl \
290 | && rm tensorflow-${TENSORFLOWVER}-cp38-none-linux_x86_64.whl
291 |
292 | or
293 |
294 | ### Install the Non-customized TensorFlow package
295 | $ pip3 install --user tf-nightly
296 |
297 | ### Download schema.fbs
298 | $ wget https://github.com/PINTO0309/tflite2tensorflow/raw/main/schema/schema.fbs
299 | ```
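
After installing either wheel, a quick import check confirms that the packages resolve (a sketch; the version printed will be the customized wheel's, e.g. 2.8.0 above, or whatever tf-nightly installed):

```python
# Confirm the full TensorFlow package resolves.
import tensorflow as tf
print(tf.__version__)

# Confirm the customized TensorFlow Lite runtime imports cleanly.
from tflite_runtime.interpreter import Interpreter
print(Interpreter)
```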
300 | ### Build flatc
301 | ```
302 | $ git clone -b v2.0.8 https://github.com/google/flatbuffers.git
303 | $ cd flatbuffers && mkdir build && cd build
304 | $ cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release ..
305 | $ make -j$(nproc)
306 | ```
307 |
308 | 
309 | 
310 |
311 | The Windows version of flatc v2.0.8 can be downloaded from here.
312 | **https://github.com/google/flatbuffers/releases/download/v2.0.8/Windows.flatc.binary.zip**
313 |
314 | ## 4. Usage / Execution sample
315 | ### 4-1. Command line options
316 | ```
317 | usage: tflite2tensorflow
318 | [-h]
319 | --model_path MODEL_PATH
320 | --flatc_path FLATC_PATH
321 | --schema_path SCHEMA_PATH
322 | [--model_output_path MODEL_OUTPUT_PATH]
323 | [--output_pb]
324 | [--output_no_quant_float32_tflite]
325 | [--output_dynamic_range_quant_tflite]
326 | [--output_weight_quant_tflite]
327 | [--output_float16_quant_tflite]
328 | [--output_integer_quant_tflite]
329 | [--output_full_integer_quant_tflite]
330 | [--output_integer_quant_type]
331 | [--string_formulas_for_normalization STRING_FORMULAS_FOR_NORMALIZATION]
332 | [--calib_ds_type CALIB_DS_TYPE]
333 | [--ds_name_for_tfds_for_calibration DS_NAME_FOR_TFDS_FOR_CALIBRATION]
334 | [--split_name_for_tfds_for_calibration SPLIT_NAME_FOR_TFDS_FOR_CALIBRATION]
335 | [--download_dest_folder_path_for_the_calib_tfds DOWNLOAD_DEST_FOLDER_PATH_FOR_THE_CALIB_TFDS]
336 | [--tfds_download_flg]
337 | [--load_dest_file_path_for_the_calib_npy LOAD_DEST_FILE_PATH_FOR_THE_CALIB_NPY]
338 | [--output_tfjs]
339 | [--output_tftrt_float32]
340 | [--output_tftrt_float16]
341 | [--output_coreml]
342 | [--optimizing_for_coreml]
343 | [--output_edgetpu]
344 | [--edgetpu_compiler_timeout EDGETPU_COMPILER_TIMEOUT]
345 | [--edgetpu_num_segments EDGETPU_NUM_SEGMENTS]
346 | [--output_onnx]
347 | [--onnx_opset ONNX_OPSET]
348 | [--onnx_extra_opset ONNX_EXTRA_OPSET]
349 | [--disable_onnx_nchw_conversion]
350 | [--disable_onnx_optimization]
351 | [--output_openvino_and_myriad]
352 | [--vpu_number_of_shaves VPU_NUMBER_OF_SHAVES]
353 | [--vpu_number_of_cmx_slices VPU_NUMBER_OF_CMX_SLICES]
354 | [--optimizing_for_openvino_and_myriad]
355 | [--rigorous_optimization_for_myriad]
356 | [--replace_swish_and_hardswish]
357 | [--optimizing_for_edgetpu]
358 | [--replace_prelu_and_minmax]
359 | [--disable_experimental_new_quantizer]
360 | [--disable_per_channel]
361 | [--optimizing_barracuda]
362 | [--locationids_of_the_terminating_output]
363 |
364 | optional arguments:
365 | -h, --help
366 | show this help message and exit
367 | --model_path MODEL_PATH
368 | input tflite model path (*.tflite)
369 | --flatc_path FLATC_PATH
370 | flatc file path (flatc)
371 | --schema_path SCHEMA_PATH
372 | schema.fbs path (schema.fbs)
373 | --model_output_path MODEL_OUTPUT_PATH
374 | The output folder path of the converted model file
375 | --output_pb
376 | .pb output switch
377 | --output_no_quant_float32_tflite
378 | float32 tflite output switch
379 | --output_dynamic_range_quant_tflite
380 | dynamic range quant tflite output switch
381 | --output_weight_quant_tflite
382 | weight quant tflite output switch
383 | --output_float16_quant_tflite
384 | float16 quant tflite output switch
385 | --output_integer_quant_tflite
386 | integer quant tflite output switch
387 | --output_full_integer_quant_tflite
388 | full integer quant tflite output switch
389 | --output_integer_quant_type OUTPUT_INTEGER_QUANT_TYPE
390 | Input and output types when doing Integer Quantization
391 | ('int8 (default)' or 'uint8')
392 | --string_formulas_for_normalization STRING_FORMULAS_FOR_NORMALIZATION
393 | String formulas for normalization. It is evaluated by
394 | Python's eval() function. Default: '(data -
395 | [127.5,127.5,127.5]) / [127.5,127.5,127.5]'
396 | --calib_ds_type CALIB_DS_TYPE
397 | Types of data sets for calibration. tfds or numpy
398 | Default: numpy
399 | --ds_name_for_tfds_for_calibration DS_NAME_FOR_TFDS_FOR_CALIBRATION
400 | Dataset name for TensorFlow Datasets for calibration.
401 | https://www.tensorflow.org/datasets/catalog/overview
402 | --split_name_for_tfds_for_calibration SPLIT_NAME_FOR_TFDS_FOR_CALIBRATION
403 | Split name for TensorFlow Datasets for calibration.
404 | https://www.tensorflow.org/datasets/catalog/overview
405 | --download_dest_folder_path_for_the_calib_tfds DOWNLOAD_DEST_FOLDER_PATH_FOR_THE_CALIB_TFDS
406 | Download destination folder path for the calibration
407 | dataset. Default: $HOME/TFDS
408 | --tfds_download_flg
409 | True to automatically download datasets from
410 | TensorFlow Datasets. True or False
411 | --load_dest_file_path_for_the_calib_npy LOAD_DEST_FILE_PATH_FOR_THE_CALIB_NPY
412 | The path from which to load the .npy file containing
413 | the numpy binary version of the calibration data.
414 | Default: sample_npy/calibration_data_img_sample.npy
415 | [20, 513, 513, 3] -> [Number of images, h, w, c]
416 | --output_tfjs
417 | tfjs model output switch
418 | --output_tftrt_float32
419 | tftrt float32 model output switch
420 | --output_tftrt_float16
421 | tftrt float16 model output switch
422 | --output_coreml
423 | coreml model output switch
424 | --optimizing_for_coreml
425 | Optimizing graph for coreml
426 | --output_edgetpu
427 | edgetpu model output switch
428 | --edgetpu_compiler_timeout
429 | edgetpu_compiler timeout for one compilation process in seconds.
430 | Default: 3600
431 | --edgetpu_num_segments
432 | Partition the model into 'num_segments' segments.
433 | Default: 1 (no partition)
434 | --output_onnx
435 | onnx model output switch
436 | --onnx_opset ONNX_OPSET
437 | onnx opset version number
438 | --onnx_extra_opset ONNX_EXTRA_OPSET
439 | The name of the onnx 'extra_opset' to enable.
440 | Default: ''
441 | 'com.microsoft:1' or 'ai.onnx.contrib:1' or 'ai.onnx.ml:1'
442 | --disable_onnx_nchw_conversion
443 | Disable onnx NCHW conversion
444 | --disable_onnx_optimization
445 | Disable onnx optimization
446 | --output_openvino_and_myriad
447 | openvino model and myriad inference engine blob output switch
448 | --vpu_number_of_shaves VPU_NUMBER_OF_SHAVES
449 | vpu number of shaves. Default: 4
450 | --vpu_number_of_cmx_slices VPU_NUMBER_OF_CMX_SLICES
451 | vpu number of cmx slices. Default: 4
452 | --optimizing_for_openvino_and_myriad
453 | Optimizing graph for openvino/myriad
454 | --rigorous_optimization_for_myriad
455 | Replace operations that are not supported by myriad with operations
456 | that are as feasible as possible.
457 | e.g. 'Abs' -> 'Square' + 'Sqrt'
458 | --replace_swish_and_hardswish
459 | Replace swish and hard-swish with each other
460 | --optimizing_for_edgetpu
461 | Optimizing for edgetpu
462 | --replace_prelu_and_minmax
463 | Replace prelu and minimum/maximum with each other
464 | --disable_experimental_new_quantizer
465 | Disable MLIRs new quantization feature during INT8 quantization
466 | in TensorFlowLite.
467 | --disable_per_channel
468 | Disable per-channel quantization for tflite.
469 | --optimizing_barracuda
470 | Generates ONNX by replacing Barracuda unsupported layers
471 | with standard layers. For example, GatherND.
472 | --locationids_of_the_terminating_output
473 | A comma-separated list of LocationIDs to be used as output layers.
474 | e.g. --locationids_of_the_terminating_output 100,201,560
475 | Default: ''
476 | ```
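
The help text above notes that `--string_formulas_for_normalization` is evaluated with Python's `eval()`. A minimal sketch of that mechanism, assuming `data` stands in for one float32 calibration batch (this illustrates the documented behaviour, not the tool's internal code):

```python
import numpy as np

# Hypothetical stand-in for a calibration batch: [N, h, w, c], float32.
data = np.random.randint(0, 256, (1, 513, 513, 3)).astype(np.float32)

# The documented default formula.
formula = '(data - [127.5,127.5,127.5]) / [127.5,127.5,127.5]'

# eval() runs the formula with `data` in scope; NumPy broadcasting
# applies the per-channel constants across the channel axis.
normalized = eval(formula)
print(normalized.min(), normalized.max())  # roughly -1.0 .. 1.0
```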
477 | ### 4-2. Step 1 : Generating saved_model and FreezeGraph (.pb)
478 | ```
479 | $ tflite2tensorflow \
480 | --model_path segm_full_v679.tflite \
481 | --flatc_path ../flatc \
482 | --schema_path ../schema.fbs \
483 | --output_pb
484 | ```
485 | or
486 | ```
487 | $ tflite2tensorflow \
488 | --model_path segm_full_v679.tflite \
489 | --flatc_path ../flatc \
490 | --schema_path ../schema.fbs \
491 | --output_pb \
492 | --optimizing_for_openvino_and_myriad
493 | ```
494 | or
495 | ```
496 | $ tflite2tensorflow \
497 | --model_path segm_full_v679.tflite \
498 | --flatc_path ../flatc \
499 | --schema_path ../schema.fbs \
500 | --output_pb \
501 | --optimizing_for_openvino_and_myriad \
502 | --rigorous_optimization_for_myriad
503 | ```
504 | or
505 | ```
506 | $ tflite2tensorflow \
507 | --model_path segm_full_v679.tflite \
508 | --flatc_path ../flatc \
509 | --schema_path ../schema.fbs \
510 | --output_pb \
511 | --optimizing_for_edgetpu
512 | ```
513 | or
514 | ```
515 | $ tflite2tensorflow \
516 | --model_path segm_full_v679.tflite \
517 | --flatc_path ../flatc \
518 | --schema_path ../schema.fbs \
519 | --output_pb \
520 | --optimizing_for_coreml
521 | ```
522 | or
523 | ```
524 | $ tflite2tensorflow \
525 | --model_path segm_full_v679.tflite \
526 | --flatc_path ../flatc \
527 | --schema_path ../schema.fbs \
528 | --output_pb \
529 | --optimizing_barracuda
530 | ```
531 |
532 | ### 4-3. Step 2 : Generation of quantized tflite, TFJS, TF-TRT, EdgeTPU, CoreML and ONNX
533 | ```
534 | $ tflite2tensorflow \
535 | --model_path segm_full_v679.tflite \
536 | --flatc_path ../flatc \
537 | --schema_path ../schema.fbs \
538 | --output_no_quant_float32_tflite \
539 | --output_dynamic_range_quant_tflite \
540 | --output_weight_quant_tflite \
541 | --output_float16_quant_tflite \
542 | --output_integer_quant_tflite \
543 | --string_formulas_for_normalization 'data / 255.0' \
544 | --output_tfjs \
545 | --output_coreml \
546 | --output_tftrt_float32 \
547 | --output_tftrt_float16 \
548 | --output_onnx \
549 | --onnx_opset 11 \
550 | --output_openvino_and_myriad
551 | ```
552 | or
553 | ```
554 | $ tflite2tensorflow \
555 | --model_path segm_full_v679.tflite \
556 | --flatc_path ../flatc \
557 | --schema_path ../schema.fbs \
558 | --output_no_quant_float32_tflite \
559 | --output_dynamic_range_quant_tflite \
560 | --output_weight_quant_tflite \
561 | --output_float16_quant_tflite \
562 | --output_integer_quant_tflite \
563 | --output_edgetpu \
564 | --output_integer_quant_type 'uint8' \
565 | --string_formulas_for_normalization 'data / 255.0' \
566 | --output_tfjs \
567 | --output_coreml \
568 | --output_tftrt_float32 \
569 | --output_tftrt_float16 \
570 | --output_onnx \
571 | --onnx_opset 11
572 | ```
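
After Step 2, one quick way to sanity-check the generated artifacts is to load the `saved_model` and run a dummy inference. A minimal sketch, assuming the default `saved_model` output directory and a single-input model; the input name and shape are read from the signature rather than hard-coded:

```python
import tensorflow as tf

# Load the converted model and pick up its serving signature.
model = tf.saved_model.load('saved_model')
infer = model.signatures['serving_default']

# Build a zero tensor matching the signature's declared input spec
# (unknown dimensions are filled in with 1).
input_name, spec = next(iter(infer.structured_input_signature[1].items()))
dummy = tf.zeros([d if d is not None else 1 for d in spec.shape], spec.dtype)

# Run once and print the output shapes to confirm the graph is callable.
outputs = infer(**{input_name: dummy})
print({name: tuple(t.shape) for name, t in outputs.items()})
```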
573 | ### 4-4. Check the contents of the .npy file, which is a binary version of the image file
574 | ```
575 | $ view_npy --npy_file_path calibration_data_img_sample.npy
576 | ```
577 | Press the **`Q`** key to display the next image. **`calibration_data_img_sample.npy`** contains 20 images extracted from the MS-COCO dataset.
578 |
579 | 
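
The same `[Number of images, h, w, c]` layout applies when preparing your own calibration data. A minimal sketch of building such a .npy from image files (the file names are hypothetical, and OpenCV is assumed to be installed):

```python
import cv2
import numpy as np

# Read images, resize to the model's input resolution, and stack them
# into a single [N, h, w, c] uint8 array.
images = []
for path in ['img0.jpg', 'img1.jpg']:  # hypothetical file names
    img = cv2.imread(path)             # BGR, uint8
    img = cv2.resize(img, (513, 513))  # (w, h), matching the sample file
    images.append(img[:, :, ::-1])     # BGR -> RGB
np.save('my_calibration_data.npy', np.asarray(images))

# Verify the result, e.g. (2, 513, 513, 3).
print(np.load('my_calibration_data.npy').shape)
```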
580 |
581 | ## 5. Sample image
582 | This is the result of converting MediaPipe's Meet Segmentation model (segm_full_v679.tflite / Float16 / Google Meet) to **`saved_model`** and then reconverting it to Float32 tflite. The GPU-optimized **`Convolution2DTransposeBias`** layer is replaced with the standard **`TransposeConv`** and **`BiasAdd`** layers fully automatically. The weights and biases of the Float16 **`Dequantize`** layer are automatically dequantized back to Float32 precision. The generated Float32 **`saved_model`** can then be easily converted to **`Float16`**, **`INT8`**, **`EdgeTPU`**, **`TFJS`**, **`TF-TRT`**, **`CoreML`**, **`ONNX`**, **`OpenVINO`**, and **`Myriad Inference Engine blob`**.
583 |
584 | |Before|After|
585 | |:--:|:--:|
586 | |||
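
As listed in the Supported Layers table, the custom `Convolution2DTransposeBias` op decomposes into `tf.nn.conv2d_transpose` followed by `tf.math.add`. A minimal sketch of that decomposition with illustrative shapes (not values taken from segm_full_v679.tflite):

```python
import tensorflow as tf

# Convolution2DTransposeBias ~= TransposeConv + BiasAdd.
x = tf.random.normal([1, 16, 16, 8])  # NHWC input
w = tf.random.normal([2, 2, 4, 8])    # [kh, kw, out_channels, in_channels]
b = tf.random.normal([4])             # one bias per output channel

y = tf.nn.conv2d_transpose(
    x, w, output_shape=[1, 32, 32, 4], strides=[1, 2, 2, 1], padding='SAME')
y = tf.math.add(y, b)                 # the bias that was fused into the custom op
print(y.shape)                        # (1, 32, 32, 4)
```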
587 |
--------------------------------------------------------------------------------
/bashrc:
--------------------------------------------------------------------------------
1 | # ~/.bashrc: executed by bash(1) for non-login shells.
2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
3 | # for examples
4 |
5 | # If not running interactively, don't do anything
6 | case $- in
7 | *i*) ;;
8 | *) return;;
9 | esac
10 |
11 | # don't put duplicate lines or lines starting with space in the history.
12 | # See bash(1) for more options
13 | HISTCONTROL=ignoreboth
14 |
15 | # append to the history file, don't overwrite it
16 | shopt -s histappend
17 |
18 | # for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
19 | HISTSIZE=1000
20 | HISTFILESIZE=2000
21 |
22 | # check the window size after each command and, if necessary,
23 | # update the values of LINES and COLUMNS.
24 | shopt -s checkwinsize
25 |
26 | # If set, the pattern "**" used in a pathname expansion context will
27 | # match all files and zero or more directories and subdirectories.
28 | #shopt -s globstar
29 |
30 | # make less more friendly for non-text input files, see lesspipe(1)
31 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
32 |
33 | # set variable identifying the chroot you work in (used in the prompt below)
34 | if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
35 | debian_chroot=$(cat /etc/debian_chroot)
36 | fi
37 |
38 | # set a fancy prompt (non-color, unless we know we "want" color)
39 | case "$TERM" in
40 | xterm-color|*-256color) color_prompt=yes;;
41 | esac
42 |
43 | # uncomment for a colored prompt, if the terminal has the capability; turned
44 | # off by default to not distract the user: the focus in a terminal window
45 | # should be on the output of commands, not on the prompt
46 | #force_color_prompt=yes
47 |
48 | if [ -n "$force_color_prompt" ]; then
49 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
50 | # We have color support; assume it's compliant with Ecma-48
51 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
52 | # a case would tend to support setf rather than setaf.)
53 | color_prompt=yes
54 | else
55 | color_prompt=
56 | fi
57 | fi
58 |
59 | if [ "$color_prompt" = yes ]; then
60 | PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
61 | else
62 | PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
63 | fi
64 | unset color_prompt force_color_prompt
65 |
66 | # If this is an xterm set the title to user@host:dir
67 | case "$TERM" in
68 | xterm*|rxvt*)
69 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
70 | ;;
71 | *)
72 | ;;
73 | esac
74 |
75 | # enable color support of ls and also add handy aliases
76 | if [ -x /usr/bin/dircolors ]; then
77 | test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
78 | alias ls='ls --color=auto'
79 | #alias dir='dir --color=auto'
80 | #alias vdir='vdir --color=auto'
81 |
82 | alias grep='grep --color=auto'
83 | alias fgrep='fgrep --color=auto'
84 | alias egrep='egrep --color=auto'
85 | fi
86 |
87 | # colored GCC warnings and errors
88 | #export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
89 |
90 | # some more ls aliases
91 | alias ll='ls -alF'
92 | alias la='ls -A'
93 | alias l='ls -CF'
94 |
95 | # Add an "alert" alias for long running commands. Use like so:
96 | # sleep 10; alert
97 | alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
98 |
99 | # Alias definitions.
100 | # You may want to put all your additions into a separate file like
101 | # ~/.bash_aliases, instead of adding them here directly.
102 | # See /usr/share/doc/bash-doc/examples in the bash-doc package.
103 |
104 | if [ -f ~/.bash_aliases ]; then
105 | . ~/.bash_aliases
106 | fi
107 |
108 | # enable programmable completion features (you don't need to enable
109 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile
110 | # sources /etc/bash.bashrc).
111 | if ! shopt -oq posix; then
112 | if [ -f /usr/share/bash-completion/bash_completion ]; then
113 | . /usr/share/bash-completion/bash_completion
114 | elif [ -f /etc/bash_completion ]; then
115 | . /etc/bash_completion
116 | fi
117 | fi
118 |
--------------------------------------------------------------------------------
/flatbuffers/1.12.0/download.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Request the Google Drive download page first to capture the
4 | # confirmation token that Drive issues for large files, then pass
5 | # the token back to download the archive itself.
6 | curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1drnpyrXkUHsMSqb8klV2YosEU9jdoJTP" > /dev/null
7 | CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
8 | curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1drnpyrXkUHsMSqb8klV2YosEU9jdoJTP" -o resources.tar.gz
9 | tar -zxvf resources.tar.gz
10 | rm resources.tar.gz
11 | 
12 | echo Download finished.
13 | 
--------------------------------------------------------------------------------
/sample_json/download.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1_3-2by5IF817sUxFUCjdqx0ycm7D-kbY" > /dev/null
4 | CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
5 | curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1_3-2by5IF817sUxFUCjdqx0ycm7D-kbY" -o resources.tar.gz
6 | tar -zxvf resources.tar.gz
7 | rm resources.tar.gz
8 |
9 | curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1zd6aD2HyV_ss83SMsRRbKL87ZBInV2Nu" > /dev/null
10 | CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
11 | curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1zd6aD2HyV_ss83SMsRRbKL87ZBInV2Nu" -o resources.tar.gz
12 | tar -zxvf resources.tar.gz
13 | rm resources.tar.gz
14 |
15 | curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1xzeUXsulG4uApP2h5BuQmB5hbPi95vOZ" > /dev/null
16 | CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
17 | curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1xzeUXsulG4uApP2h5BuQmB5hbPi95vOZ" -o resources.tar.gz
18 | tar -zxvf resources.tar.gz
19 | rm resources.tar.gz
20 |
21 | curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=124qK0ZK3KPQ01KLTy32VBdlZdvEHCSDM" > /dev/null
22 | CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
23 | curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=124qK0ZK3KPQ01KLTy32VBdlZdvEHCSDM" -o resources.tar.gz
24 | tar -zxvf resources.tar.gz
25 | rm resources.tar.gz
26 |
27 | curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1TDvppVFS2zU3bzdqOnjGZekrGx8Km3t8" > /dev/null
28 | CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
29 | curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1TDvppVFS2zU3bzdqOnjGZekrGx8Km3t8" -o resources.tar.gz
30 | tar -zxvf resources.tar.gz
31 | rm resources.tar.gz
32 |
33 | curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1eRuGfi7bJ8j6FCn8oZ1QjhMRdpwPpyDB" > /dev/null
34 | CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
35 | curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1eRuGfi7bJ8j6FCn8oZ1QjhMRdpwPpyDB" -o resources.tar.gz
36 | tar -zxvf resources.tar.gz
37 | rm resources.tar.gz
38 |
39 |
40 | echo Download finished.
41 |
--------------------------------------------------------------------------------
/sample_npy/calibration_data_img_sample.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PINTO0309/tflite2tensorflow/c13504df2f82dc234f1009e34dbab9c8b65c7ce4/sample_npy/calibration_data_img_sample.npy
--------------------------------------------------------------------------------
/schema/schema.fbs:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Revision History
16 | // Version 0: Initial version.
17 | // Version 1: Add subgraphs to schema.
18 | // Version 2: Rename operators to conform to NN API.
19 | // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
20 | // Version 3a: Add new builtin op code field. Has backward compatibility with
21 | // version 3.
22 | // Version 3b: Rename fields in SignatureDef. Has backward compatibility with
23 | // version 3 and 3a.
24 |
25 | namespace tflite;
26 |
27 | // This corresponds to the version.
28 | file_identifier "TFL3";
29 | // File extension of any written files.
30 | file_extension "tflite";
31 |
32 | // IMPORTANT: All new members of tables, enums and unions must be added at the
33 | // end to ensure backwards compatibility.
34 |
35 | // The type of data stored in a tensor.
36 | enum TensorType : byte {
37 | FLOAT32 = 0,
38 | FLOAT16 = 1,
39 | INT32 = 2,
40 | UINT8 = 3,
41 | INT64 = 4,
42 | STRING = 5,
43 | BOOL = 6,
44 | INT16 = 7,
45 | COMPLEX64 = 8,
46 | INT8 = 9,
47 | FLOAT64 = 10,
48 | COMPLEX128 = 11,
49 | UINT64 = 12,
50 | // Experimental: Resource and variant types are experimental and subject
51 | // to change. Do not implement custom kernels using resource & variant types
52 | // now.
53 | RESOURCE = 13,
54 | VARIANT = 14,
55 | UINT32 = 15,
56 | UINT16 = 16
57 | }
58 |
59 | // Custom quantization parameters for experimenting with new quantization
60 | // techniques.
61 | table CustomQuantization {
62 | custom:[ubyte] (force_align: 16);
63 | }
64 |
65 | // Represents a specific quantization technique's parameters.
66 | union QuantizationDetails {
67 | CustomQuantization,
68 | }
69 |
70 | // Parameters for converting a quantized tensor back to float.
71 | table QuantizationParameters {
72 | // These four parameters are the asymmetric linear quantization parameters.
73 | // Given a quantized value q, the corresponding float value f should be:
74 | // f = scale * (q - zero_point)
75 | // For other quantization types, the QuantizationDetails below is used.
76 | min:[float]; // For importing back into tensorflow.
77 | max:[float]; // For importing back into tensorflow.
78 | scale:[float]; // For dequantizing the tensor's values.
79 | zero_point:[long];
80 |
81 | // If this is not none, the other quantization parameters (i.e. min, max,
82 | // scale, zero_point fields above) are ignored and the value of the
83 | // QuantizationDetails union should be used.
84 | details:QuantizationDetails;
85 |
86 | // Specifies the dimension of the Tensor's shape that the scales and
87 | // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
88 | // with quantization params:
89 | // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
90 | // will be quantized across the second dimension of t.
91 | // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
92 | // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
93 | // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
94 | quantized_dimension:int;
95 | }
96 |
97 | // Sparse tensors.
98 | // We use a modification of the TACO format.
99 | // Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
100 | //
101 | // To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
102 | // potentially with a k-dimensional block (0 <= k <= n) with dims
103 | // (dn, ..., dn+k-1), the format needs to specify:
104 | // 1. In what order to traverse these dimensions. For example, to store a 2-D
105 | // matrix in row major order, the traversal order would be (d0, d1),
106 | // whereas to store it in column major order, the traversal order would be
107 | // (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
108 | // could be (d0, d1, d2, d3).
109 | // 2. How each block dimension in (dn, ..., dn+k-1) maps to the original
110 | // tensor dimension in (d0, ..., dn-1).
111 | // 3. In the traversal order defined above, the format (dense vs. sparse) and
112 | // index metadata for each dimension. For a dense dimension, this is just
113 | // the size of that dimension. For a sparse dimension, it's the same as
114 | // the compressed index defined in the Compressed Sparse Row (CSR) format.
115 | // (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
116 |
117 | // The storage type for a dimension. Currently we support:
118 | // 1. DENSE: each coordinate in this dimension is stored implicitly.
119 | // 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
120 | // compression technique is the same as what CSR uses.
121 | // More types like a sparse dimension with a different compression technique
122 | // could be added to the list in the future.
123 | enum DimensionType : byte {
124 | DENSE = 0,
125 | SPARSE_CSR = 1,
126 | }
127 |
128 | table Int32Vector {
129 | values:[int];
130 | }
131 |
132 | table Uint16Vector {
133 | values:[ushort] (force_align: 4);
134 | }
135 |
136 | table Uint8Vector {
137 | values:[ubyte] (force_align: 4);
138 | }
139 |
140 | // Variable-typed buffer to store the index metadata for a sparse dimension.
141 | // The widest type is Int32 instead of UInt32 because a tensor's shape is an int32
142 | // vector. We don't want the per-dimensional index to overflow that range.
143 | union SparseIndexVector {
144 | Int32Vector,
145 | Uint16Vector,
146 | Uint8Vector
147 | }
148 |
149 | table DimensionMetadata {
150 | // Whether a dimension is dense or sparse.
151 | format:DimensionType;
152 | // Index metadata used for a dimension.
153 | // - If format is DimensionType.DENSE then we use the dense_size field to
154 | // store the size of that dimension. Each index in that dimension is
155 | // stored implicitly.
156 | // - If format is DimensionType.SPARSE_CSR then we use array_segments and
157 | // array_indices to encode that dimension. array_segments represents how
158 | // to segment the indices array, each segment corresponds to one element
159 | // in the previous dimension. array_indices represents the index of the
160 | // non-zero elements within this dimension (as those in the CSR matrix
161 | // format, where the first array is row pointers and the second array is
162 | // column indices).
163 | dense_size:int;
164 | array_segments:SparseIndexVector;
165 | array_indices:SparseIndexVector;
166 | }
167 |
168 | // Parameters to encode a sparse TfLite tensor.
169 | table SparsityParameters {
170 | // The traversal order of the dimensions defined in the `shape` field of the
171 | // conceptual dense tensor. For a n-dimensional tensors with dims (d0, d1,
172 | // ..., dn-1),
173 | // - if not block sparse, the traversal_order is just a permutation of (d0,
174 | // ..., dn-1). For example, a 2-D matrix stored in row-major order would
175 | // have traversal_order = (d0, d1).
176 | // - if block sparse with a k-dimensional block (0 <= k <= n), the
177 | // traversal_order has n + k elements. The first n elements are still a
178 | // permutation of (d0, ..., dn-1). The last k elements are a permutation
179 | // of (dn, ..., dn+k-1), defining how to traverse a block internally. For
180 | // example, a 2-D matrix with 2-D blocks, both stored in row-major order
181 | // would have traversal_order = (d0, d1, d2, d3).
182 | traversal_order:[int];
183 | // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
184 | // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
185 | // tensor dimension in (d0, ..., dn).
186 | // It's stored in the order of (dn, ..., dn+k-1).
187 | // If not block-sparse, this field is NULL.
188 | block_map:[int];
189 | // In the traversal order defined above, the metadata needed for
190 | // each dimension to locate the non-zero values in the original dense tensor.
191 | // The size of the dim_metadata array = the size of the traversal_order array
192 | // = n + k.
193 | dim_metadata:[DimensionMetadata];
194 | }
195 |
196 | table Tensor {
197 | // The tensor shape. The meaning of each entry is operator-specific but
198 | // builtin ops use: [batch size, height, width, number of channels] (That's
199 | // Tensorflow's NHWC).
200 | shape:[int];
201 | type:TensorType;
202 | // An index that refers to the buffers table at the root of the model. Or,
203 | // if there is no data buffer associated (i.e. intermediate results), then
204 | // this is 0 (which refers to an always existent empty buffer).
205 | //
206 | // The data_buffer itself is an opaque container, with the assumption that the
207 | // target device is little-endian. In addition, all builtin operators assume
208 | // the memory is ordered such that if `shape` is [4, 3, 2], then index
209 | // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
210 | buffer:uint;
211 | name:string; // For debugging and importing back into tensorflow.
212 | quantization:QuantizationParameters; // Optional.
213 |
214 | is_variable:bool = false;
215 |
216 | // Parameters to encode a sparse tensor. See the example in
217 | // tensorflow/lite/testdata/sparse_tensor.json.
218 | sparsity:SparsityParameters; // Optional.
219 |
220 | // Encodes `shape` with unknown dimensions. Unknown dimensions are
221 | // represented with -1.
222 | shape_signature:[int]; // Optional.
223 |
224 | // If false, the rank or the number of tensor dimensions is unknown.
225 | // If false, "shape" must be [].
226 | has_rank: bool = false;
227 | }
228 |
229 | // A list of builtin operators. Builtin operators are slightly faster than custom
230 | // ones, but not by much. Moreover, while custom operators accept an opaque
231 | // object containing configuration parameters, builtins have a predetermined
232 | // set of acceptable options.
233 | // LINT.IfChange
234 | enum BuiltinOperator : int32 {
235 | ADD = 0,
236 | AVERAGE_POOL_2D = 1,
237 | CONCATENATION = 2,
238 | CONV_2D = 3,
239 | DEPTHWISE_CONV_2D = 4,
240 | DEPTH_TO_SPACE = 5,
241 | DEQUANTIZE = 6,
242 | EMBEDDING_LOOKUP = 7,
243 | FLOOR = 8,
244 | FULLY_CONNECTED = 9,
245 | HASHTABLE_LOOKUP = 10,
246 | L2_NORMALIZATION = 11,
247 | L2_POOL_2D = 12,
248 | LOCAL_RESPONSE_NORMALIZATION = 13,
249 | LOGISTIC = 14,
250 | LSH_PROJECTION = 15,
251 | LSTM = 16,
252 | MAX_POOL_2D = 17,
253 | MUL = 18,
254 | RELU = 19,
255 | // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
256 | // since different model developers use RELU1 in different ways. Never
257 | // create another op called RELU1.
258 | RELU_N1_TO_1 = 20,
259 | RELU6 = 21,
260 | RESHAPE = 22,
261 | RESIZE_BILINEAR = 23,
262 | RNN = 24,
263 | SOFTMAX = 25,
264 | SPACE_TO_DEPTH = 26,
265 | SVDF = 27,
266 | TANH = 28,
267 | CONCAT_EMBEDDINGS = 29,
268 | SKIP_GRAM = 30,
269 | CALL = 31,
270 | CUSTOM = 32,
271 | EMBEDDING_LOOKUP_SPARSE = 33,
272 | PAD = 34,
273 | UNIDIRECTIONAL_SEQUENCE_RNN = 35,
274 | GATHER = 36,
275 | BATCH_TO_SPACE_ND = 37,
276 | SPACE_TO_BATCH_ND = 38,
277 | TRANSPOSE = 39,
278 | MEAN = 40,
279 | SUB = 41,
280 | DIV = 42,
281 | SQUEEZE = 43,
282 | UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
283 | STRIDED_SLICE = 45,
284 | BIDIRECTIONAL_SEQUENCE_RNN = 46,
285 | EXP = 47,
286 | TOPK_V2 = 48,
287 | SPLIT = 49,
288 | LOG_SOFTMAX = 50,
289 | // DELEGATE is a special op type for the operations which are delegated to
290 | // other backends.
291 | // WARNING: Experimental interface, subject to change
292 | DELEGATE = 51,
293 | BIDIRECTIONAL_SEQUENCE_LSTM = 52,
294 | CAST = 53,
295 | PRELU = 54,
296 | MAXIMUM = 55,
297 | ARG_MAX = 56,
298 | MINIMUM = 57,
299 | LESS = 58,
300 | NEG = 59,
301 | PADV2 = 60,
302 | GREATER = 61,
303 | GREATER_EQUAL = 62,
304 | LESS_EQUAL = 63,
305 | SELECT = 64,
306 | SLICE = 65,
307 | SIN = 66,
308 | TRANSPOSE_CONV = 67,
309 | SPARSE_TO_DENSE = 68,
310 | TILE = 69,
311 | EXPAND_DIMS = 70,
312 | EQUAL = 71,
313 | NOT_EQUAL = 72,
314 | LOG = 73,
315 | SUM = 74,
316 | SQRT = 75,
317 | RSQRT = 76,
318 | SHAPE = 77,
319 | POW = 78,
320 | ARG_MIN = 79,
321 | FAKE_QUANT = 80,
322 | REDUCE_PROD = 81,
323 | REDUCE_MAX = 82,
324 | PACK = 83,
325 | LOGICAL_OR = 84,
326 | ONE_HOT = 85,
327 | LOGICAL_AND = 86,
328 | LOGICAL_NOT = 87,
329 | UNPACK = 88,
330 | REDUCE_MIN = 89,
331 | FLOOR_DIV = 90,
332 | REDUCE_ANY = 91,
333 | SQUARE = 92,
334 | ZEROS_LIKE = 93,
335 | FILL = 94,
336 | FLOOR_MOD = 95,
337 | RANGE = 96,
338 | RESIZE_NEAREST_NEIGHBOR = 97,
339 | LEAKY_RELU = 98,
340 | SQUARED_DIFFERENCE = 99,
341 | MIRROR_PAD = 100,
342 | ABS = 101,
343 | SPLIT_V = 102,
344 | UNIQUE = 103,
345 | CEIL = 104,
346 | REVERSE_V2 = 105,
347 | ADD_N = 106,
348 | GATHER_ND = 107,
349 | COS = 108,
350 | WHERE = 109,
351 | RANK = 110,
352 | ELU = 111,
353 | REVERSE_SEQUENCE = 112,
354 | MATRIX_DIAG = 113,
355 | QUANTIZE = 114,
356 | MATRIX_SET_DIAG = 115,
357 | ROUND = 116,
358 | HARD_SWISH = 117,
359 | IF = 118,
360 | WHILE = 119,
361 | NON_MAX_SUPPRESSION_V4 = 120,
362 | NON_MAX_SUPPRESSION_V5 = 121,
363 | SCATTER_ND = 122,
364 | SELECT_V2 = 123,
365 | DENSIFY = 124,
366 | SEGMENT_SUM = 125,
367 | BATCH_MATMUL = 126,
368 | PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
369 | CUMSUM = 128,
370 | CALL_ONCE = 129,
371 | BROADCAST_TO = 130,
372 | RFFT2D = 131,
373 | CONV_3D = 132,
374 |   IMAG = 133,
375 |   REAL = 134,
376 |   COMPLEX_ABS = 135,
377 | HASHTABLE = 136,
378 | HASHTABLE_FIND = 137,
379 | HASHTABLE_IMPORT = 138,
380 | HASHTABLE_SIZE = 139,
381 | REDUCE_ALL = 140,
382 | CONV_3D_TRANSPOSE = 141,
383 | VAR_HANDLE = 142,
384 | READ_VARIABLE = 143,
385 | ASSIGN_VARIABLE = 144,
386 | BROADCAST_ARGS = 145,
387 | RANDOM_STANDARD_NORMAL = 146,
388 | BUCKETIZE = 147,
389 | RANDOM_UNIFORM = 148,
390 | MULTINOMIAL = 149,
391 | GELU = 150,
392 | DYNAMIC_UPDATE_SLICE = 151,
393 | RELU_0_TO_1 = 152,
394 | UNSORTED_SEGMENT_PROD = 153,
395 | UNSORTED_SEGMENT_MAX = 154,
396 | UNSORTED_SEGMENT_SUM = 155,
397 | ATAN2 = 156
398 | }
399 | // LINT.ThenChange(nnapi_linter/linter.proto)
400 |
401 | // Options for the builtin operators.
402 | union BuiltinOptions {
403 | Conv2DOptions,
404 | DepthwiseConv2DOptions,
405 | ConcatEmbeddingsOptions,
406 | LSHProjectionOptions,
407 | Pool2DOptions,
408 | SVDFOptions,
409 | RNNOptions,
410 | FullyConnectedOptions,
411 | SoftmaxOptions,
412 | ConcatenationOptions,
413 | AddOptions,
414 | L2NormOptions,
415 | LocalResponseNormalizationOptions,
416 | LSTMOptions,
417 | ResizeBilinearOptions,
418 | CallOptions,
419 | ReshapeOptions,
420 | SkipGramOptions,
421 | SpaceToDepthOptions,
422 | EmbeddingLookupSparseOptions,
423 | MulOptions,
424 | PadOptions,
425 | GatherOptions,
426 | BatchToSpaceNDOptions,
427 | SpaceToBatchNDOptions,
428 | TransposeOptions,
429 | ReducerOptions,
430 | SubOptions,
431 | DivOptions,
432 | SqueezeOptions,
433 | SequenceRNNOptions,
434 | StridedSliceOptions,
435 | ExpOptions,
436 | TopKV2Options,
437 | SplitOptions,
438 | LogSoftmaxOptions,
439 | CastOptions,
440 | DequantizeOptions,
441 | MaximumMinimumOptions,
442 | ArgMaxOptions,
443 | LessOptions,
444 | NegOptions,
445 | PadV2Options,
446 | GreaterOptions,
447 | GreaterEqualOptions,
448 | LessEqualOptions,
449 | SelectOptions,
450 | SliceOptions,
451 | TransposeConvOptions,
452 | SparseToDenseOptions,
453 | TileOptions,
454 | ExpandDimsOptions,
455 | EqualOptions,
456 | NotEqualOptions,
457 | ShapeOptions,
458 | PowOptions,
459 | ArgMinOptions,
460 | FakeQuantOptions,
461 | PackOptions,
462 | LogicalOrOptions,
463 | OneHotOptions,
464 | LogicalAndOptions,
465 | LogicalNotOptions,
466 | UnpackOptions,
467 | FloorDivOptions,
468 | SquareOptions,
469 | ZerosLikeOptions,
470 | FillOptions,
471 | BidirectionalSequenceLSTMOptions,
472 | BidirectionalSequenceRNNOptions,
473 | UnidirectionalSequenceLSTMOptions,
474 | FloorModOptions,
475 | RangeOptions,
476 | ResizeNearestNeighborOptions,
477 | LeakyReluOptions,
478 | SquaredDifferenceOptions,
479 | MirrorPadOptions,
480 | AbsOptions,
481 | SplitVOptions,
482 | UniqueOptions,
483 | ReverseV2Options,
484 | AddNOptions,
485 | GatherNdOptions,
486 | CosOptions,
487 | WhereOptions,
488 | RankOptions,
489 | ReverseSequenceOptions,
490 | MatrixDiagOptions,
491 | QuantizeOptions,
492 | MatrixSetDiagOptions,
493 | HardSwishOptions,
494 | IfOptions,
495 | WhileOptions,
496 | DepthToSpaceOptions,
497 | NonMaxSuppressionV4Options,
498 | NonMaxSuppressionV5Options,
499 | ScatterNdOptions,
500 | SelectV2Options,
501 | DensifyOptions,
502 | SegmentSumOptions,
503 | BatchMatMulOptions,
504 | CumsumOptions,
505 | CallOnceOptions,
506 | BroadcastToOptions,
507 | Rfft2dOptions,
508 | Conv3DOptions,
509 | HashtableOptions,
510 | HashtableFindOptions,
511 | HashtableImportOptions,
512 | HashtableSizeOptions,
513 | VarHandleOptions,
514 | ReadVariableOptions,
515 | AssignVariableOptions,
516 | RandomOptions,
517 | BucketizeOptions,
518 | GeluOptions,
519 | DynamicUpdateSliceOptions,
520 | UnsortedSegmentProdOptions,
521 | UnsortedSegmentMaxOptions,
522 | UnsortedSegmentSumOptions,
523 | ATan2Options
524 | }
525 |
526 | // LINT.IfChange
527 | enum Padding : byte { SAME, VALID }
528 | // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
529 |
530 | // LINT.IfChange
531 | enum ActivationFunctionType : byte {
532 | NONE = 0,
533 | RELU = 1,
534 | RELU_N1_TO_1 = 2,
535 | RELU6 = 3,
536 | TANH = 4,
537 | SIGN_BIT = 5,
538 | }
539 | // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
540 |
541 | table Conv2DOptions {
542 | padding:Padding;
543 | stride_w:int;
544 | stride_h:int;
545 | fused_activation_function:ActivationFunctionType;
546 | dilation_w_factor:int = 1;
547 | dilation_h_factor:int = 1;
548 | }
549 |
550 | // Options for both Conv3D and Conv3DTranspose.
551 | table Conv3DOptions {
552 | padding:Padding;
553 | stride_d:int;
554 | stride_w:int;
555 | stride_h:int;
556 | fused_activation_function:ActivationFunctionType;
557 | dilation_d_factor:int = 1;
558 | dilation_w_factor:int = 1;
559 | dilation_h_factor:int = 1;
560 | }
561 |
562 | table Pool2DOptions {
563 | padding:Padding;
564 | stride_w:int;
565 | stride_h:int;
566 | filter_width:int;
567 | filter_height:int;
568 | fused_activation_function:ActivationFunctionType;
569 | }
570 |
571 | table DepthwiseConv2DOptions {
572 | // Parameters for DepthwiseConv version 1 or above.
573 | padding:Padding;
574 | stride_w:int;
575 | stride_h:int;
576 | // `depth_multiplier` is redundant. It's used by CPU kernels in
577 | // TensorFlow 2.0 or below, but ignored in versions above.
578 | // See comments in lite/c/builtin_op_data.h for more details.
579 | depth_multiplier:int;
580 | fused_activation_function:ActivationFunctionType;
581 | // Parameters for DepthwiseConv version 2 or above.
582 | dilation_w_factor:int = 1;
583 | dilation_h_factor:int = 1;
584 | }
585 |
586 | table ConcatEmbeddingsOptions {
587 | num_channels:int;
588 | num_columns_per_channel:[int];
589 | embedding_dim_per_channel:[int]; // This could be inferred from parameters.
590 | }
591 |
592 | enum LSHProjectionType: byte {
593 | UNKNOWN = 0,
594 | SPARSE = 1,
595 | DENSE = 2,
596 | }
597 |
598 | table LSHProjectionOptions {
599 | type: LSHProjectionType;
600 | }
601 |
602 | table SVDFOptions {
603 | rank:int;
604 | fused_activation_function:ActivationFunctionType;
605 |   // For weights-only quantization, use asymmetric quantization for
606 |   // non-constant inputs at evaluation time.
607 | asymmetric_quantize_inputs:bool;
608 | }
609 |
610 | // An implementation of TensorFlow RNNCell.
611 | table RNNOptions {
612 | fused_activation_function:ActivationFunctionType;
613 | asymmetric_quantize_inputs:bool;
614 | }
615 |
616 | // An implementation of TensorFlow dynamic_rnn with RNNCell.
617 | table SequenceRNNOptions {
618 | time_major:bool;
619 | fused_activation_function:ActivationFunctionType;
620 | asymmetric_quantize_inputs:bool;
621 | }
622 |
623 | // An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
624 | table BidirectionalSequenceRNNOptions {
625 | time_major:bool;
626 | fused_activation_function:ActivationFunctionType;
627 | merge_outputs: bool;
628 | asymmetric_quantize_inputs:bool;
629 | }
630 |
631 | // LINT.IfChange
632 | enum FullyConnectedOptionsWeightsFormat: byte {
633 | DEFAULT = 0,
634 | SHUFFLED4x16INT8 = 1,
635 | }
636 | // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
637 |
638 | // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
639 | table FullyConnectedOptions {
640 | // Parameters for FullyConnected version 1 or above.
641 | fused_activation_function:ActivationFunctionType;
642 |
643 | // Parameters for FullyConnected version 2 or above.
644 | weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
645 |
646 | // Parameters for FullyConnected version 5 or above.
647 |   // If set to true, then the number of dimensions is preserved. Furthermore,
648 | // all but the last dimension of the input and output shapes will be equal.
649 | keep_num_dims: bool;
650 |
651 | // Parameters for FullyConnected version 7 or above.
652 |   // If set to true, then a weights-only op will use asymmetric quantization
653 |   // for its inputs.
654 | asymmetric_quantize_inputs: bool;
655 | }
656 |
657 | table SoftmaxOptions {
658 | beta: float;
659 | }
660 |
661 | // An implementation of TensorFlow concat.
662 | table ConcatenationOptions {
663 | axis:int;
664 | fused_activation_function:ActivationFunctionType;
665 | }
666 |
667 | table AddOptions {
668 | fused_activation_function:ActivationFunctionType;
669 | // Parameters supported by version 3.
670 | pot_scale_int16:bool = true;
671 | }
672 |
673 | table MulOptions {
674 | fused_activation_function:ActivationFunctionType;
675 | }
676 |
677 | table L2NormOptions {
678 | // This field is currently ignored in the L2 Norm Op.
679 | fused_activation_function:ActivationFunctionType;
680 | }
681 |
682 | table LocalResponseNormalizationOptions {
683 | radius:int;
684 | bias:float;
685 | alpha:float;
686 | beta:float;
687 | }
688 |
689 | // LINT.IfChange
690 | enum LSTMKernelType : byte {
691 | // Full LSTM kernel which supports peephole and projection.
692 | FULL = 0,
693 | // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
694 | BASIC = 1,
695 | }
696 | // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
697 |
698 | // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
699 | table LSTMOptions {
700 | // Parameters for LSTM version 1 or above.
701 | fused_activation_function:ActivationFunctionType;
702 | cell_clip: float; // Optional, 0.0 means no clipping
703 | proj_clip: float; // Optional, 0.0 means no clipping
704 |
705 | // Parameters for LSTM version 2 or above.
706 | // Basic kernel is only supported in version 2 or above.
707 | kernel_type: LSTMKernelType = FULL;
708 |
709 | // Parameters for LSTM version 4 or above.
710 | asymmetric_quantize_inputs: bool;
711 | }
712 |
713 | // An implementation of TensorFlow dynamic_rnn with LSTMCell.
714 | table UnidirectionalSequenceLSTMOptions {
715 | fused_activation_function:ActivationFunctionType;
716 | cell_clip: float; // Optional, 0.0 means no clipping
717 | proj_clip: float; // Optional, 0.0 means no clipping
718 |
719 | // If true then first dimension is sequence, otherwise batch.
720 | time_major:bool;
721 |
722 | // Parameter for Unidirectional Sequence LSTM version 4.
723 | asymmetric_quantize_inputs:bool;
724 | }
725 |
726 | table BidirectionalSequenceLSTMOptions {
727 | // Parameters supported by version 1:
728 | fused_activation_function:ActivationFunctionType;
729 | cell_clip: float; // Optional, 0.0 means no clipping
730 | proj_clip: float; // Optional, 0.0 means no clipping
731 |
732 | // If true, store the outputs of both directions into the first output.
733 | merge_outputs: bool;
734 |
735 | // Parameters supported by version 2:
736 | // If true then first dimension is sequence, otherwise batch.
737 | // Version 1 implementations assumed time_major to be true, so this default
738 | // value should never change.
739 | time_major: bool = true;
740 |
741 | // Parameters for version 3 or above.
742 | asymmetric_quantize_inputs:bool;
743 | }
744 |
745 | table ResizeBilinearOptions {
746 | new_height: int (deprecated);
747 | new_width: int (deprecated);
748 | align_corners: bool;
749 | half_pixel_centers: bool;
750 | }
751 |
752 | table ResizeNearestNeighborOptions {
753 | align_corners: bool;
754 | half_pixel_centers: bool;
755 | }
756 |
757 | // Options for a call operation.
758 | table CallOptions {
759 | // The subgraph index that needs to be called.
760 | subgraph:uint;
761 | }
762 |
763 | table PadOptions {
764 | }
765 |
766 | table PadV2Options {
767 | }
768 |
769 | table ReshapeOptions {
770 | new_shape:[int];
771 | }
772 |
773 | table SpaceToBatchNDOptions {
774 | }
775 |
776 | table BatchToSpaceNDOptions {
777 | }
778 |
779 | table SkipGramOptions {
780 | ngram_size: int;
781 | max_skip_size: int;
782 | include_all_ngrams: bool;
783 | }
784 |
785 | table SpaceToDepthOptions {
786 | block_size: int;
787 | }
788 |
789 | table DepthToSpaceOptions {
790 | block_size: int;
791 | }
792 |
793 | table SubOptions {
794 | fused_activation_function:ActivationFunctionType;
795 | // Parameters supported by version 5
796 | pot_scale_int16:bool = true;
797 | }
798 |
799 | table DivOptions {
800 | fused_activation_function:ActivationFunctionType;
801 | }
802 |
803 | table TopKV2Options {
804 | }
805 |
806 | enum CombinerType : byte {
807 | SUM = 0,
808 | MEAN = 1,
809 | SQRTN = 2,
810 | }
811 |
812 | table EmbeddingLookupSparseOptions {
813 | combiner:CombinerType;
814 | }
815 |
816 | table GatherOptions {
817 | axis: int;
818 | // Parameters for Gather version 5 or above.
819 | batch_dims: int = 0;
820 | }
821 |
822 | table TransposeOptions {
823 | }
824 |
825 | table ExpOptions {
826 | }
827 |
828 | table CosOptions {
829 | }
830 |
831 | table ReducerOptions {
832 | keep_dims: bool;
833 | }
834 |
835 | table SqueezeOptions {
836 | squeeze_dims:[int];
837 | }
838 |
839 | table SplitOptions {
840 | num_splits: int;
841 | }
842 |
843 | table SplitVOptions {
844 | num_splits: int;
845 | }
846 |
847 | table StridedSliceOptions {
848 | begin_mask: int;
849 | end_mask: int;
850 | ellipsis_mask: int;
851 | new_axis_mask: int;
852 | shrink_axis_mask: int;
853 | }
854 |
855 | table LogSoftmaxOptions {
856 | }
857 |
858 | table CastOptions {
859 | in_data_type: TensorType;
860 | out_data_type: TensorType;
861 | }
862 |
863 | table DequantizeOptions {
864 | }
865 |
866 | table MaximumMinimumOptions {
867 | }
868 |
869 | table TileOptions {
870 | }
871 |
872 | table ArgMaxOptions {
873 | output_type : TensorType;
874 | }
875 |
876 | table ArgMinOptions {
877 | output_type : TensorType;
878 | }
879 |
880 | table GreaterOptions {
881 | }
882 |
883 | table GreaterEqualOptions {
884 | }
885 |
886 | table LessOptions {
887 | }
888 |
889 | table LessEqualOptions {
890 | }
891 |
892 | table NegOptions {
893 | }
894 |
895 | table SelectOptions {
896 | }
897 |
898 | table SliceOptions {
899 | }
900 |
901 | table TransposeConvOptions {
902 | padding:Padding;
903 | stride_w:int;
904 | stride_h:int;
905 | }
906 |
907 | table ExpandDimsOptions {
908 | }
909 |
910 | table SparseToDenseOptions {
911 | validate_indices:bool;
912 | }
913 |
914 | table EqualOptions {
915 | }
916 |
917 | table NotEqualOptions {
918 | }
919 |
920 | table ShapeOptions {
921 | // Optional output type of the operation (int32 or int64). Defaults to int32.
922 | out_type : TensorType;
923 | }
924 |
925 | table RankOptions {
926 | }
927 |
928 | table PowOptions {
929 | }
930 |
931 | table FakeQuantOptions {
932 | // Parameters supported by version 1:
933 | min:float;
934 | max:float;
935 | num_bits:int;
936 |
937 | // Parameters supported by version 2:
938 | narrow_range:bool;
939 | }
940 |
941 | table PackOptions {
942 | values_count:int;
943 | axis:int;
944 | }
945 |
946 | table LogicalOrOptions {
947 | }
948 |
949 | table OneHotOptions {
950 | axis:int;
951 | }
952 |
953 | table AbsOptions {
954 | }
955 |
956 |
957 | table HardSwishOptions {
958 | }
959 |
960 | table LogicalAndOptions {
961 | }
962 |
963 | table LogicalNotOptions {
964 | }
965 |
966 | table UnpackOptions {
967 | num:int;
968 | axis:int;
969 | }
970 |
971 | table FloorDivOptions {
972 | }
973 |
974 | table SquareOptions {
975 | }
976 |
977 | table ZerosLikeOptions {
978 | }
979 |
980 | table FillOptions {
981 | }
982 |
983 | table FloorModOptions {
984 | }
985 |
986 | table RangeOptions {
987 | }
988 |
989 | table LeakyReluOptions {
990 | alpha:float;
991 | }
992 |
993 | table SquaredDifferenceOptions {
994 | }
995 |
996 | // LINT.IfChange
997 | enum MirrorPadMode : byte {
998 | // Doesn't include borders.
999 | REFLECT = 0,
1000 | // Includes borders.
1001 | SYMMETRIC = 1,
1002 | }
1003 | // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
1004 |
1005 | table MirrorPadOptions {
1006 | mode:MirrorPadMode;
1007 | }
1008 |
1009 | table UniqueOptions {
1010 | idx_out_type:TensorType = INT32;
1011 | }
1012 |
1013 | table ReverseV2Options {
1014 | }
1015 |
1016 | table AddNOptions {
1017 | }
1018 |
1019 | table GatherNdOptions {
1020 | }
1021 |
1022 | table WhereOptions {
1023 | }
1024 |
1025 | table ReverseSequenceOptions {
1026 | seq_dim:int;
1027 | batch_dim:int = 0;
1028 | }
1029 |
1030 | table MatrixDiagOptions {
1031 | }
1032 |
1033 | table QuantizeOptions {
1034 | }
1035 |
1036 | table MatrixSetDiagOptions {
1037 | }
1038 |
1039 | table IfOptions {
1040 | then_subgraph_index:int;
1041 | else_subgraph_index:int;
1042 | }
1043 |
1044 | table CallOnceOptions {
1045 | init_subgraph_index:int;
1046 | }
1047 |
1048 | table WhileOptions {
1049 | cond_subgraph_index:int;
1050 | body_subgraph_index:int;
1051 | }
1052 |
1053 | table NonMaxSuppressionV4Options {
1054 | }
1055 |
1056 | table NonMaxSuppressionV5Options {
1057 | }
1058 |
1059 | table ScatterNdOptions {
1060 | }
1061 |
1062 | table SelectV2Options {
1063 | }
1064 |
1065 | table DensifyOptions {
1066 | }
1067 |
1068 | table SegmentSumOptions {
1069 | }
1070 |
1071 | table BatchMatMulOptions {
1072 | adj_x:bool;
1073 | adj_y:bool;
1074 | // Parameters for BatchMatMul version 4 or above.
1075 |   // If set to true, then a weights-only op will use asymmetric quantization
1076 |   // for its inputs.
1077 | asymmetric_quantize_inputs: bool;
1078 | }
1079 |
1080 | table CumsumOptions {
1081 | exclusive:bool;
1082 | reverse:bool;
1083 | }
1084 |
1085 | table BroadcastToOptions {
1086 | }
1087 |
1088 | table Rfft2dOptions {
1089 | }
1090 |
1091 | table HashtableOptions {
1092 | // The identity of hash tables. This identity will be used across different
1093 | // subgraphs in the same interpreter instance.
1094 | table_id:int;
1095 | key_dtype:TensorType;
1096 | value_dtype:TensorType;
1097 | }
1098 |
1099 | table HashtableFindOptions {
1100 | }
1101 |
1102 | table HashtableImportOptions {
1103 | }
1104 |
1105 | table HashtableSizeOptions {
1106 | }
1107 |
1108 | table VarHandleOptions {
1109 | container:string;
1110 | shared_name:string;
1111 | }
1112 |
1113 | table ReadVariableOptions {
1114 | }
1115 |
1116 | table AssignVariableOptions {
1117 | }
1118 |
1119 | table RandomOptions {
1120 | seed: long;
1121 | seed2: long;
1122 | }
1123 |
1124 | table BucketizeOptions {
1125 | boundaries: [float]; // The bucket boundaries.
1126 | }
1127 |
1128 | table GeluOptions {
1129 | approximate: bool;
1130 | }
1131 |
1132 | table DynamicUpdateSliceOptions {
1133 | }
1134 |
1135 | table UnsortedSegmentProdOptions {
1136 | }
1137 |
1138 | table UnsortedSegmentMaxOptions {
1139 | }
1140 |
1141 | table UnsortedSegmentSumOptions {
1142 | }
1143 |
1144 | table ATan2Options {
1145 | }
1146 |
1147 |
1148 | // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
1149 | // builtin, or a string if the operator is custom.
1150 | table OperatorCode {
1151 |   // This field is for backward compatibility. It is used when the value of
1152 |   // the extended builtin_code field is less than
1153 |   // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
1154 | deprecated_builtin_code:byte;
1155 | custom_code:string;
1156 |
1157 |   // The version of the operator. The version needs to be bumped whenever new
1158 | // parameters are introduced into an op.
1159 | version:int = 1;
1160 |
1161 |   // This field was introduced to resolve the op builtin code shortage problem
1162 |   // (the original BuiltinOperator enum field was represented as a byte).
1163 |   // It is used when the value of the extended builtin_code field is greater
1164 |   // than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
1165 | builtin_code:BuiltinOperator;
1166 | }
1167 |
1168 | enum CustomOptionsFormat : byte {
1169 | FLEXBUFFERS = 0,
1170 | }
1171 |
1172 | // An operator takes tensors as inputs and outputs. The type of operation being
1173 | // performed is determined by an index into the list of valid OperatorCodes,
1174 | // while the specifics of each operation are configured using builtin_options
1175 | // or custom_options.
1176 | table Operator {
1177 | // Index into the operator_codes array. Using an integer here avoids
1178 |   // complicated map lookups.
1179 | opcode_index:uint;
1180 |
1181 |   // Optional inputs are indicated by -1.
1182 | inputs:[int];
1183 | outputs:[int];
1184 |
1185 | builtin_options:BuiltinOptions;
1186 | custom_options:[ubyte];
1187 | custom_options_format:CustomOptionsFormat;
1188 |
1189 | // A list of booleans indicating the input tensors which are being mutated by
1190 |   // this operator (e.g. used by RNN and LSTM).
1191 | // For example, if the "inputs" array refers to 5 tensors and the second and
1192 | // fifth are mutable variables, then this list will contain
1193 | // [false, true, false, false, true].
1194 | //
1195 | // If the list is empty, no variable is mutated in this operator.
1196 | // The list either has the same length as `inputs`, or is empty.
1197 | mutating_variable_inputs:[bool];
1198 |
1199 | // A list of indices to the subgraph's "tensors" that are internal to an Op.
1200 | // Internal tensors are those that do not flow in or out of the operation,
1201 | // but instead are part of internal computation. As such, the operation's
1202 | // implementation may manage its memory more efficiently. They are needed
1203 | // however (i.e. not just an implementation detail) since they are part of the
1204 | // computation, which may require relevant metadata such as quantization
1205 | // parameters.
1206 | intermediates:[int];
1207 | }
1208 |
1209 | // A subgraph, which typically represents an entire model (the file's root
1210 | // type is Model, defined below).
1211 | table SubGraph {
1212 | // A list of all tensors used in this subgraph.
1213 | tensors:[Tensor];
1214 |
1215 | // Indices of the tensors that are inputs into this subgraph. Note this is
1216 | // the list of non-static tensors that feed into the subgraph for inference.
1217 | inputs:[int];
1218 |
1219 | // Indices of the tensors that are outputs out of this subgraph. Note this is
1220 | // the list of output tensors that are considered the product of the
1221 | // subgraph's inference.
1222 | outputs:[int];
1223 |
1224 | // All operators, in execution order.
1225 | operators:[Operator];
1226 |
1227 | // Name of this subgraph (used for debugging).
1228 | name:string;
1229 | }
1230 |
1231 | // Table of raw data buffers (used for constant tensors). Referenced by tensors
1232 | // by index. The generous alignment accommodates mmap-friendly data structures.
1233 | table Buffer {
1234 | data:[ubyte] (force_align: 16);
1235 | }
1236 |
1237 | table Metadata {
1238 | // A human readable string to uniquely identify a Metadata.
1239 | name:string;
1240 | // An index to the buffers table.
1241 | buffer:uint;
1242 | }
1243 |
1244 | // Map from an alias name of tensor to tensor index in the graph.
1245 | // This is used in Signature def.
1246 | table TensorMap {
1247 | // Represents the alias to use for this tensor.
1248 | name:string;
1249 |
1250 |   // The actual tensor index in the primary graph that 'name' corresponds to.
1251 | tensor_index:uint;
1252 | }
1253 |
1254 | // This corresponds to SignatureDef in TensorFlow SavedModel.
1255 | // The SignatureDef will be part of the SavedModel provided for conversion.
1256 | table SignatureDef {
1257 | // Named inputs for this signature.
1258 | inputs:[TensorMap];
1259 |
1260 | // Named outputs for this signature.
1261 | outputs:[TensorMap];
1262 |
1263 |   // Key value which was in the TensorFlow SavedModel SignatureDef map.
1264 | signature_key:string;
1265 |
1266 | // Model tag, deprecated.
1267 | deprecated_tag:string (deprecated);
1268 |
1269 |   // Index of the subgraph that corresponds to the exported method.
1270 | subgraph_index:uint;
1271 | }
1272 |
1273 | table Model {
1274 | // Version of the schema.
1275 | version:uint;
1276 |
1277 | // A list of all operator codes used in this model. This is
1278 | // kept in order because operators carry an index into this
1279 | // vector.
1280 | operator_codes:[OperatorCode];
1281 |
1282 | // All the subgraphs of the model. The 0th is assumed to be the main
1283 | // model.
1284 | subgraphs:[SubGraph];
1285 |
1286 | // A description of the model.
1287 | description:string;
1288 |
1289 | // Buffers of the model.
1290 | // Note the 0th entry of this array must be an empty buffer (sentinel).
1291 | // This is a convention so that tensors without a buffer can provide 0 as
1292 | // their buffer.
1293 | buffers:[Buffer];
1294 |
1295 |   // Metadata about the model. Indirects into the existing buffers list.
1296 | // Deprecated, prefer to use metadata field.
1297 | metadata_buffer:[int];
1298 |
1299 | // Metadata about the model.
1300 | metadata:[Metadata];
1301 |
1302 | // Optional SignatureDefs for the model.
1303 | signature_defs:[SignatureDef];
1304 | }
1305 |
1306 | root_type Model;
1307 |
--------------------------------------------------------------------------------
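Two rules in the schema above are stated only in comments: how a reader reconciles OperatorCode.deprecated_builtin_code with the extended builtin_code field, and how a multi-dimensional index maps to a flat buffer offset. The Python sketch below illustrates both; the function names are hypothetical, and the max()-based resolution is one reasonable reading of the OperatorCode comments (old writers fill only the byte-sized deprecated field, newer writers put large values in builtin_code), not the library's actual API.

# Illustrative sketch only; not part of the schema or of tflite2tensorflow.
PLACEHOLDER_FOR_GREATER_OP_CODES = 127  # BuiltinOperator value from the enum above

def effective_builtin_code(deprecated_builtin_code: int, builtin_code: int) -> int:
    # Whichever field carries the real (larger) value wins; values below the
    # placeholder fit in the deprecated byte field, larger ones do not.
    return max(deprecated_builtin_code, builtin_code)

def flat_index(shape, index):
    # Row-major offset per the Tensor comment: for shape [4, 3, 2],
    # [i, j, k] maps to i*3*2 + j*2 + k.
    offset = 0
    for dim, i in zip(shape, index):
        offset = offset * dim + i
    return offset

assert flat_index([4, 3, 2], (1, 2, 1)) == 1*3*2 + 2*2 + 1  # == 11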
/schema/schema_v0.fbs:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | namespace tflite;
16 |
17 | // The type of data stored in a tensor.
18 | enum TensorType : byte {
19 | FLOAT32 = 0,
20 | FLOAT16 = 1,
21 | INT32 = 2,
22 | UINT8 = 3,
23 | INT64 = 4,
24 | }
25 |
26 | // Parameters for converting a quantized tensor back to float. Given a
27 | // quantized value q, the corresponding float value f should be:
28 | // f = scale * (q - zero_point)
29 | table QuantizationParameters {
30 | min:[float]; // For importing back into tensorflow.
31 | max:[float]; // For importing back into tensorflow.
32 | scale:[float];
33 | zero_point:[long];
34 | }
35 |
36 | table Tensor {
37 | // The tensor shape. The meaning of each entry is operator-specific but
38 | // builtin ops use: [batch size, height, width, number of channels] (That's
39 |   // TensorFlow's NHWC).
40 | shape:[int];
41 | type:TensorType;
42 | // The data_buffer is an opaque container, with the assumption that the
43 | // target device is little-endian. In addition, all builtin operators assume
44 | // the memory is ordered such that if `shape` is [4, 3, 2], then index
45 |   // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
46 | data_buffer:[ubyte];
47 | name:string; // For debugging and importing back into tensorflow.
48 | quantization:QuantizationParameters; // Optional.
49 | }
50 |
51 | // A list of builtin operators. Builtin operators are slightly faster than custom
52 | // ones, but not by much. Moreover, while custom operators accept an opaque
53 | // object containing configuration parameters, builtins have a predetermined
54 | // set of acceptable options.
55 | enum BuiltinOperator : byte {
56 | CUSTOM = 0,
57 | CONVOLUTION = 1,
58 | DEPTHWISE_CONVOLUTION = 2,
59 | CONCAT_EMBEDDINGS = 3,
60 | LSH_PROJECTION = 4,
61 | TANH = 5,
62 | RELU = 6,
63 | AVERAGE_POOL = 7,
64 | MAX_POOL = 8,
65 | L2_POOL = 9,
66 | SIGMOID = 10,
67 | SVDF = 11,
68 | BasicRNN = 12,
69 | RELU6 = 13,
70 | EMBEDDING_LOOKUP = 14,
71 | FULLY_CONNECTED = 15,
72 | HASHTABLE_LOOKUP = 16,
73 | SOFTMAX = 17,
74 | CONCATENATION = 18,
75 | LSTM = 19,
76 | ADD = 20,
77 | L2NORM = 21,
78 | LOCAL_RESPONSE_NORM = 22,
79 | RESIZE_BILINEAR = 23,
80 | }
81 |
82 | // Options for the builtin operators.
83 | union BuiltinOptions {
84 | ConvolutionOptions,
85 | DepthwiseConvolutionOptions,
86 | ConcatEmbeddingsOptions,
87 | LSHProjectionOptions,
88 | PoolOptions,
89 | SVDFOptions,
90 | BasicRNNOptions,
91 | FullyConnectedOptions,
92 | SoftmaxOptions,
93 | ConcatenationOptions,
94 | AddOptions,
95 | L2NormOptions,
96 | LocalResponseNormOptions,
97 | LSTMOptions,
98 | ResizeBilinearOptions,
99 | }
100 |
101 | enum Padding : byte { SAME, VALID }
102 |
103 | enum ActivationFunctionType : byte {
104 | NONE = 0,
105 | RELU = 1,
106 | RELU1 = 2,
107 | RELU6 = 3,
108 | TANH = 4,
109 | SIGN_BIT = 5,
110 | }
111 |
112 | table ConvolutionOptions {
113 | padding:Padding;
114 | stride_w:int;
115 | stride_h:int;
116 | fused_activation_function:ActivationFunctionType;
117 | }
118 |
119 | table PoolOptions {
120 | padding:Padding;
121 | stride_w:int;
122 | stride_h:int;
123 | filter_width:int;
124 | filter_height:int;
125 | fused_activation_function:ActivationFunctionType;
126 | }
127 |
128 | table DepthwiseConvolutionOptions {
129 | padding:Padding;
130 | stride_w:int;
131 | stride_h:int;
132 | depth_multiplier:int;
133 | fused_activation_function:ActivationFunctionType;
134 | }
135 |
136 | table ConcatEmbeddingsOptions {
137 | num_channels:int;
138 | num_columns_per_channel:[int];
139 | embedding_dim_per_channel:[int]; // This could be inferred from parameters.
140 | }
141 |
142 | enum LSHProjectionType: byte {
143 | UNKNOWN = 0,
144 | SPARSE = 1,
145 | DENSE = 2,
146 | }
147 |
148 | table LSHProjectionOptions {
149 | type: LSHProjectionType;
150 | }
151 |
152 | table SVDFOptions {
153 | rank:int;
154 | fused_activation_function:ActivationFunctionType;
155 | }
156 |
157 | // An implementation of TensorFlow BasicRNNCell.
158 | table BasicRNNOptions {
159 | fused_activation_function:ActivationFunctionType;
160 | }
161 |
162 | // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
163 | table FullyConnectedOptions {
164 | fused_activation_function:ActivationFunctionType;
165 | }
166 |
167 | table SoftmaxOptions {
168 | beta: float;
169 | }
170 |
171 | // An implementation of TensorFlow concat.
172 | table ConcatenationOptions {
173 | axis:int;
174 | fused_activation_function:ActivationFunctionType;
175 | }
176 |
177 | table AddOptions {
178 | fused_activation_function:ActivationFunctionType;
179 | }
180 |
181 | table L2NormOptions {
182 | fused_activation_function:ActivationFunctionType;
183 | }
184 |
185 | table LocalResponseNormOptions {
186 | radius:int;
187 | bias:float;
188 | alpha:float;
189 | beta:float;
190 | }
191 |
192 | // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
193 | table LSTMOptions {
194 | fused_activation_function:ActivationFunctionType;
195 | cell_clip: float; // Optional, 0.0 means no clipping
196 | proj_clip: float; // Optional, 0.0 means no clipping
197 | }
198 |
199 | table ResizeBilinearOptions {
200 | new_height:int;
201 | new_width:int;
202 | }
203 |
204 | // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
205 | // builtin, or a string if the operator is custom.
206 | table OperatorCode {
207 | builtin_code:BuiltinOperator;
208 | custom_code:string;
209 | }
210 |
211 | // An operator takes tensors as inputs and outputs. The type of operation being
212 | // performed is determined by an index into the list of valid OperatorCodes,
213 | // while the specifics of each operation are configured using builtin_options
214 | // or custom_options.
215 | table Operator {
216 | // Index into the operator_codes array. Using an integer here avoids
217 |   // complicated map lookups.
218 | opcode_index:int;
219 |
220 | inputs:[int];
221 | outputs:[int];
222 |
223 | builtin_options:BuiltinOptions;
224 | custom_options:[ubyte];
225 | }
226 |
227 | // The root type, defining a model.
228 | table Model {
229 | // A list of all tensors used in this model.
230 | tensors:[Tensor];
231 |
232 | // Indices of the input tensors.
233 | inputs:[int];
234 |
235 | // Indices of the output tensors.
236 | outputs:[int];
237 |
238 | // A list of all operator codes used in this model. This is
239 | // kept in order because operators carry an index into this
240 | // vector.
241 | operator_codes:[OperatorCode];
242 |
243 | // All operators, in execution order.
244 | operators:[Operator];
245 | }
246 |
247 | root_type Model;
248 |
--------------------------------------------------------------------------------
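A quick worked example of the dequantization rule stated in the QuantizationParameters comment above, f = scale * (q - zero_point); the numbers are invented for illustration.

# Minimal sketch of the dequantization formula; values are made up.
scale, zero_point = 0.5, 128   # per-tensor quantization parameters
q = 130                        # a stored uint8 value
f = scale * (q - zero_point)   # dequantized float value
assert f == 1.0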
/schema/schema_v1.fbs:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Revision History
16 | // Version 0: Initial version.
17 | // Version 1: Add subgraphs to schema.
18 |
19 | namespace tflite;
20 |
21 | // The type of data stored in a tensor.
22 | enum TensorType : byte {
23 | FLOAT32 = 0,
24 | FLOAT16 = 1,
25 | INT32 = 2,
26 | UINT8 = 3,
27 | INT64 = 4,
28 | STRING = 5,
29 | }
30 |
31 | // Parameters for converting a quantized tensor back to float. Given a
32 | // quantized value q, the corresponding float value f should be:
33 | // f = scale * (q - zero_point)
34 | table QuantizationParameters {
35 | min:[float]; // For importing back into tensorflow.
36 | max:[float]; // For importing back into tensorflow.
37 | scale:[float];
38 | zero_point:[long];
39 | }
40 |
41 | table Tensor {
42 | // The tensor shape. The meaning of each entry is operator-specific but
43 | // builtin ops use: [batch size, height, width, number of channels] (That's
44 |   // TensorFlow's NHWC).
45 | shape:[int];
46 | type:TensorType;
47 | // The data_buffer is an opaque container, with the assumption that the
48 | // target device is little-endian. In addition, all builtin operators assume
49 | // the memory is ordered such that if `shape` is [4, 3, 2], then index
50 |   // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
51 | data_buffer:[ubyte];
52 | name:string; // For debugging and importing back into tensorflow.
53 | quantization:QuantizationParameters; // Optional.
54 | }
55 |
56 | // A list of builtin operators. Builtin operators are slightly faster than custom
57 | // ones, but not by much. Moreover, while custom operators accept an opaque
58 | // object containing configuration parameters, builtins have a predetermined
59 | // set of acceptable options.
60 | enum BuiltinOperator : byte {
61 | CUSTOM = 0,
62 | CONVOLUTION = 1,
63 | DEPTHWISE_CONVOLUTION = 2,
64 | CONCAT_EMBEDDINGS = 3,
65 | LSH_PROJECTION = 4,
66 | TANH = 5,
67 | RELU = 6,
68 | AVERAGE_POOL = 7,
69 | MAX_POOL = 8,
70 | L2_POOL = 9,
71 | SIGMOID = 10,
72 | SVDF = 11,
73 | BasicRNN = 12,
74 | RELU6 = 13,
75 | EMBEDDING_LOOKUP = 14,
76 | FULLY_CONNECTED = 15,
77 | HASHTABLE_LOOKUP = 16,
78 | SOFTMAX = 17,
79 | CONCATENATION = 18,
80 | LSTM = 19,
81 | ADD = 20,
82 | L2NORM = 21,
83 | LOCAL_RESPONSE_NORM = 22,
84 | RESIZE_BILINEAR = 23,
85 | CALL = 24,
86 | RESHAPE = 25,
87 | SKIP_GRAM = 26,
88 | SPACE_TO_DEPTH = 27,
89 | }
90 |
91 | // Options for the builtin operators.
92 | union BuiltinOptions {
93 | ConvolutionOptions,
94 | DepthwiseConvolutionOptions,
95 | ConcatEmbeddingsOptions,
96 | LSHProjectionOptions,
97 | PoolOptions,
98 | SVDFOptions,
99 | BasicRNNOptions,
100 | FullyConnectedOptions,
101 | SoftmaxOptions,
102 | ConcatenationOptions,
103 | AddOptions,
104 | L2NormOptions,
105 | LocalResponseNormOptions,
106 | LSTMOptions,
107 | ResizeBilinearOptions,
108 | CallOptions,
109 | ReshapeOptions,
110 | SkipGramOptions,
111 | SpaceToDepthOptions,
112 | }
113 |
114 | enum Padding : byte { SAME, VALID }
115 |
116 | enum ActivationFunctionType : byte {
117 | NONE = 0,
118 | RELU = 1,
119 | RELU1 = 2,
120 | RELU6 = 3,
121 | TANH = 4,
122 | SIGN_BIT = 5,
123 | }
124 |
125 | table ConvolutionOptions {
126 | padding:Padding;
127 | stride_w:int;
128 | stride_h:int;
129 | fused_activation_function:ActivationFunctionType;
130 | }
131 |
132 | table PoolOptions {
133 | padding:Padding;
134 | stride_w:int;
135 | stride_h:int;
136 | filter_width:int;
137 | filter_height:int;
138 | fused_activation_function:ActivationFunctionType;
139 | }
140 |
141 | table DepthwiseConvolutionOptions {
142 | padding:Padding;
143 | stride_w:int;
144 | stride_h:int;
145 | depth_multiplier:int;
146 | fused_activation_function:ActivationFunctionType;
147 | }
148 |
149 | table ConcatEmbeddingsOptions {
150 | num_channels:int;
151 | num_columns_per_channel:[int];
152 | embedding_dim_per_channel:[int]; // This could be inferred from parameters.
153 | }
154 |
155 | enum LSHProjectionType: byte {
156 | UNKNOWN = 0,
157 | SPARSE = 1,
158 | DENSE = 2,
159 | }
160 |
161 | table LSHProjectionOptions {
162 | type: LSHProjectionType;
163 | }
164 |
165 | table SVDFOptions {
166 | rank:int;
167 | fused_activation_function:ActivationFunctionType;
168 | }
169 |
170 | // An implementation of TensorFlow BasicRNNCell.
171 | table BasicRNNOptions {
172 | fused_activation_function:ActivationFunctionType;
173 | }
174 |
175 | // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
176 | table FullyConnectedOptions {
177 | fused_activation_function:ActivationFunctionType;
178 | }
179 |
180 | table SoftmaxOptions {
181 | beta: float;
182 | }
183 |
184 | // An implementation of TensorFlow concat.
185 | table ConcatenationOptions {
186 | axis:int;
187 | fused_activation_function:ActivationFunctionType;
188 | }
189 |
190 | table AddOptions {
191 | fused_activation_function:ActivationFunctionType;
192 | }
193 |
194 | table L2NormOptions {
195 | fused_activation_function:ActivationFunctionType;
196 | }
197 |
198 | table LocalResponseNormOptions {
199 | radius:int;
200 | bias:float;
201 | alpha:float;
202 | beta:float;
203 | }
204 |
205 | // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
206 | table LSTMOptions {
207 | fused_activation_function:ActivationFunctionType;
208 | cell_clip: float; // Optional, 0.0 means no clipping
209 | proj_clip: float; // Optional, 0.0 means no clipping
210 | }
211 |
212 | table ResizeBilinearOptions {
213 | new_height:int;
214 | new_width:int;
215 | }
216 |
217 | // Options for a call operation.
218 | table CallOptions {
219 | // The subgraph index that needs to be called.
220 | subgraph:int;
221 | }
222 |
223 | table ReshapeOptions {
224 | new_shape:[int];
225 | }
226 |
227 | table SkipGramOptions {
228 | ngram_size: int;
229 | max_skip_size: int;
230 | include_all_ngrams: bool;
231 | }
232 |
233 | table SpaceToDepthOptions {
234 | block_size: int;
235 | }
236 |
237 | // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
238 | // builtin, or a string if the operator is custom.
239 | table OperatorCode {
240 | builtin_code:BuiltinOperator;
241 | custom_code:string;
242 | }
243 |
244 | // An operator takes tensors as inputs and outputs. The type of operation being
245 | // performed is determined by an index into the list of valid OperatorCodes,
246 | // while the specifics of each operation are configured using builtin_options
247 | // or custom_options.
248 | table Operator {
249 | // Index into the operator_codes array. Using an integer here avoids
250 |   // complicated map lookups.
251 | opcode_index:int;
252 |
253 | inputs:[int];
254 | outputs:[int];
255 |
256 | builtin_options:BuiltinOptions;
257 | custom_options:[ubyte];
258 | }
259 |
260 | // A subgraph, which typically represents an entire model.
261 | table SubGraph {
262 |   // A list of all tensors used in this subgraph.
263 | tensors:[Tensor];
264 |
265 | // Indices of the input tensors.
266 | inputs:[int];
267 |
268 | // Indices of the output tensors.
269 | outputs:[int];
270 |
271 | // All operators, in execution order.
272 | operators:[Operator];
273 |
274 | // Name of subgraph (used for debugging).
275 | name:string;
276 | }
277 |
278 | table Model {
279 | // Version of the schema.
280 | version:int;
281 |
282 | // A list of all operator codes used in this model. This is
283 | // kept in order because operators carry an index into this
284 | // vector.
285 | operator_codes:[OperatorCode];
286 |
287 | // All the subgraphs of the model. The 0th is assumed to be the main
288 | // model.
289 | subgraphs:[SubGraph];
290 |
291 | // A description of the model.
292 | description:string;
293 | }
294 |
295 | root_type Model;
296 |
--------------------------------------------------------------------------------
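Version 1's main change is the SubGraph table, so a reader now walks Model -> subgraphs -> operators and resolves each operator through operator_codes. A minimal sketch over plain dicts (the layout only mirrors the schema; this is not a generated FlatBuffers API):

# Hypothetical dict layout, for illustration only.
model = {
    "operator_codes": [{"builtin_code": "CONVOLUTION"}, {"builtin_code": "SOFTMAX"}],
    "subgraphs": [{
        "operators": [  # already stored in execution order
            {"opcode_index": 0, "inputs": [0, 1], "outputs": [2]},
            {"opcode_index": 1, "inputs": [2], "outputs": [3]},
        ],
    }],
}
for graph in model["subgraphs"]:
    for op in graph["operators"]:
        # opcode_index points into the model-level operator_codes list.
        code = model["operator_codes"][op["opcode_index"]]["builtin_code"]
        print(code, op["inputs"], "->", op["outputs"])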
/schema/schema_v2.fbs:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Revision History
16 | // Version 0: Initial version.
17 | // Version 1: Add subgraphs to schema.
18 | // Version 2: Rename operators to conform to NN API.
19 |
20 | namespace tflite;
21 |
22 | // The type of data stored in a tensor.
23 | enum TensorType : byte {
24 | FLOAT32 = 0,
25 | FLOAT16 = 1,
26 | INT32 = 2,
27 | UINT8 = 3,
28 | INT64 = 4,
29 | STRING = 5,
30 | }
31 |
32 | // Parameters for converting a quantized tensor back to float. Given a
33 | // quantized value q, the corresponding float value f should be:
34 | // f = scale * (q - zero_point)
35 | table QuantizationParameters {
36 | min:[float]; // For importing back into tensorflow.
37 | max:[float]; // For importing back into tensorflow.
38 | scale:[float];
39 | zero_point:[long];
40 | }
41 |
42 | table Tensor {
43 | // The tensor shape. The meaning of each entry is operator-specific but
44 | // builtin ops use: [batch size, height, width, number of channels] (That's
45 |   // TensorFlow's NHWC).
46 | shape:[int];
47 | type:TensorType;
48 | // The data_buffer is an opaque container, with the assumption that the
49 | // target device is little-endian. In addition, all builtin operators assume
50 | // the memory is ordered such that if `shape` is [4, 3, 2], then index
51 |   // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
52 | data_buffer:[ubyte];
53 | name:string; // For debugging and importing back into tensorflow.
54 | quantization:QuantizationParameters; // Optional.
55 | }
56 |
57 | // A list of builtin operators. Builtin operators are slightly faster than custom
58 | // ones, but not by much. Moreover, while custom operators accept an opaque
59 | // object containing configuration parameters, builtins have a predetermined
60 | // set of acceptable options.
61 | enum BuiltinOperator : byte {
62 | ADD = 0,
63 | AVERAGE_POOL_2D = 1,
64 | CONCATENATION = 2,
65 | CONV_2D = 3,
66 | DEPTHWISE_CONV_2D = 4,
67 | // DEPTH_TO_SPACE = 5,
68 | // DEQUANTIZE = 6,
69 | EMBEDDING_LOOKUP = 7,
70 | // FLOOR = 8,
71 | FULLY_CONNECTED = 9,
72 | HASHTABLE_LOOKUP = 10,
73 | L2_NORMALIZATION = 11,
74 | L2_POOL_2D = 12,
75 | LOCAL_RESPONSE_NORMALIZATION = 13,
76 | LOGISTIC = 14,
77 | LSH_PROJECTION = 15,
78 | LSTM = 16,
79 | MAX_POOL_2D = 17,
80 | // MUL = 18,
81 | RELU = 19,
82 | // RELU1=20,
83 | RELU6 = 21,
84 | RESHAPE = 22,
85 | RESIZE_BILINEAR = 23,
86 | RNN = 24,
87 | SOFTMAX = 25,
88 | SPACE_TO_DEPTH = 26,
89 | SVDF = 27,
90 | TANH = 28,
91 | // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
92 | CONCAT_EMBEDDINGS = 29,
93 | SKIP_GRAM = 30,
94 | CALL = 31,
95 | CUSTOM = 32,
96 |
97 | }
98 |
99 | // Options for the builtin operators.
100 | union BuiltinOptions {
101 | Conv2DOptions,
102 | DepthwiseConv2DOptions,
103 | ConcatEmbeddingsOptions,
104 | LSHProjectionOptions,
105 | Pool2DOptions,
106 | SVDFOptions,
107 | RNNOptions,
108 | FullyConnectedOptions,
109 | SoftmaxOptions,
110 | ConcatenationOptions,
111 | AddOptions,
112 | L2NormOptions,
113 | LocalResponseNormalizationOptions,
114 | LSTMOptions,
115 | ResizeBilinearOptions,
116 | CallOptions,
117 | ReshapeOptions,
118 | SkipGramOptions,
119 | SpaceToDepthOptions,
120 | }
121 |
122 | enum Padding : byte { SAME, VALID }
123 |
124 | enum ActivationFunctionType : byte {
125 | NONE = 0,
126 | RELU = 1,
127 | RELU1 = 2,
128 | RELU6 = 3,
129 | TANH = 4,
130 | SIGN_BIT = 5,
131 | }
132 |
133 | table Conv2DOptions {
134 | padding:Padding;
135 | stride_w:int;
136 | stride_h:int;
137 | fused_activation_function:ActivationFunctionType;
138 | }
139 |
140 | table Pool2DOptions {
141 | padding:Padding;
142 | stride_w:int;
143 | stride_h:int;
144 | filter_width:int;
145 | filter_height:int;
146 | fused_activation_function:ActivationFunctionType;
147 | }
148 |
149 | table DepthwiseConv2DOptions {
150 | padding:Padding;
151 | stride_w:int;
152 | stride_h:int;
153 | depth_multiplier:int;
154 | fused_activation_function:ActivationFunctionType;
155 | }
156 |
157 | table ConcatEmbeddingsOptions {
158 | num_channels:int;
159 | num_columns_per_channel:[int];
160 | embedding_dim_per_channel:[int]; // This could be inferred from parameters.
161 | }
162 |
163 | enum LSHProjectionType: byte {
164 | UNKNOWN = 0,
165 | SPARSE = 1,
166 | DENSE = 2,
167 | }
168 |
169 | table LSHProjectionOptions {
170 | type: LSHProjectionType;
171 | }
172 |
173 | table SVDFOptions {
174 | rank:int;
175 | fused_activation_function:ActivationFunctionType;
176 | }
177 |
178 | // An implementation of TensorFlow RNNCell.
179 | table RNNOptions {
180 | fused_activation_function:ActivationFunctionType;
181 | }
182 |
183 | // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
184 | table FullyConnectedOptions {
185 | fused_activation_function:ActivationFunctionType;
186 | }
187 |
188 | table SoftmaxOptions {
189 | beta: float;
190 | }
191 |
192 | // An implementation of TensorFlow concat.
193 | table ConcatenationOptions {
194 | axis:int;
195 | fused_activation_function:ActivationFunctionType;
196 | }
197 |
198 | table AddOptions {
199 | fused_activation_function:ActivationFunctionType;
200 | }
201 |
202 | table L2NormOptions {
203 | fused_activation_function:ActivationFunctionType;
204 | }
205 |
206 | table LocalResponseNormalizationOptions {
207 | radius:int;
208 | bias:float;
209 | alpha:float;
210 | beta:float;
211 | }
212 |
213 | // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
214 | table LSTMOptions {
215 | fused_activation_function:ActivationFunctionType;
216 | cell_clip: float; // Optional, 0.0 means no clipping
217 | proj_clip: float; // Optional, 0.0 means no clipping
218 | }
219 |
220 | table ResizeBilinearOptions {
221 | new_height:int;
222 | new_width:int;
223 | }
224 |
225 | // Options for a call operation.
226 | table CallOptions {
227 | // The subgraph index that needs to be called.
228 | subgraph:int;
229 | }
230 |
231 | table ReshapeOptions {
232 | new_shape:[int];
233 | }
234 |
235 | table SkipGramOptions {
236 | ngram_size: int;
237 | max_skip_size: int;
238 | include_all_ngrams: bool;
239 | }
240 |
241 | table SpaceToDepthOptions {
242 | block_size: int;
243 | }
244 |
245 | // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
246 | // builtin, or a string if the operator is custom.
247 | table OperatorCode {
248 | builtin_code:BuiltinOperator;
249 | custom_code:string;
250 | }
251 |
252 | // An operator takes tensors as inputs and outputs. The type of operation being
253 | // performed is determined by an index into the list of valid OperatorCodes,
254 | // while the specifics of each operation are configured using builtin_options
255 | // or custom_options.
256 | table Operator {
257 | // Index into the operator_codes array. Using an integer here avoids
258 |   // complicated map lookups.
259 | opcode_index:int;
260 |
261 | inputs:[int];
262 | outputs:[int];
263 |
264 | builtin_options:BuiltinOptions;
265 | custom_options:[ubyte];
266 | }
267 |
268 | // A subgraph, which typically represents an entire model.
269 | table SubGraph {
270 |   // A list of all tensors used in this subgraph.
271 | tensors:[Tensor];
272 |
273 | // Indices of the input tensors.
274 | inputs:[int];
275 |
276 | // Indices of the output tensors.
277 | outputs:[int];
278 |
279 | // All operators, in execution order.
280 | operators:[Operator];
281 |
282 | // Name of subgraph (used for debugging).
283 | name:string;
284 | }
285 |
286 | table Model {
287 | // Version of the schema.
288 | version:int;
289 |
290 | // A list of all operator codes used in this model. This is
291 | // kept in order because operators carry an index into this
292 | // vector.
293 | operator_codes:[OperatorCode];
294 |
295 | // All the subgraphs of the model. The 0th is assumed to be the main
296 | // model.
297 | subgraphs:[SubGraph];
298 |
299 | // A description of the model.
300 | description:string;
301 | }
302 |
303 | root_type Model;
304 |
--------------------------------------------------------------------------------
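Version 2 renamed operators to conform to the NN API. Comparing the v1 and v2 enum listings above suggests the correspondence below; the numeric values also changed, so names rather than numbers must be compared. This mapping is inferred from the two listings, not taken from any official table.

# Name correspondence inferred from the v1 and v2 enums; illustrative only.
V1_TO_V2_RENAMES = {
    "CONVOLUTION": "CONV_2D",
    "DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D",
    "AVERAGE_POOL": "AVERAGE_POOL_2D",
    "MAX_POOL": "MAX_POOL_2D",
    "L2_POOL": "L2_POOL_2D",
    "SIGMOID": "LOGISTIC",
    "BasicRNN": "RNN",
    "L2NORM": "L2_NORMALIZATION",
    "LOCAL_RESPONSE_NORM": "LOCAL_RESPONSE_NORMALIZATION",
}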
/schema/schema_v3.fbs:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Revision History
16 | // Version 0: Initial version.
17 | // Version 1: Add subgraphs to schema.
18 | // Version 2: Rename operators to conform to NN API.
19 | // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
20 |
21 | namespace tflite;
22 |
23 | // This corresponds to the version (3).
24 | file_identifier "TFL3";
25 | // File extension of any written files.
26 | file_extension "tflite";
27 |
28 | // The type of data stored in a tensor.
29 | enum TensorType : byte {
30 | FLOAT32 = 0,
31 | FLOAT16 = 1,
32 | INT32 = 2,
33 | UINT8 = 3,
34 | INT64 = 4,
35 | STRING = 5,
36 | }
37 |
38 | // Parameters for converting a quantized tensor back to float. Given a
39 | // quantized value q, the corresponding float value f should be:
40 | // f = scale * (q - zero_point)
41 | table QuantizationParameters {
42 | min:[float]; // For importing back into tensorflow.
43 | max:[float]; // For importing back into tensorflow.
44 | scale:[float];
45 | zero_point:[long];
46 | }
47 |
48 | table Tensor {
49 | // The tensor shape. The meaning of each entry is operator-specific but
50 | // builtin ops use: [batch size, height, width, number of channels] (That's
51 |   // TensorFlow's NHWC).
52 | shape:[int];
53 | type:TensorType;
54 | // An index that refers to the buffers table at the root of the model. Or,
55 | // if there is no data buffer associated (i.e. intermediate results), then
56 | // this is 0 (which refers to an always existent empty buffer).
57 | //
58 | // The data_buffer itself is an opaque container, with the assumption that the
59 | // target device is little-endian. In addition, all builtin operators assume
60 | // the memory is ordered such that if `shape` is [4, 3, 2], then index
61 |   // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
62 | buffer:uint;
63 | name:string; // For debugging and importing back into tensorflow.
64 | quantization:QuantizationParameters; // Optional.
65 | }
66 |
67 | // A list of builtin operators. Builtin operators are slightly faster than custom
68 | // ones, but not by much. Moreover, while custom operators accept an opaque
69 | // object containing configuration parameters, builtins have a predetermined
70 | // set of acceptable options.
71 | enum BuiltinOperator : byte {
72 | ADD = 0,
73 | AVERAGE_POOL_2D = 1,
74 | CONCATENATION = 2,
75 | CONV_2D = 3,
76 | DEPTHWISE_CONV_2D = 4,
77 | // DEPTH_TO_SPACE = 5,
78 | // DEQUANTIZE = 6,
79 | EMBEDDING_LOOKUP = 7,
80 | // FLOOR = 8,
81 | FULLY_CONNECTED = 9,
82 | HASHTABLE_LOOKUP = 10,
83 | L2_NORMALIZATION = 11,
84 | L2_POOL_2D = 12,
85 | LOCAL_RESPONSE_NORMALIZATION = 13,
86 | LOGISTIC = 14,
87 | LSH_PROJECTION = 15,
88 | LSTM = 16,
89 | MAX_POOL_2D = 17,
90 | // MUL = 18,
91 | RELU = 19,
92 | // RELU1=20,
93 | RELU6 = 21,
94 | RESHAPE = 22,
95 | RESIZE_BILINEAR = 23,
96 | RNN = 24,
97 | SOFTMAX = 25,
98 | SPACE_TO_DEPTH = 26,
99 | SVDF = 27,
100 | TANH = 28,
101 | // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
102 | CONCAT_EMBEDDINGS = 29,
103 | SKIP_GRAM = 30,
104 | CALL = 31,
105 | CUSTOM = 32,
106 |
107 | }
108 |
109 | // Options for the builtin operators.
110 | union BuiltinOptions {
111 | Conv2DOptions,
112 | DepthwiseConv2DOptions,
113 | ConcatEmbeddingsOptions,
114 | LSHProjectionOptions,
115 | Pool2DOptions,
116 | SVDFOptions,
117 | RNNOptions,
118 | FullyConnectedOptions,
119 | SoftmaxOptions,
120 | ConcatenationOptions,
121 | AddOptions,
122 | L2NormOptions,
123 | LocalResponseNormalizationOptions,
124 | LSTMOptions,
125 | ResizeBilinearOptions,
126 | CallOptions,
127 | ReshapeOptions,
128 | SkipGramOptions,
129 | SpaceToDepthOptions,
130 | }
131 |
132 | enum Padding : byte { SAME, VALID }
133 |
134 | enum ActivationFunctionType : byte {
135 | NONE = 0,
136 | RELU = 1,
137 | RELU1 = 2,
138 | RELU6 = 3,
139 | TANH = 4,
140 | SIGN_BIT = 5,
141 | }
142 |
143 | table Conv2DOptions {
144 | padding:Padding;
145 | stride_w:int;
146 | stride_h:int;
147 | fused_activation_function:ActivationFunctionType;
148 | }
149 |
150 | table Pool2DOptions {
151 | padding:Padding;
152 | stride_w:int;
153 | stride_h:int;
154 | filter_width:int;
155 | filter_height:int;
156 | fused_activation_function:ActivationFunctionType;
157 | }
158 |
159 | table DepthwiseConv2DOptions {
160 | padding:Padding;
161 | stride_w:int;
162 | stride_h:int;
163 | depth_multiplier:int;
164 | fused_activation_function:ActivationFunctionType;
165 | }
166 |
167 | table ConcatEmbeddingsOptions {
168 | num_channels:int;
169 | num_columns_per_channel:[int];
170 | embedding_dim_per_channel:[int]; // This could be inferred from parameters.
171 | }
172 |
173 | enum LSHProjectionType: byte {
174 | UNKNOWN = 0,
175 | SPARSE = 1,
176 | DENSE = 2,
177 | }
178 |
179 | table LSHProjectionOptions {
180 | type: LSHProjectionType;
181 | }
182 |
183 | table SVDFOptions {
184 | rank:int;
185 | fused_activation_function:ActivationFunctionType;
186 | }
187 |
188 | // An implementation of TensorFlow RNNCell.
189 | table RNNOptions {
190 | fused_activation_function:ActivationFunctionType;
191 | }
192 |
193 | // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
194 | table FullyConnectedOptions {
195 | fused_activation_function:ActivationFunctionType;
196 | }
197 |
198 | table SoftmaxOptions {
199 | beta: float;
200 | }
201 |
202 | // An implementation of TensorFlow concat.
203 | table ConcatenationOptions {
204 | axis:int;
205 | fused_activation_function:ActivationFunctionType;
206 | }
207 |
208 | table AddOptions {
209 | fused_activation_function:ActivationFunctionType;
210 | }
211 |
212 | table L2NormOptions {
213 | fused_activation_function:ActivationFunctionType;
214 | }
215 |
216 | table LocalResponseNormalizationOptions {
217 | radius:int;
218 | bias:float;
219 | alpha:float;
220 | beta:float;
221 | }
222 |
223 | // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
224 | table LSTMOptions {
225 | fused_activation_function:ActivationFunctionType;
226 | cell_clip: float; // Optional, 0.0 means no clipping
227 | proj_clip: float; // Optional, 0.0 means no clipping
228 | }
229 |
230 | table ResizeBilinearOptions {
231 | new_height:int;
232 | new_width:int;
233 | }
234 |
235 | // Options for a call operation.
236 | table CallOptions {
237 | // The subgraph index that needs to be called.
238 | subgraph:uint;
239 | }
240 |
241 | table ReshapeOptions {
242 | new_shape:[int];
243 | }
244 |
245 | table SkipGramOptions {
246 | ngram_size: int;
247 | max_skip_size: int;
248 | include_all_ngrams: bool;
249 | }
250 |
251 | table SpaceToDepthOptions {
252 | block_size: int;
253 | }
254 |
255 | // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
256 | // builtin, or a string if the operator is custom.
257 | table OperatorCode {
258 | builtin_code:BuiltinOperator;
259 | custom_code:string;
260 | }
261 |
262 | // An operator takes tensors as inputs and outputs. The type of operation being
263 | // performed is determined by an index into the list of valid OperatorCodes,
264 | // while the specifics of each operation are configured using builtin_options
265 | // or custom_options.
266 | table Operator {
267 | // Index into the operator_codes array. Using an integer here avoids
268 | // complicated map lookups.
269 | opcode_index:uint;
270 |
271 | inputs:[int];
272 | outputs:[int];
273 |
274 | builtin_options:BuiltinOptions;
275 | custom_options:[ubyte];
276 | }
277 |
278 | // A subgraph, which typically represents an entire model.
279 | table SubGraph {
280 | // A list of all tensors used in this model.
281 | tensors:[Tensor];
282 |
283 | // Indices of the input tensors.
284 | inputs:[int];
285 |
286 | // Indices of the output tensors.
287 | outputs:[int];
288 |
289 | // All operators, in execution order.
290 | operators:[Operator];
291 |
292 | // Name of subgraph (used for debugging).
293 | name:string;
294 | }
295 |
296 | // Table of raw data buffers (used for constant tensors). Referenced by tensors
297 | // by index.
298 | table Buffer {
299 | data:[ubyte];
300 | }
301 |
302 | table Model {
303 | // Version of the schema.
304 | version:uint;
305 |
306 | // A list of all operator codes used in this model. This is
307 | // kept in order because operators carry an index into this
308 | // vector.
309 | operator_codes:[OperatorCode];
310 |
311 | // All the subgraphs of the model. The 0th is assumed to be the main
312 | // model.
313 | subgraphs:[SubGraph];
314 |
315 | // A description of the model.
316 | description:string;
317 |
318 | // Buffers of the model.
319 | // NOTE: It is required that the first entry in here is always an empty
320 | // buffer. This is so that the default buffer index of zero in Tensor
321 | // will always refer to a valid empty buffer.
322 | buffers:[Buffer];
323 |
324 | }
325 |
326 | root_type Model;
327 |
--------------------------------------------------------------------------------
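
The Model → SubGraph → Tensor layout defined above is what TensorFlow's reader exposes at
runtime. A minimal sketch of inspecting a subgraph's I/O with the public Python API
(model.tflite is a placeholder path):

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()
# get_input_details()/get_output_details() surface SubGraph.inputs and
# SubGraph.outputs as dicts over the tensors table.
for d in interpreter.get_input_details():
    print('input :', d['name'], d['shape'], d['dtype'])
for d in interpreter.get_output_details():
    print('output:', d['name'], d['shape'], d['dtype'])
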
/schema/schema_v3a.fbs:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Revision History
16 | // Version 0: Initial version.
17 | // Version 1: Add subgraphs to schema.
18 | // Version 2: Rename operators to conform to NN API.
19 | // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
20 | // Version 3a: Add new builtin op code field. Has backward compatibility with
21 | // version 3.
22 |
23 | namespace tflite;
24 |
25 | // This corresponds to the version.
26 | file_identifier "TFL3";
27 | // File extension of any written files.
28 | file_extension "tflite";
29 |
30 | // IMPORTANT: All new members of tables, enums and unions must be added at the
31 | // end to ensure backwards compatibility.
32 |
33 | // The type of data stored in a tensor.
34 | enum TensorType : byte {
35 | FLOAT32 = 0,
36 | FLOAT16 = 1,
37 | INT32 = 2,
38 | UINT8 = 3,
39 | INT64 = 4,
40 | STRING = 5,
41 | BOOL = 6,
42 | INT16 = 7,
43 | COMPLEX64 = 8,
44 | INT8 = 9,
45 | FLOAT64 = 10,
46 | COMPLEX128 = 11,
47 | }
48 |
49 | // Custom quantization parameters for experimenting with new quantization
50 | // techniques.
51 | table CustomQuantization {
52 | custom:[ubyte] (force_align: 16);
53 | }
54 |
55 | // Represents a specific quantization technique's parameters.
56 | union QuantizationDetails {
57 | CustomQuantization,
58 | }
59 |
60 | // Parameters for converting a quantized tensor back to float.
61 | table QuantizationParameters {
62 | // These four parameters are the asymmetric linear quantization parameters.
63 | // Given a quantized value q, the corresponding float value f should be:
64 | // f = scale * (q - zero_point)
65 | // For other quantization types, the QuantizationDetails below is used.
66 | min:[float]; // For importing back into tensorflow.
67 | max:[float]; // For importing back into tensorflow.
68 | scale:[float]; // For dequantizing the tensor's values.
69 | zero_point:[long];
70 |
71 | // If this is not none, the other quantization parameters (i.e. min, max,
72 | // scale, zero_point fields above) are ignored and the value of the
73 | // QuantizationDetails union should be used.
74 | details:QuantizationDetails;
75 |
76 | // Specifies the dimension of the Tensor's shape that the scales and
77 | // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
78 | // with quantization params:
79 | // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
80 | // will be quantized across the second dimension of t.
81 | // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
82 | // t[:, 1, :, :] will have scale[1]=2.0, zero_point[0]=2
83 | // t[:, 2, :, :] will have scale[2]=3.0, zero_point[0]=3
84 | quantized_dimension:int;
85 | }
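
The two formulas above are easiest to see in numpy; a minimal sketch with made-up values,
covering both the per-tensor case and the per-axis case driven by quantized_dimension:

import numpy as np

# Per-tensor: f = scale * (q - zero_point)
q = np.array([0, 128, 255], dtype=np.uint8)
scale, zero_point = 0.5, 128
f = scale * (q.astype(np.float32) - zero_point)      # -> [-64., 0., 63.5]

# Per-axis, quantized_dimension = 1, dims = [4, 3, 2, 1]: slice t[:, i, :, :]
# is dequantized with scale[i] and zero_point[i].
t = np.ones((4, 3, 2, 1), dtype=np.int8)
scales = np.array([1.0, 2.0, 3.0], dtype=np.float32).reshape(1, 3, 1, 1)
zero_points = np.array([1, 2, 3]).reshape(1, 3, 1, 1)
f_per_axis = scales * (t.astype(np.float32) - zero_points)
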
86 |
87 | // Sparse tensors.
88 | // We use a modification of the TACO format.
89 | // Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
90 | //
91 | // To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
92 | // potentially with a k-dimensional block (0 <= k <= n) with dims
93 | // (dn, ..., dn+k-1), the format needs to specify:
94 | // 1. In what order to traverse these dimensions. For example, to store a 2-D
95 | // matrix in row major order, the traversal order would be (d0, d1),
96 | // whereas to store it in column major order, the traversal order would be
97 | // (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
98 | // could be (d0, d1, d2, d3).
99 | // 2. How each block dimension in (dn, ..., dn+k-1) maps to the original
100 | // tensor dimension in (d0, ..., dn-1).
101 | // 3. In the traversal order defined above, the format (dense vs. sparse) and
102 | // index metadata for each dimension. For a dense dimension, this is just
103 | // the size of that dimension. For a sparse dimension, it's the same as
104 | // the compressed index defined in the Compressed Sparse Row (CSR) format.
105 | // (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
106 |
107 | // The storage type for a dimension. Currently we support:
108 | // 1. DENSE: each coordinate in this dimension is stored implicitly.
109 | // 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
110 | //    compression technique is the same as the one CSR uses.
111 | // More types like a sparse dimension with a different compression technique
112 | // could be added to the list in the future.
113 | enum DimensionType : byte {
114 | DENSE = 0,
115 | SPARSE_CSR = 1,
116 | }
117 |
118 | table Int32Vector {
119 | values:[int];
120 | }
121 |
122 | table Uint16Vector {
123 | values:[ushort] (force_align: 4);
124 | }
125 |
126 | table Uint8Vector {
127 | values:[ubyte] (force_align: 4);
128 | }
129 |
130 | // Variable-typed buffer to store the index metadata for a sparse dimension.
131 | // The widest type is Int32 instead of UInt32 because a tensor's shape is an int32
132 | // vector. We don't want the per-dimensional index to overflow that range.
133 | union SparseIndexVector {
134 | Int32Vector,
135 | Uint16Vector,
136 | Uint8Vector
137 | }
138 |
139 | table DimensionMetadata {
140 | // Whether a dimension is dense or sparse.
141 | format:DimensionType;
142 | // Index metadata used for a dimension.
143 | // - If format is DimensionType.DENSE then we use the dense_size field to
144 | // store the size of that dimension. Each index in that dimension is
145 | // stored implicitly.
146 | // - If format is DimensionType.SPARSE_CSR then we use array_segments and
147 | // array_indices to encode that dimension. array_segments represents how
148 | // to segment the indices array, each segment corresponds to one element
149 | // in the previous dimension. array_indices represents the index of the
150 | // non-zero elements within this dimension (as those in the CSR matrix
151 | // format, where the first array is row pointers and the second array is
152 | // column indices).
153 | dense_size:int;
154 | array_segments:SparseIndexVector;
155 | array_indices:SparseIndexVector;
156 | }
157 |
158 | // Parameters to encode a sparse TfLite tensor.
159 | table SparsityParameters {
160 | // The traversal order of the dimensions defined in the `shape` field of the
161 | //   conceptual dense tensor. For an n-dimensional tensor with dims (d0, d1,
162 | // ..., dn-1),
163 | // - if not block sparse, the traversal_order is just a permutation of (d0,
164 | // ..., dn-1). For example, a 2-D matrix stored in row-major order would
165 | // have traversal_order = (d0, d1).
166 | // - if block sparse with a k-dimensional block (0 <= k <= n), the
167 | // traversal_order has n + k elements. The first n elements are still a
168 | //     permutation of (d0, ..., dn-1). The last k elements are a permutation
169 | // of (dn, ..., dn+k-1), defining how to traverse a block internally. For
170 | // example, a 2-D matrix with 2-D blocks, both stored in row-major order
171 | // would have traversal_order = (d0, d1, d2, d3).
172 | traversal_order:[int];
173 | // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
174 | // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
175 | //   tensor dimension in (d0, ..., dn-1).
176 | // It's stored in the order of (dn, ..., dn+k-1).
177 | // If not block-sparse, this field is NULL.
178 | block_map:[int];
179 | // In the traversal order defined above, the metadata needed for
180 | // each dimension to locate the non-zero values in the original dense tensor.
181 | // The size of the dim_metadata array = the size of the traversal_order array
182 | // = n + k.
183 | dim_metadata:[DimensionMetadata];
184 | }
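
To make the encoding concrete, here is a decoder sketch for the simplest layout: a 2-D
matrix with traversal_order = (d0, d1), d0 stored DENSE and d1 stored SPARSE_CSR. All
numbers are hypothetical:

import numpy as np

dense_size_d0 = 4                    # DimensionMetadata for d0 (DENSE)
segments = [0, 2, 3, 3, 5]           # array_segments: row i owns values[segments[i]:segments[i+1]]
indices = [0, 2, 1, 0, 3]            # array_indices: column of each stored value
values = [1.0, 2.0, 3.0, 4.0, 5.0]   # the non-zero values from the tensor's buffer

dense = np.zeros((dense_size_d0, 4), dtype=np.float32)
for row in range(dense_size_d0):
    for k in range(segments[row], segments[row + 1]):
        dense[row, indices[k]] = values[k]
# Row 2 is empty (segments[2] == segments[3]), exactly as CSR row pointers allow.
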
185 |
186 | table Tensor {
187 | // The tensor shape. The meaning of each entry is operator-specific but
188 | // builtin ops use: [batch size, height, width, number of channels] (That's
189 | // TensorFlow's NHWC).
190 | shape:[int];
191 | type:TensorType;
192 | // An index that refers to the buffers table at the root of the model. Or,
193 | // if there is no data buffer associated (i.e. intermediate results), then
194 | // this is 0 (which refers to an always existent empty buffer).
195 | //
196 | // The data_buffer itself is an opaque container, with the assumption that the
197 | // target device is little-endian. In addition, all builtin operators assume
198 | // the memory is ordered such that if `shape` is [4, 3, 2], then index
199 | // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
200 | buffer:uint;
201 | name:string; // For debugging and importing back into tensorflow.
202 | quantization:QuantizationParameters; // Optional.
203 |
204 | is_variable:bool = false;
205 |
206 | // Parameters to encode a sparse tensor. See the example in
207 | // tensorflow/lite/testdata/sparse_tensor.json.
208 | sparsity:SparsityParameters; // Optional.
209 |
210 | // Encodes `shape` with unknown dimensions. Unknown dimensions are
211 | // represented with -1.
212 | shape_signature:[int]; // Optional.
213 | }
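
The ordering rule in the buffer comment is plain row-major (C-order) flattening, which
can be checked against numpy's default layout (made-up data):

import numpy as np

shape = (4, 3, 2)
data_buffer = np.arange(np.prod(shape), dtype=np.float32)  # stand-in for Buffer.data
t = data_buffer.reshape(shape)                             # row-major, as assumed above
i, j, k = 2, 1, 1
assert t[i, j, k] == data_buffer[i * 3 * 2 + j * 2 + k]
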
214 |
215 | // A list of builtin operators. Builtin operators are slightly faster than custom
216 | // ones, but not by much. Moreover, while custom operators accept an opaque
217 | // object containing configuration parameters, builtins have a predetermined
218 | // set of acceptable options.
219 |
220 | enum BuiltinOperator : int32 {
221 | ADD = 0,
222 | AVERAGE_POOL_2D = 1,
223 | CONCATENATION = 2,
224 | CONV_2D = 3,
225 | DEPTHWISE_CONV_2D = 4,
226 | DEPTH_TO_SPACE = 5,
227 | DEQUANTIZE = 6,
228 | EMBEDDING_LOOKUP = 7,
229 | FLOOR = 8,
230 | FULLY_CONNECTED = 9,
231 | HASHTABLE_LOOKUP = 10,
232 | L2_NORMALIZATION = 11,
233 | L2_POOL_2D = 12,
234 | LOCAL_RESPONSE_NORMALIZATION = 13,
235 | LOGISTIC = 14,
236 | LSH_PROJECTION = 15,
237 | LSTM = 16,
238 | MAX_POOL_2D = 17,
239 | MUL = 18,
240 | RELU = 19,
241 | // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
242 | // since different model developers use RELU1 in different ways. Never
243 | // create another op called RELU1.
244 | RELU_N1_TO_1 = 20,
245 | RELU6 = 21,
246 | RESHAPE = 22,
247 | RESIZE_BILINEAR = 23,
248 | RNN = 24,
249 | SOFTMAX = 25,
250 | SPACE_TO_DEPTH = 26,
251 | SVDF = 27,
252 | TANH = 28,
253 | CONCAT_EMBEDDINGS = 29,
254 | SKIP_GRAM = 30,
255 | CALL = 31,
256 | CUSTOM = 32,
257 | EMBEDDING_LOOKUP_SPARSE = 33,
258 | PAD = 34,
259 | UNIDIRECTIONAL_SEQUENCE_RNN = 35,
260 | GATHER = 36,
261 | BATCH_TO_SPACE_ND = 37,
262 | SPACE_TO_BATCH_ND = 38,
263 | TRANSPOSE = 39,
264 | MEAN = 40,
265 | SUB = 41,
266 | DIV = 42,
267 | SQUEEZE = 43,
268 | UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
269 | STRIDED_SLICE = 45,
270 | BIDIRECTIONAL_SEQUENCE_RNN = 46,
271 | EXP = 47,
272 | TOPK_V2 = 48,
273 | SPLIT = 49,
274 | LOG_SOFTMAX = 50,
275 | // DELEGATE is a special op type for the operations which are delegated to
276 | // other backends.
277 | // WARNING: Experimental interface, subject to change
278 | DELEGATE = 51,
279 | BIDIRECTIONAL_SEQUENCE_LSTM = 52,
280 | CAST = 53,
281 | PRELU = 54,
282 | MAXIMUM = 55,
283 | ARG_MAX = 56,
284 | MINIMUM = 57,
285 | LESS = 58,
286 | NEG = 59,
287 | PADV2 = 60,
288 | GREATER = 61,
289 | GREATER_EQUAL = 62,
290 | LESS_EQUAL = 63,
291 | SELECT = 64,
292 | SLICE = 65,
293 | SIN = 66,
294 | TRANSPOSE_CONV = 67,
295 | SPARSE_TO_DENSE = 68,
296 | TILE = 69,
297 | EXPAND_DIMS = 70,
298 | EQUAL = 71,
299 | NOT_EQUAL = 72,
300 | LOG = 73,
301 | SUM = 74,
302 | SQRT = 75,
303 | RSQRT = 76,
304 | SHAPE = 77,
305 | POW = 78,
306 | ARG_MIN = 79,
307 | FAKE_QUANT = 80,
308 | REDUCE_PROD = 81,
309 | REDUCE_MAX = 82,
310 | PACK = 83,
311 | LOGICAL_OR = 84,
312 | ONE_HOT = 85,
313 | LOGICAL_AND = 86,
314 | LOGICAL_NOT = 87,
315 | UNPACK = 88,
316 | REDUCE_MIN = 89,
317 | FLOOR_DIV = 90,
318 | REDUCE_ANY = 91,
319 | SQUARE = 92,
320 | ZEROS_LIKE = 93,
321 | FILL = 94,
322 | FLOOR_MOD = 95,
323 | RANGE = 96,
324 | RESIZE_NEAREST_NEIGHBOR = 97,
325 | LEAKY_RELU = 98,
326 | SQUARED_DIFFERENCE = 99,
327 | MIRROR_PAD = 100,
328 | ABS = 101,
329 | SPLIT_V = 102,
330 | UNIQUE = 103,
331 | CEIL = 104,
332 | REVERSE_V2 = 105,
333 | ADD_N = 106,
334 | GATHER_ND = 107,
335 | COS = 108,
336 | WHERE = 109,
337 | RANK = 110,
338 | ELU = 111,
339 | REVERSE_SEQUENCE = 112,
340 | MATRIX_DIAG = 113,
341 | QUANTIZE = 114,
342 | MATRIX_SET_DIAG = 115,
343 | ROUND = 116,
344 | HARD_SWISH = 117,
345 | IF = 118,
346 | WHILE = 119,
347 | NON_MAX_SUPPRESSION_V4 = 120,
348 | NON_MAX_SUPPRESSION_V5 = 121,
349 | SCATTER_ND = 122,
350 | SELECT_V2 = 123,
351 | DENSIFY = 124,
352 | SEGMENT_SUM = 125,
353 | BATCH_MATMUL = 126,
354 | PLACEHOLDER_FOR_GREATER_OP_CODES = 127
355 | }
356 |
357 |
358 | // Options for the builtin operators.
359 | union BuiltinOptions {
360 | Conv2DOptions,
361 | DepthwiseConv2DOptions,
362 | ConcatEmbeddingsOptions,
363 | LSHProjectionOptions,
364 | Pool2DOptions,
365 | SVDFOptions,
366 | RNNOptions,
367 | FullyConnectedOptions,
368 | SoftmaxOptions,
369 | ConcatenationOptions,
370 | AddOptions,
371 | L2NormOptions,
372 | LocalResponseNormalizationOptions,
373 | LSTMOptions,
374 | ResizeBilinearOptions,
375 | CallOptions,
376 | ReshapeOptions,
377 | SkipGramOptions,
378 | SpaceToDepthOptions,
379 | EmbeddingLookupSparseOptions,
380 | MulOptions,
381 | PadOptions,
382 | GatherOptions,
383 | BatchToSpaceNDOptions,
384 | SpaceToBatchNDOptions,
385 | TransposeOptions,
386 | ReducerOptions,
387 | SubOptions,
388 | DivOptions,
389 | SqueezeOptions,
390 | SequenceRNNOptions,
391 | StridedSliceOptions,
392 | ExpOptions,
393 | TopKV2Options,
394 | SplitOptions,
395 | LogSoftmaxOptions,
396 | CastOptions,
397 | DequantizeOptions,
398 | MaximumMinimumOptions,
399 | ArgMaxOptions,
400 | LessOptions,
401 | NegOptions,
402 | PadV2Options,
403 | GreaterOptions,
404 | GreaterEqualOptions,
405 | LessEqualOptions,
406 | SelectOptions,
407 | SliceOptions,
408 | TransposeConvOptions,
409 | SparseToDenseOptions,
410 | TileOptions,
411 | ExpandDimsOptions,
412 | EqualOptions,
413 | NotEqualOptions,
414 | ShapeOptions,
415 | PowOptions,
416 | ArgMinOptions,
417 | FakeQuantOptions,
418 | PackOptions,
419 | LogicalOrOptions,
420 | OneHotOptions,
421 | LogicalAndOptions,
422 | LogicalNotOptions,
423 | UnpackOptions,
424 | FloorDivOptions,
425 | SquareOptions,
426 | ZerosLikeOptions,
427 | FillOptions,
428 | BidirectionalSequenceLSTMOptions,
429 | BidirectionalSequenceRNNOptions,
430 | UnidirectionalSequenceLSTMOptions,
431 | FloorModOptions,
432 | RangeOptions,
433 | ResizeNearestNeighborOptions,
434 | LeakyReluOptions,
435 | SquaredDifferenceOptions,
436 | MirrorPadOptions,
437 | AbsOptions,
438 | SplitVOptions,
439 | UniqueOptions,
440 | ReverseV2Options,
441 | AddNOptions,
442 | GatherNdOptions,
443 | CosOptions,
444 | WhereOptions,
445 | RankOptions,
446 | ReverseSequenceOptions,
447 | MatrixDiagOptions,
448 | QuantizeOptions,
449 | MatrixSetDiagOptions,
450 | HardSwishOptions,
451 | IfOptions,
452 | WhileOptions,
453 | DepthToSpaceOptions,
454 | NonMaxSuppressionV4Options,
455 | NonMaxSuppressionV5Options,
456 | ScatterNdOptions,
457 | SelectV2Options,
458 | DensifyOptions,
459 | SegmentSumOptions,
460 | BatchMatMulOptions
461 | }
462 |
463 | enum Padding : byte { SAME, VALID }
464 |
465 | enum ActivationFunctionType : byte {
466 | NONE = 0,
467 | RELU = 1,
468 | RELU_N1_TO_1 = 2,
469 | RELU6 = 3,
470 | TANH = 4,
471 | SIGN_BIT = 5,
472 | }
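
For reference, the conventional effect of each fused activation when applied to an op's
output, sketched in numpy (SIGN_BIT is omitted since it extracts a sign rather than
clamping; this mapping is our reading, not part of the schema itself):

import numpy as np

FUSED = {
    'NONE':         lambda x: x,
    'RELU':         lambda x: np.maximum(x, 0.0),
    'RELU_N1_TO_1': lambda x: np.clip(x, -1.0, 1.0),
    'RELU6':        lambda x: np.clip(x, 0.0, 6.0),
    'TANH':         np.tanh,
}
y = FUSED['RELU6'](np.array([-2.0, 3.0, 8.0]))   # -> [0., 3., 6.]
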
473 |
474 | table Conv2DOptions {
475 | padding:Padding;
476 | stride_w:int;
477 | stride_h:int;
478 | fused_activation_function:ActivationFunctionType;
479 | dilation_w_factor:int = 1;
480 | dilation_h_factor:int = 1;
481 | }
482 |
483 | table Pool2DOptions {
484 | padding:Padding;
485 | stride_w:int;
486 | stride_h:int;
487 | filter_width:int;
488 | filter_height:int;
489 | fused_activation_function:ActivationFunctionType;
490 | }
491 |
492 | table DepthwiseConv2DOptions {
493 | // Parameters for DepthwiseConv version 1 or above.
494 | padding:Padding;
495 | stride_w:int;
496 | stride_h:int;
497 | // `depth_multiplier` is redundant. It's used by CPU kernels in
498 | // TensorFlow 2.0 or below, but ignored in versions above.
499 | // See comments in lite/c/builtin_op_data.h for more details.
500 | depth_multiplier:int;
501 | fused_activation_function:ActivationFunctionType;
502 | // Parameters for DepthwiseConv version 2 or above.
503 | dilation_w_factor:int = 1;
504 | dilation_h_factor:int = 1;
505 | }
506 |
507 | table ConcatEmbeddingsOptions {
508 | num_channels:int;
509 | num_columns_per_channel:[int];
510 | embedding_dim_per_channel:[int]; // This could be inferred from parameters.
511 | }
512 |
513 | enum LSHProjectionType: byte {
514 | UNKNOWN = 0,
515 | SPARSE = 1,
516 | DENSE = 2,
517 | }
518 |
519 | table LSHProjectionOptions {
520 | type: LSHProjectionType;
521 | }
522 |
523 | table SVDFOptions {
524 | rank:int;
525 | fused_activation_function:ActivationFunctionType;
526 |   // For weights-only quantization, use asymmetric quantization for
527 |   // non-constant inputs at evaluation time.
528 | asymmetric_quantize_inputs:bool;
529 | }
530 |
531 | // An implementation of TensorFlow RNNCell.
532 | table RNNOptions {
533 | fused_activation_function:ActivationFunctionType;
534 | asymmetric_quantize_inputs:bool;
535 | }
536 |
537 | // An implementation of TensorFlow dynamic_rnn with RNNCell.
538 | table SequenceRNNOptions {
539 | time_major:bool;
540 | fused_activation_function:ActivationFunctionType;
541 | asymmetric_quantize_inputs:bool;
542 | }
543 |
544 | // An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
545 | table BidirectionalSequenceRNNOptions {
546 | time_major:bool;
547 | fused_activation_function:ActivationFunctionType;
548 | merge_outputs: bool;
549 | asymmetric_quantize_inputs:bool;
550 | }
551 |
552 | enum FullyConnectedOptionsWeightsFormat: byte {
553 | DEFAULT = 0,
554 | SHUFFLED4x16INT8 = 1,
555 | }
556 |
557 | // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
558 | table FullyConnectedOptions {
559 | // Parameters for FullyConnected version 1 or above.
560 | fused_activation_function:ActivationFunctionType;
561 |
562 | // Parameters for FullyConnected version 2 or above.
563 | weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
564 |
565 | // Parameters for FullyConnected version 5 or above.
566 |   // If set to true, then the number of dimensions is preserved. Furthermore,
567 | // all but the last dimension of the input and output shapes will be equal.
568 | keep_num_dims: bool;
569 |
570 | // Parameters for FullyConnected version 7 or above.
571 | // If set to true, then weights-only op will use asymmetric quantization for
572 | // inputs.
573 | asymmetric_quantize_inputs: bool;
574 | }
575 |
576 | table SoftmaxOptions {
577 | beta: float;
578 | }
579 |
580 | // An implementation of TensorFlow concat.
581 | table ConcatenationOptions {
582 | axis:int;
583 | fused_activation_function:ActivationFunctionType;
584 | }
585 |
586 | table AddOptions {
587 | fused_activation_function:ActivationFunctionType;
588 | // Parameters supported by version 4.
589 | pot_scale_int16:bool = true;
590 | }
591 |
592 | table MulOptions {
593 | fused_activation_function:ActivationFunctionType;
594 | }
595 |
596 | table L2NormOptions {
597 | fused_activation_function:ActivationFunctionType;
598 | }
599 |
600 | table LocalResponseNormalizationOptions {
601 | radius:int;
602 | bias:float;
603 | alpha:float;
604 | beta:float;
605 | }
606 |
607 | enum LSTMKernelType : byte {
608 | // Full LSTM kernel which supports peephole and projection.
609 | FULL = 0,
610 | // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
611 | BASIC = 1,
612 | }
613 |
614 | // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
615 | table LSTMOptions {
616 | // Parameters for LSTM version 1 or above.
617 | fused_activation_function:ActivationFunctionType;
618 | cell_clip: float; // Optional, 0.0 means no clipping
619 | proj_clip: float; // Optional, 0.0 means no clipping
620 |
621 | // Parameters for LSTM version 2 or above.
622 | // Basic kernel is only supported in version 2 or above.
623 | kernel_type: LSTMKernelType = FULL;
624 |
625 | // Parameters for LSTM version 4 or above.
626 | asymmetric_quantize_inputs: bool;
627 | }
628 |
629 | // An implementation of TensorFlow dynamic_rnn with LSTMCell.
630 | table UnidirectionalSequenceLSTMOptions {
631 | fused_activation_function:ActivationFunctionType;
632 | cell_clip: float; // Optional, 0.0 means no clipping
633 | proj_clip: float; // Optional, 0.0 means no clipping
634 |
635 | // If true then first dimension is sequence, otherwise batch.
636 | time_major:bool;
637 |
638 | // Parameter for Unidirectional Sequence LSTM version 4.
639 | asymmetric_quantize_inputs:bool;
640 | }
641 |
642 | table BidirectionalSequenceLSTMOptions {
643 | // Parameters supported by version 1:
644 | fused_activation_function:ActivationFunctionType;
645 | cell_clip: float; // Optional, 0.0 means no clipping
646 | proj_clip: float; // Optional, 0.0 means no clipping
647 |
648 | // If true, store the outputs of both directions into the first output.
649 | merge_outputs: bool;
650 |
651 | // Parameters supported by version 2:
652 | // If true then first dimension is sequence, otherwise batch.
653 | // Version 1 implementations assumed time_major to be true, so this default
654 | // value should never change.
655 | time_major: bool = true;
656 |
657 | // Parameters for version 3 or above.
658 | asymmetric_quantize_inputs:bool;
659 | }
660 |
661 | table ResizeBilinearOptions {
662 | new_height: int (deprecated);
663 | new_width: int (deprecated);
664 | align_corners: bool;
665 | half_pixel_centers: bool;
666 | }
667 |
668 | table ResizeNearestNeighborOptions {
669 | align_corners: bool;
670 | half_pixel_centers: bool;
671 | }
672 |
673 | // Options for a call operation.
674 | table CallOptions {
675 | // The subgraph index that needs to be called.
676 | subgraph:uint;
677 | }
678 |
679 | table PadOptions {
680 | }
681 |
682 | table PadV2Options {
683 | }
684 |
685 | table ReshapeOptions {
686 | new_shape:[int];
687 | }
688 |
689 | table SpaceToBatchNDOptions {
690 | }
691 |
692 | table BatchToSpaceNDOptions {
693 | }
694 |
695 | table SkipGramOptions {
696 | ngram_size: int;
697 | max_skip_size: int;
698 | include_all_ngrams: bool;
699 | }
700 |
701 | table SpaceToDepthOptions {
702 | block_size: int;
703 | }
704 |
705 | table DepthToSpaceOptions {
706 | block_size: int;
707 | }
708 |
709 | table SubOptions {
710 | fused_activation_function:ActivationFunctionType;
711 | // Parameters supported by version 5
712 | pot_scale_int16:bool = true;
713 | }
714 |
715 | table DivOptions {
716 | fused_activation_function:ActivationFunctionType;
717 | }
718 |
719 | table TopKV2Options {
720 | }
721 |
722 | enum CombinerType : byte {
723 | SUM = 0,
724 | MEAN = 1,
725 | SQRTN = 2,
726 | }
727 |
728 | table EmbeddingLookupSparseOptions {
729 | combiner:CombinerType;
730 | }
731 |
732 | table GatherOptions {
733 | axis: int;
734 | }
735 |
736 | table TransposeOptions {
737 | }
738 |
739 | table ExpOptions {
740 | }
741 |
742 | table CosOptions {
743 | }
744 |
745 | table ReducerOptions {
746 | keep_dims: bool;
747 | }
748 |
749 | table SqueezeOptions {
750 | squeeze_dims:[int];
751 | }
752 |
753 | table SplitOptions {
754 | num_splits: int;
755 | }
756 |
757 | table SplitVOptions {
758 | num_splits: int;
759 | }
760 |
761 | table StridedSliceOptions {
762 | begin_mask: int;
763 | end_mask: int;
764 | ellipsis_mask: int;
765 | new_axis_mask: int;
766 | shrink_axis_mask: int;
767 | }
768 |
769 | table LogSoftmaxOptions {
770 | }
771 |
772 | table CastOptions {
773 | in_data_type: TensorType;
774 | out_data_type: TensorType;
775 | }
776 |
777 | table DequantizeOptions {
778 | }
779 |
780 | table MaximumMinimumOptions {
781 | }
782 |
783 | table TileOptions {
784 | }
785 |
786 | table ArgMaxOptions {
787 | output_type : TensorType;
788 | }
789 |
790 | table ArgMinOptions {
791 | output_type : TensorType;
792 | }
793 |
794 | table GreaterOptions {
795 | }
796 |
797 | table GreaterEqualOptions {
798 | }
799 |
800 | table LessOptions {
801 | }
802 |
803 | table LessEqualOptions {
804 | }
805 |
806 | table NegOptions {
807 | }
808 |
809 | table SelectOptions {
810 | }
811 |
812 | table SliceOptions {
813 | }
814 |
815 | table TransposeConvOptions {
816 | padding:Padding;
817 | stride_w:int;
818 | stride_h:int;
819 | }
820 |
821 | table ExpandDimsOptions {
822 | }
823 |
824 | table SparseToDenseOptions {
825 | validate_indices:bool;
826 | }
827 |
828 | table EqualOptions {
829 | }
830 |
831 | table NotEqualOptions {
832 | }
833 |
834 | table ShapeOptions {
835 | // Optional output type of the operation (int32 or int64). Defaults to int32.
836 | out_type : TensorType;
837 | }
838 |
839 | table RankOptions {
840 | }
841 |
842 | table PowOptions {
843 | }
844 |
845 | table FakeQuantOptions {
846 | // Parameters supported by version 1:
847 | min:float;
848 | max:float;
849 | num_bits:int;
850 |
851 | // Parameters supported by version 2:
852 | narrow_range:bool;
853 | }
854 |
855 | table PackOptions {
856 | values_count:int;
857 | axis:int;
858 | }
859 |
860 | table LogicalOrOptions {
861 | }
862 |
863 | table OneHotOptions {
864 | axis:int;
865 | }
866 |
867 | table AbsOptions {
868 | }
869 |
870 |
871 | table HardSwishOptions {
872 | }
873 |
874 | table LogicalAndOptions {
875 | }
876 |
877 | table LogicalNotOptions {
878 | }
879 |
880 | table UnpackOptions {
881 | num:int;
882 | axis:int;
883 | }
884 |
885 | table FloorDivOptions {
886 | }
887 |
888 | table SquareOptions {
889 | }
890 |
891 | table ZerosLikeOptions {
892 | }
893 |
894 | table FillOptions {
895 | }
896 |
897 | table FloorModOptions {
898 | }
899 |
900 | table RangeOptions {
901 | }
902 |
903 | table LeakyReluOptions {
904 | alpha:float;
905 | }
906 |
907 | table SquaredDifferenceOptions {
908 | }
909 |
910 | enum MirrorPadMode : byte {
911 | // Doesn't include borders.
912 | REFLECT = 0,
913 | // Includes borders.
914 | SYMMETRIC = 1,
915 | }
916 |
917 | table MirrorPadOptions {
918 | mode:MirrorPadMode;
919 | }
920 |
921 | table UniqueOptions {
922 | idx_out_type:TensorType = INT32;
923 | }
924 |
925 | table ReverseV2Options {
926 | }
927 |
928 | table AddNOptions {
929 | }
930 |
931 | table GatherNdOptions {
932 | }
933 |
934 | table WhereOptions {
935 | }
936 |
937 | table ReverseSequenceOptions {
938 | seq_dim:int;
939 | batch_dim:int = 0;
940 | }
941 |
942 | table MatrixDiagOptions {
943 | }
944 |
945 | table QuantizeOptions {
946 | }
947 |
948 | table MatrixSetDiagOptions {
949 | }
950 |
951 | table IfOptions {
952 | then_subgraph_index:int;
953 | else_subgraph_index:int;
954 | }
955 |
956 | table WhileOptions {
957 | cond_subgraph_index:int;
958 | body_subgraph_index:int;
959 | }
960 |
961 | table NonMaxSuppressionV4Options {
962 | }
963 |
964 | table NonMaxSuppressionV5Options {
965 | }
966 |
967 | table ScatterNdOptions {
968 | }
969 |
970 | table SelectV2Options {
971 | }
972 |
973 | table DensifyOptions {
974 | }
975 |
976 | table SegmentSumOptions {
977 | }
978 |
979 | table BatchMatMulOptions {
980 | adj_x:bool;
981 | adj_y:bool;
982 | }
983 |
984 | // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
985 | // builtin, or a string if the operator is custom.
986 | table OperatorCode {
987 | // This field is for backward compatibility. This field will be used when
988 |   // the value of the extended builtin_code field is less than
989 |   // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
990 | deprecated_builtin_code:byte;
991 | custom_code:string;
992 |
993 |   // The version of the operator. The version needs to be bumped whenever new
994 | // parameters are introduced into an op.
995 | version:int = 1;
996 |
997 | // This field is introduced for resolving op builtin code shortage problem.
998 | // This field will be used when the value of the extended builtin_code field
999 | // is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
1000 | builtin_code:BuiltinOperator;
1001 | }
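
A minimal sketch of how a reader could combine the two fields under the rule stated in
these comments (the function and constant names are ours, not part of any API):

PLACEHOLDER_FOR_GREATER_OP_CODES = 127

def resolve_builtin_code(deprecated_builtin_code, builtin_code):
    # Old writers only populate the byte-sized deprecated field; new writers
    # also populate the int32 field, which wins once the placeholder is hit.
    if deprecated_builtin_code < PLACEHOLDER_FOR_GREATER_OP_CODES:
        return deprecated_builtin_code
    return builtin_code
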
1002 |
1003 | enum CustomOptionsFormat : byte {
1004 | FLEXBUFFERS = 0,
1005 | }
1006 |
1007 | // An operator takes tensors as inputs and outputs. The type of operation being
1008 | // performed is determined by an index into the list of valid OperatorCodes,
1009 | // while the specifics of each operation are configured using builtin_options
1010 | // or custom_options.
1011 | table Operator {
1012 | // Index into the operator_codes array. Using an integer here avoids
1013 |   // complicated map lookups.
1014 | opcode_index:uint;
1015 |
1016 | // Optional input are indicated by -1.
1017 | inputs:[int];
1018 | outputs:[int];
1019 |
1020 | builtin_options:BuiltinOptions;
1021 | custom_options:[ubyte];
1022 | custom_options_format:CustomOptionsFormat;
1023 |
1024 | // A list of booleans indicating the input tensors which are being mutated by
1025 |   // this operator (e.g., as used by RNN and LSTM).
1026 | // For example, if the "inputs" array refers to 5 tensors and the second and
1027 | // fifth are mutable variables, then this list will contain
1028 | // [false, true, false, false, true].
1029 | //
1030 | // If the list is empty, no variable is mutated in this operator.
1031 | // The list either has the same length as `inputs`, or is empty.
1032 | mutating_variable_inputs:[bool];
1033 |
1034 | // A list of indices to the subgraph's "tensors" that are internal to an Op.
1035 | // Internal tensors are those that do not flow in or out of the operation,
1036 | // but instead are part of internal computation. As such, the operation's
1037 | // implementation may manage its memory more efficiently. They are needed
1038 | // however (i.e. not just an implementation detail) since they are part of the
1039 | // computation, which may require relevant metadata such as quantization
1040 | // parameters.
1041 | intermediates:[int];
1042 | }
1043 |
1044 | // The root type, defining a subgraph, which typically represents an entire
1045 | // model.
1046 | table SubGraph {
1047 | // A list of all tensors used in this subgraph.
1048 | tensors:[Tensor];
1049 |
1050 | // Indices of the tensors that are inputs into this subgraph. Note this is
1051 | // the list of non-static tensors that feed into the subgraph for inference.
1052 | inputs:[int];
1053 |
1054 | // Indices of the tensors that are outputs out of this subgraph. Note this is
1055 | // the list of output tensors that are considered the product of the
1056 | // subgraph's inference.
1057 | outputs:[int];
1058 |
1059 | // All operators, in execution order.
1060 | operators:[Operator];
1061 |
1062 | // Name of this subgraph (used for debugging).
1063 | name:string;
1064 | }
1065 |
1066 | // Table of raw data buffers (used for constant tensors). Referenced by tensors
1067 | // by index. The generous alignment accommodates mmap-friendly data structures.
1068 | table Buffer {
1069 | data:[ubyte] (force_align: 16);
1070 | }
1071 |
1072 | table Metadata {
1073 | // A human readable string to uniquely identify a Metadata.
1074 | name:string;
1075 | // An index to the buffers table.
1076 | buffer:uint;
1077 | }
1078 |
1079 | table Model {
1080 | // Version of the schema.
1081 | version:uint;
1082 |
1083 | // A list of all operator codes used in this model. This is
1084 | // kept in order because operators carry an index into this
1085 | // vector.
1086 | operator_codes:[OperatorCode];
1087 |
1088 | // All the subgraphs of the model. The 0th is assumed to be the main
1089 | // model.
1090 | subgraphs:[SubGraph];
1091 |
1092 | // A description of the model.
1093 | description:string;
1094 |
1095 | // Buffers of the model.
1096 | // Note the 0th entry of this array must be an empty buffer (sentinel).
1097 | // This is a convention so that tensors without a buffer can provide 0 as
1098 | // their buffer.
1099 | buffers:[Buffer];
1100 |
1101 |   // Metadata about the model. Indirects into the existing buffers list.
1102 | // Deprecated, prefer to use metadata field.
1103 | metadata_buffer:[int];
1104 |
1105 | // Metadata about the model.
1106 | metadata:[Metadata];
1107 | }
1108 |
1109 | root_type Model;
1110 |
--------------------------------------------------------------------------------
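
With either schema revision and a flatc binary in hand, a .tflite flatbuffer can be
dumped to editable JSON; a typical invocation (paths are placeholders, and flatc writes
model.json into the output directory):

flatc -t --strict-json --defaults-json -o . schema/schema.fbs -- model.tflite
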
/scripts/tflite2tensorflow:
--------------------------------------------------------------------------------
1 | ../tflite2tensorflow/tflite2tensorflow.py
--------------------------------------------------------------------------------
/scripts/view_npy:
--------------------------------------------------------------------------------
1 | ../tflite2tensorflow/view_npy.py
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, Extension
2 | from setuptools import find_packages
3 | from os import listdir
4 |
5 | with open("README.md") as f:
6 | long_description = f.read()
7 |
8 | scripts = ["scripts/"+i for i in listdir("scripts")]
9 |
10 | if __name__ == "__main__":
11 | setup(
12 | name="tflite2tensorflow",
13 | scripts=scripts,
14 | version="1.22.0",
15 | description="Generate saved_model, tfjs, tf-trt, EdgeTPU, CoreML, quantized tflite, ONNX, OpenVINO, Myriad Inference Engine blob and .pb from .tflite.",
16 | long_description=long_description,
17 | long_description_content_type="text/markdown",
18 | author="Katsuya Hyodo",
19 | author_email="rmsdh122@yahoo.co.jp",
20 | url="https://github.com/PINTO0309/tflite2tensorflow",
21 | license="MIT License",
22 | packages=find_packages(),
23 | platforms=["linux", "unix"],
24 | python_requires=">3.6",
25 | )
26 |
--------------------------------------------------------------------------------
/tflite2tensorflow/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PINTO0309/tflite2tensorflow/c13504df2f82dc234f1009e34dbab9c8b65c7ce4/tflite2tensorflow/__init__.py
--------------------------------------------------------------------------------
/tflite2tensorflow/mediapipeCustomOp.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | # TensorFlow implementation of MediaPipe custom operators
3 | ###############################################################################
4 | #
5 | # MIT License
6 | #
7 | # Copyright (c) 2022 Akiya Research Institute, Inc.
8 | #
9 | # Permission is hereby granted, free of charge, to any person obtaining a copy
10 | # of this software and associated documentation files (the "Software"), to deal
11 | # in the Software without restriction, including without limitation the rights
12 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 | # copies of the Software, and to permit persons to whom the Software is
14 | # furnished to do so, subject to the following conditions:
15 | #
16 | # The above copyright notice and this permission notice shall be included in all
17 | # copies or substantial portions of the Software.
18 | #
19 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 | # SOFTWARE.
26 |
27 | import sys
28 | import tensorflow.compat.v1 as tf
29 | import numpy as np
30 |
31 | class Color:
32 | BLACK = '\033[30m'
33 | RED = '\033[31m'
34 | GREEN = '\033[32m'
35 | YELLOW = '\033[33m'
36 | BLUE = '\033[34m'
37 | MAGENTA = '\033[35m'
38 | CYAN = '\033[36m'
39 | WHITE = '\033[37m'
40 | COLOR_DEFAULT = '\033[39m'
41 | BOLD = '\033[1m'
42 | UNDERLINE = '\033[4m'
43 | INVISIBLE = '\033[08m'
44 | REVERCE = '\033[07m'
45 | BG_BLACK = '\033[40m'
46 | BG_RED = '\033[41m'
47 | BG_GREEN = '\033[42m'
48 | BG_YELLOW = '\033[43m'
49 | BG_BLUE = '\033[44m'
50 | BG_MAGENTA = '\033[45m'
51 | BG_CYAN = '\033[46m'
52 | BG_WHITE = '\033[47m'
53 | BG_DEFAULT = '\033[49m'
54 | RESET = '\033[0m'
55 |
56 | # Affine-transform points
57 | def TransformLandmarks(operator, custom_options, tensors, interpreter, landmarks2d=None, mat=None):
58 | if landmarks2d is None:
59 | landmarks2d = tensors[operator['inputs'][0]] #float32 [b,80,2] landmarks 2d
60 | if mat is None:
61 | mat = tensors[operator['inputs'][1]] #float32 [b,4,4] affine transform matrix
62 | b = landmarks2d.shape[0]
63 |
64 | # extract important values
65 | mat_rot = mat[:,0:2,0:2] #[b,2,2]
66 | translation = mat[:,0:2,3:4] #[b,2,1]
67 | translation = tf.reshape(translation, [b,1,2])
68 |
69 | # Find the corresponding point in the input image
70 | landmarks2d_transformed = tf.matmul(landmarks2d, mat_rot, transpose_b=True) #[b,80,2]
71 | landmarks2d_transformed = tf.add(landmarks2d_transformed, translation) #[b,80,2]
72 | return landmarks2d_transformed
73 |
74 | # Affine-transform images using bilinear interpolation
75 | def TransformTensorBilinear(operator, custom_options, tensors, interpreter, optimizing_barracuda, optimaization_for_myriad, features=None, mat=None):
76 | if features is None:
77 | features = tensors[operator['inputs'][0]] #float32 [b,48,48,32] feature maps
78 | if mat is None:
79 | mat = tensors[operator['inputs'][1]] #float32 [b,4,4] affine transform matrix
80 | w = custom_options['output_width']
81 | h = custom_options['output_height']
82 | b = features.shape[0]
83 | input_h = features.shape[1]
84 | input_w = features.shape[2]
85 |
86 | # extract important values
87 | mat_rot = mat[:,0:2,0:2] #[b,2,2]
88 | translation = mat[:,0:2,3:4] #[b,2,1]
89 | translation = tf.reshape(translation, [b,1,1,2])
90 |
91 | # construct output image coordinates
92 | # out_coord = [[[ 0,0],[ 0,1],[ 0,2],...,[0,15]],
93 | # [[ 1,0],[ 1,1],[ 1,2],...,[1,15]],
94 | # ...
95 | # [[15,0],[15,1],[15,2],...,[15,15]]]
96 | array_w = np.arange(w) #[0,1,2,...,15]
97 | array_h = np.arange(h) #[0,1,2,...,15]
98 | X, Y = np.meshgrid(array_w, array_h) #[h,w]
99 | out_coord = np.stack([X,Y], axis=2) #[h,w,2]
100 | out_coord = np.expand_dims(out_coord, axis=0).astype(np.float32) #[1,h,w,2]
101 | out_coord = tf.tile(out_coord, [b,1,1,1]) #[b,h,w,2]
102 |
103 | # Find the corresponding point in the input image
104 | in_coord = tf.matmul(out_coord, mat_rot, transpose_b=True) #[b,h,w,2]
105 | in_coord = tf.add(in_coord, translation) #[b,h,w,2]
106 |
107 | # Find the weights for the nearest 4 points
108 | in_coord_floor = tf.floor(in_coord) #[b,h,w,2]
109 | weight_ceil_ = tf.subtract(in_coord, in_coord_floor) #[b,h,w,2]
110 | weight_floor = tf.subtract(tf.ones(2), weight_ceil_) #[b,h,w,2]
111 |     weight_ceilX = tf.multiply(weight_ceil_[:,:,:,0:1], weight_floor[:,:,:,1:2]) #[b,h,w,1]
112 |     weight_ceilY = tf.multiply(weight_floor[:,:,:,0:1], weight_ceil_[:,:,:,1:2]) #[b,h,w,1]
113 |     weight_ceil_ = tf.multiply(weight_ceil_[:,:,:,0:1], weight_ceil_[:,:,:,1:2]) #[b,h,w,1]
114 |     weight_floor = tf.multiply(weight_floor[:,:,:,0:1], weight_floor[:,:,:,1:2]) #[b,h,w,1]
115 |
116 | # Find nearest 4 points.
117 | # Make sure they are in the input image
118 | in_coord_floor = tf.cast(in_coord_floor, dtype=tf.int32) #[b,h,w,XY]
119 | in_coord_floor = tf.maximum(in_coord_floor, tf.zeros(2, dtype=tf.int32)) #[b,h,w,XY]
120 |     in_coord_floor = tf.minimum(in_coord_floor, [input_w - 1, input_h - 1]) #[b,h,w,XY] clamp to valid pixel range
121 |
122 | in_coord_ceil_ = tf.add(in_coord_floor, tf.ones(2, dtype=tf.int32)) #[b,h,w,XY]
123 | # in_coord_ceil_ = tf.maximum(in_coord_ceil_, tf.zeros(2, dtype=tf.int32)) #[b,h,w,XY]
124 |     in_coord_ceil_ = tf.minimum(in_coord_ceil_, [input_w - 1, input_h - 1]) #[b,h,w,XY] clamp to valid pixel range
125 |
126 | in_coord_ceilX = tf.concat([in_coord_floor[:,:,:,1:2], in_coord_ceil_[:,:,:,0:1]], axis=3) #[b,h,w,YX] YX for BHWC
127 | in_coord_ceilY = tf.concat([in_coord_ceil_[:,:,:,1:2], in_coord_floor[:,:,:,0:1]], axis=3) #[b,h,w,YX]
128 | in_coord_floor = tf.concat([in_coord_floor[:,:,:,1:2], in_coord_floor[:,:,:,0:1]], axis=3) #[b,h,w,YX]
129 | in_coord_ceil_ = tf.concat([in_coord_ceil_[:,:,:,1:2], in_coord_ceil_[:,:,:,0:1]], axis=3) #[b,h,w,YX]
130 |
131 |     def barracuda_gather_nd(params, indices): # gather_nd rebuilt from reshape + gather, for backends without gather_nd
132 | if len(indices.shape) == 4 and indices.shape[0] == 1:
133 | indices = indices[0]
134 | elif len(indices.shape) == 3:
135 | pass
136 | else:
137 | print(f'{Color.RED}ERROR:{Color.RESET} gather_nd when optimizing_barracuda is enabled must have 4 dimensions and batch size = 1 or 3 dimensions.')
138 | print(f'{Color.RED}ERROR:{Color.RESET} params.shape: {params.shape}, indices.shape: {indices.shape}')
139 | sys.exit(-1)
140 | if len(params.shape) == 4 and params.shape[0] == 1:
141 | params = params[0]
142 | elif len(params.shape) == 3:
143 | pass
144 | else:
145 | print(f'{Color.RED}ERROR:{Color.RESET} gather_nd when optimizing_barracuda is enabled must have 4 dimensions and batch size = 1 or 3 dimensions.')
146 | print(f'{Color.RED}ERROR:{Color.RESET} params.shape: {params.shape}, indices.shape: {indices.shape}')
147 | sys.exit(-1)
148 |         idx_shape = indices.shape                    # [h, w, idx_dims]
149 |         params_shape = params.shape                  # dims being indexed, then gathered dims
150 |         idx_dims = idx_shape[-1]                     # how many leading dims each index addresses
151 |         gather_shape = params_shape[idx_dims:]       # trailing dims gathered wholesale
152 |         params_flat = tf.reshape(params, tf.concat([[-1], gather_shape], axis=0)) # flatten the indexed dims
153 |         axis_step = tf.math.cumprod(params_shape[:idx_dims], exclusive=True, reverse=True) # row-major strides
154 |         mul = tf.math.multiply(indices, axis_step)
155 |         indices_flat = tf.reduce_sum(mul, axis=-1)   # multi-dim index -> flat index
156 |         result_flat = tf.gather(params_flat, indices_flat) # plain gather, which Barracuda supports
157 |         return tf.expand_dims(tf.reshape(result_flat, tf.concat([idx_shape[:-1], gather_shape], axis=0)), axis=0)
158 |
159 | # calc final pixel value
160 | if not optimizing_barracuda and not optimaization_for_myriad:
161 | value_floor = tf.gather_nd(params=features, indices=in_coord_floor, batch_dims=1) #[b,h,w,32]
162 | value_ceilX = tf.gather_nd(params=features, indices=in_coord_ceilX, batch_dims=1) #[b,h,w,32]
163 | value_ceilY = tf.gather_nd(params=features, indices=in_coord_ceilY, batch_dims=1) #[b,h,w,32]
164 | value_ceil_ = tf.gather_nd(params=features, indices=in_coord_ceil_, batch_dims=1) #[b,h,w,32]
165 | else:
166 | value_floor = barracuda_gather_nd(params=features, indices=in_coord_floor) #[b,h,w,32]
167 | value_ceilX = barracuda_gather_nd(params=features, indices=in_coord_ceilX) #[b,h,w,32]
168 | value_ceilY = barracuda_gather_nd(params=features, indices=in_coord_ceilY) #[b,h,w,32]
169 | value_ceil_ = barracuda_gather_nd(params=features, indices=in_coord_ceil_) #[b,h,w,32]
170 |
171 | value_floor_fraction = tf.multiply(value_floor, weight_floor)
172 | value_ceil__fraction = tf.multiply(value_ceil_, weight_ceil_)
173 | value_ceilX_fraction = tf.multiply(value_ceilX, weight_ceilX)
174 | value_ceilY_fraction = tf.multiply(value_ceilY, weight_ceilY)
175 |
176 | #[b,h,w,32]
177 | value = tf.add(
178 | tf.add(value_floor_fraction, value_ceil__fraction),
179 | tf.add(value_ceilX_fraction, value_ceilY_fraction)
180 | )
181 |
182 | return value
183 |
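
The weight arithmetic above is standard four-neighbor bilinear interpolation; the same
blend for a single-channel image and one in-range coordinate, as a scalar numpy sketch:

import numpy as np

def bilinear_sample(F, x, y):
    # F: [H, W] image; (x, y): fractional pixel coordinates inside the image.
    x0, y0 = int(np.floor(x)), int(np.floor(y))
    x1, y1 = min(x0 + 1, F.shape[1] - 1), min(y0 + 1, F.shape[0] - 1)
    wx, wy = x - x0, y - y0
    return ((1 - wx) * (1 - wy) * F[y0, x0] + wx * (1 - wy) * F[y0, x1] +
            (1 - wx) * wy * F[y1, x0] + wx * wy * F[y1, x1])
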
184 | # Compute the inverse of the matrix that represents the affine transform which
185 | # crops the original image to the area just covering the landmarks specified by
186 | # "subset indices", rotated so that the line joining the landmarks specified by
187 | # "Left index" and "Right index" becomes horizontal.
188 | # The inverse (rather than the forward matrix) is returned because the inverse
189 | # is what the subsequent computations actually use.
190 | def Landmarks2TransformMatrix(operator, custom_options, tensors, interpreter, landmarks3d=None):
191 | if landmarks3d is None:
192 | landmarks3d = tensors[operator['inputs'][0]] #float32 [b,468,3] landmarks
193 | landmarks2d = landmarks3d[:,:,0:2] # [b,468,2]
194 | b = landmarks3d.shape[0]
195 |
196 | ######################################
197 | # calc rotation
198 | ######################################
199 | rot90_t = tf.constant(
200 | [
201 | [ 0.0, 1.0],
202 | [ -1.0, 0.0]
203 | ]
204 | ) #[2,2], already transposed
205 |
206 | idx_rot_l = custom_options['left_rotation_idx']
207 | idx_rot_r = custom_options['right_rotation_idx']
208 | left_ = landmarks2d[:,idx_rot_l:idx_rot_l+1,:] #[b,1,2]
209 | right = landmarks2d[:,idx_rot_r:idx_rot_r+1,:] #[b,1,2]
210 |
211 | delta = tf.subtract(right, left_) #[b,1,2]
212 | length = tf.norm(delta, axis=2, keepdims=True) #[b,1,1]
213 |
214 | u = tf.divide(delta, length) #[b,1,2] = [[ dx, dy]]
215 | v = tf.matmul(u, rot90_t) #[b,1,2] = [[-dy, dx]]
216 |
217 | # mat_rot_inv = [[ dx, dy],
218 | # [-dy, dx]]
219 | # mat_rot = [[ dx, -dy],
220 | # [ dy, dx]]
221 |     mat_rot_inv = tf.concat([u, v], axis=1) #[b,2,2] rotation from cropped-image coordinates back to original-image coordinates
222 |     mat_rot = tf.transpose(mat_rot_inv, perm=[0,2,1]) #[b,2,2] rotation from original-image coordinates to cropped-image coordinates
223 |
224 | ######################################
225 | # calc crop size and center
226 | ######################################
227 | subset_idxs = custom_options['subset_idxs'] #[80]
228 | landmarks2d_subset = tf.gather(landmarks2d, indices=subset_idxs, axis=1) #[b,80,2]
229 |     landmarks2d_subset_rotated = tf.matmul(landmarks2d_subset, mat_rot) #[b,80,2] rotate the landmark coordinates from the original image into the cropped image's orientation
230 | landmarks2d_subset_rotated_min = tf.reduce_min(landmarks2d_subset_rotated, axis=1, keepdims=True) #[b,1,2]
231 | landmarks2d_subset_rotated_max = tf.reduce_max(landmarks2d_subset_rotated, axis=1, keepdims=True) #[b,1,2]
232 |
233 | crop_size = tf.subtract(landmarks2d_subset_rotated_max, landmarks2d_subset_rotated_min) #[b,1,2], max - min
234 | center = tf.multiply(tf.add(landmarks2d_subset_rotated_min, landmarks2d_subset_rotated_max), tf.constant(0.5)) #[b,1,2], 1/2 * (max + min)
235 |     center = tf.matmul(center, mat_rot_inv) #[b,1,2] rotate back from the cropped image's orientation to the original image's orientation
236 |
237 | ######################################
238 | # calc scale
239 | ######################################
240 | # s = [[scale_x * crop_size.x / output_w],
241 | # [scale_y * crop_size.y / output_h]]]
242 | output_w = custom_options['output_width']
243 | output_h = custom_options['output_height']
244 | scale_x = custom_options['scale_x']
245 | scale_y = custom_options['scale_y']
246 | scaling_const_x = scale_x / output_w
247 | scaling_const_y = scale_y / output_h
248 | scaling_const = tf.constant([[scaling_const_x, scaling_const_y]]) #[1,2]
249 | scale = tf.multiply(scaling_const, crop_size) #[b,1,2]
250 |
251 | ######################################
252 | # calc translation and final mat
253 | ######################################
254 | # mat = [[ sx*dx, -sy*dy, 0, tx],
255 | # [ sx*dy, sy*dx, 0, ty]]
256 | # where
257 | #
258 | # t = center - shift
259 | #
260 | # shift = -0.5 * output_w * sx * u
261 | # + -0.5 * output_h * sy * v
262 | sxu = tf.multiply(u, scale[:,:,0:1]) #[b,1,2]
263 | syv = tf.multiply(v, scale[:,:,1:2]) #[b,1,2]
264 | zeros = tf.zeros([b, 1, 2])
265 |
266 | shift_u = tf.multiply(sxu, output_w * 0.5) #[b,1,2]
267 | shift_v = tf.multiply(syv, output_h * 0.5) #[b,1,2]
268 | shift = tf.add(shift_u, shift_v) #[b,1,2]
269 | translation = tf.subtract(center, shift) #[b,1,2]
270 |
271 | mat = tf.concat([sxu, syv, zeros, translation], axis=1) #[b,4,2]
272 | mat = tf.transpose(mat, perm=[0,2,1]) #[b,2,4]
273 |
274 | # mat = [[ sx*dx, -sy*dy, 0, tx],
275 | # [ sx*dy, sy*dx, 0, ty],
276 | # [ 0, 0, 1, 0],
277 | # [ 0, 0, 0, 1]]
278 | unit_zw = tf.tile(tf.constant([[[0.0, 0.0, 1.0, 0.0],
279 | [0.0, 0.0, 0.0, 1.0]]]), [b,1,1]) #[b,2,4]
280 | mat = tf.concat([mat, unit_zw], axis=1) #[b,4,4]
281 | return mat
282 |
--------------------------------------------------------------------------------
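
Because each op above also accepts its tensors as explicit keyword arguments, the
functions can be exercised standalone in a TF1-style graph. A hedged sketch; every
option value below is made up for illustration, not MediaPipe's real configuration:

import tensorflow.compat.v1 as tf
from tflite2tensorflow.mediapipeCustomOp import Landmarks2TransformMatrix, TransformLandmarks

tf.disable_eager_execution()
custom_options = {
    'left_rotation_idx': 33, 'right_rotation_idx': 263,
    'subset_idxs': [33, 133, 263, 362],
    'output_width': 192, 'output_height': 192,
    'scale_x': 1.5, 'scale_y': 1.5,
}
landmarks3d = tf.placeholder(tf.float32, [1, 468, 3])
mat = Landmarks2TransformMatrix(None, custom_options, None, None,
                                landmarks3d=landmarks3d)            # [1,4,4]
landmarks2d = tf.placeholder(tf.float32, [1, 80, 2])
moved = TransformLandmarks(None, None, None, None,
                           landmarks2d=landmarks2d, mat=mat)        # [1,80,2]
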
/tflite2tensorflow/view_npy.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 |
3 | import numpy as np
4 | from matplotlib import pyplot as plt
5 | import argparse
6 |
7 | def main():
8 | parser = argparse.ArgumentParser()
9 |     parser.add_argument('--npy_file_path', type=str, default='calibration_data_img_sample.npy', help='Specify the path to the .npy file that contains the binarized image data')
10 | args = parser.parse_args()
11 | npy_file_path = args.npy_file_path
12 | img_array = np.load(npy_file_path)
13 |
14 | for idx in range(img_array.shape[0]):
15 | print(img_array[idx].shape)
16 | plt.imshow(img_array[idx])
17 | plt.show()
18 |
19 | if __name__ == '__main__':
20 | main()
21 |
--------------------------------------------------------------------------------
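
Run standalone, the viewer iterates over the batch dimension and shows each image in
turn; a typical invocation (point --npy_file_path at whatever .npy file you generated):

python tflite2tensorflow/view_npy.py --npy_file_path calibration_data_img_sample.npy
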