├── .gitignore
├── LICENSE
├── OP_LIST.md
├── README.md
├── requirements.txt
├── setup.py
├── tests
├── __init__.py
├── export_application
│ ├── __init__.py
│ ├── imagenet_classes.py
│ ├── test_export_srgan.py
│ └── test_export_vgg16.py
├── test_activation.py
├── test_adaptivepool.py
├── test_batchnorm.py
├── test_conv.py
├── test_custom_matmul.py
├── test_deconv.py
├── test_depthwiseconv.py
├── test_embedding.py
├── test_extend.py
├── test_globalpool.py
├── test_groupconv2d.py
├── test_layernorm.py
├── test_linear.py
├── test_mask_conv.py
├── test_merge.py
├── test_noise.py
├── test_onnx.py
├── test_padding.py
├── test_pool.py
├── test_rnn.py
├── test_sampling.py
├── test_scale.py
├── test_shape.py
├── test_stack.py
├── test_subpixelconv.py
└── test_topology.py
└── tlx2onnx
├── __init__.py
├── common
├── __init__.py
├── onnx_tool.py
├── preprocessing.py
└── utils.py
├── main.py
├── op_mapper
├── __init__.py
├── activation.py
├── datatype_mapping.py
├── math
│ ├── __init__.py
│ ├── add.py
│ └── matmul.py
├── nn
│ ├── __init__.py
│ ├── adaptivepool.py
│ ├── conv.py
│ ├── deconv.py
│ ├── dropout.py
│ ├── dwconv.py
│ ├── embedding.py
│ ├── extend.py
│ ├── flatten.py
│ ├── globalpool.py
│ ├── groupconv.py
│ ├── linear.py
│ ├── mask_conv.py
│ ├── merge.py
│ ├── noise.py
│ ├── normalization.py
│ ├── padding.py
│ ├── pool.py
│ ├── resampling.py
│ ├── rnn.py
│ ├── scale.py
│ ├── shape.py
│ ├── stack.py
│ └── subpixelconv.py
├── op_mapper.py
└── tensor.py
└── topology.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.egg-info/
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Jupyter Notebook
11 | .ipynb_checkpoints
12 |
13 | # Environments
14 | .env
15 | .venv
16 | env/
17 |
18 | # IDE Specific directories
19 | .DS_Store
20 | .idea
21 | .vscode/
22 |
23 | # Data Files and ByteCode files
24 | *.gz
25 | *.npz
26 |
27 | # onnx file
28 | *.onnx
29 |
30 | # packaging
31 | build/
32 | dist/
33 | eggs/
34 | .eggs/
35 | sdist/
36 | var/
37 | wheels/
38 | *.egg-info/
39 | model/
40 | data/
--------------------------------------------------------------------------------
/OP_LIST.md:
--------------------------------------------------------------------------------
1 | ## TLX2ONNX Support Status
2 |
3 | | TensorLayerX Layers | ONNX Opset Versions | support_status |
4 | | --------------- | -------------------- | -------------- |
5 | | Conv1d | 1~13 | Supported|
6 | | Conv2d | 1~13 | Supported|
7 | | Conv3d | 1~13 | Supported|
8 | | ConvTranspose1d | 1~12 | Supported|
9 | | ConvTranspose2d | 1~12 | Supported|
10 | | ConvTranspose3d | 1~12 | Supported|
11 | | Linear | 1~12 | Supported|
12 | | MaxPool1d | 1~12 | Supported|
13 | | AvgPool1d | 1~12 | Supported|
14 | | MaxPool2d | 1~12 | Supported|
15 | | AvgPool2d | 1~12 | Supported|
16 | | MaxPool3d | 1~12 | Supported|
17 | | AvgPool3d | 1~12 | Supported|
18 | | GlobalMaxPool1d | 1~12 | Supported|
19 | | GlobalAvgPool1d | 1~12 | Supported|
20 | | GlobalMaxPool2d | 1~12 | Supported|
21 | | GlobalAvgPool2d | 1~12 | Supported|
22 | | GlobalMaxPool3d | 1~12 | Supported|
23 | | GlobalAvgPool3d | 1~12 | Supported|
24 | |AdaptiveAvgPool1d| 1~12 | Supported|
25 | |AdaptiveAvgPool2d| 1~12 | Supported|
26 | |AdaptiveAvgPool3d| 1~12 | Supported|
27 | |AdaptiveMaxPool1d| 1~12 | Supported|
28 | |AdaptiveMaxPool2d| 1~12 | Supported|
29 | |AdaptiveMaxPool3d| 1~12 | Supported|
30 | | Onehot | 9~12 | Supported|
31 | | Embedding | 1~12 | Supported|
32 | | Flatten | 1~12 | Supported|
33 | | Reshape | 1~12 | Supported|
34 | | Transpose | 1~12 | Supported|
35 | | Dropout | 1~12 | Supported|
36 | | BatchNorm | 1~12 | Supported|
37 | | BatchNorm1d | 1~12 | Supported|
38 | | BatchNorm2d | 1~12 | Supported|
39 | | BatchNorm3d | 1~12 | Supported|
40 | | PRelu | 1~12 | Supported|
41 | | ReLU |1~12|Supported|
42 | | Tanh |1~12|Supported|
43 | | Sigmoid |1~12|Supported|
44 | | LeakyRelu |1~12|Supported|
45 | | Softplus |1~12|Supported|
46 | | ReLU6 | 1~12 | Supported|
47 | | LeakyReLU6 |1~12|Supported|
48 | | Mish |1~12|Supported|
49 | | Softmax |1~12|Supported|
50 | |Swish| 1~12|Supported|
51 | |Elu| 1~12|Supported|
52 | |ExpandDims| 1~12|Supported|
53 | |Tile| 1~12|Supported|
54 | |UpSampling2d| 1~12|Supported|
55 | |DownSampling2d| 1~12|Supported|
56 | |Concat| 1~12 | Supported|
57 | |Elementwise| 1~12 | Supported|
58 | |GaussianNoise| 1~12 | Supported|
59 | |PadLayer| 1~12 | Supported|
60 | |ZeroPad1d| 1~12 | Supported|
61 | |ZeroPad2d| 1~12 | Supported|
62 | |ZeroPad3d| 1~12 | Supported|
63 | |Stack| 1~12 | Supported|
64 | |UnStack| 1~12 | Supported|
65 | |Scale| 1~12 | Supported|
66 | |RNN|1~12 | Supported|
67 | |RNNCell|1~12 | Supported|
68 | |LSTM|1~12 | Supported|
69 | |LSTMCell|1~12 | Supported|
70 | |GRU|1~12 | Supported|
71 | |GRUCell|1~12 | Supported|
72 | |LayerNorm| 17 | Supported|
73 | |GroupConv2d| | |
74 | |SeparableConv1d| | |
75 | |SeparableConv2d| | |
76 | |SubpixelConv2d| | |
77 | | Matmul | 1~12 | Supported|
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TLX2ONNX
2 | ONNX Model Exporter for TensorLayerX. It's updated on Both [OpenI](https://git.openi.org.cn/OpenI/TLX2ONNX) and [Github](https://github.com/tensorlayer/TLX2ONNX/). You can get a [free GPU](https://git.openi.org.cn/OpenI/TLX2ONNX/debugjob?debugListType=all) on OpenI to use this project.
3 |
4 | ## Introduction
5 |
6 | TLX2ONNX enables users to convert models from TensorLayerX to ONNX.
7 |
8 | - Supported operators. TLX2ONNX can stably export models to ONNX Opset 9~11, and partially support lower version opset. More details please refer to [Operator list](OP_LIST.md).
9 | - Supported Layers. You can find officially verified Layers by TLX2ONNX/tests in [TLX2ONNX/test](https://github.com/tensorlayer/TLX2ONNX/tree/main/tests).
10 |
11 | ## Installation
12 |
13 | #### Via Pip
14 | ```bash
15 | pip install tlx2onnx
16 | ```
17 |
18 | #### From Source
19 | ```bash
20 | git clone https://github.com/tensorlayer/TLX2ONNX.git
21 | cd TLX2ONNX
22 | python setup.py install
23 | ```
24 |
25 | ## Usage
26 | TLX2ONNX can convert models built using TensorLayerX Module Subclass and Layers, and the Layers support list can be found in [Operator list](OP_LIST.md).
27 |
28 | The following is an example of converting a multi-layer perceptron. You can get the code from [here](https://github.com/tensorlayer/TLX2ONNX/tree/main/tests/test_merge.py).
29 | ```python
30 | import os
31 | os.environ["TL_BACKEND"] = 'tensorflow'
32 | import tensorlayerx as tlx
33 | from tensorlayerx.nn import Module
34 | from tensorlayerx.nn import Linear, Concat, Elementwise
35 | from tlx2onnx.main import export
36 | import onnxruntime as rt
37 | import numpy as np
38 |
39 | class CustomModel(Module):
40 | def __init__(self):
41 | super(CustomModel, self).__init__(name="custom")
42 | self.linear1 = Linear(in_features=20, out_features=10, act=tlx.ReLU, name='relu1_1')
43 | self.linear2 = Linear(in_features=20, out_features=10, act=tlx.ReLU, name='relu2_1')
44 | self.concat = Concat(concat_dim=1, name='concat_layer')
45 |
46 | def forward(self, inputs):
47 | d1 = self.linear1(inputs)
48 | d2 = self.linear2(inputs)
49 | outputs = self.concat([d1, d2])
50 | return outputs
51 |
52 | net = CustomModel()
53 | input = tlx.nn.Input(shape=(3, 20), init=tlx.initializers.RandomNormal())
54 | net.set_eval()
55 | output = net(input)
56 | print("tlx out", output)
57 | onnx_model = export(net, input_spec=input, path='concat.onnx')
58 |
59 | # Infer Model
60 | sess = rt.InferenceSession('concat.onnx')
61 | input_name = sess.get_inputs()[0].name
62 | output_name = sess.get_outputs()[0].name
63 | input_data = np.array(input, dtype=np.float32)
64 | result = sess.run([output_name], {input_name: input_data})
65 | print('onnx out', result)
66 | ```
67 | The converted onnx file can be viewed via [Netron](https://github.com/lutzroeder/netron).
68 |
69 |

70 |
71 |
72 | The converted results have almost no loss of accuracy.
73 | And the graph shows the input and output sizes of each layer, which is very helpful for checking the model.
74 |
75 |
76 | # Citation
77 |
78 | If you find TensorLayerX or TLX2ONNX useful for your project, please cite the following papers:
79 |
80 | ```
81 | @article{tensorlayer2017,
82 | author = {Dong, Hao and Supratak, Akara and Mai, Luo and Liu, Fangde and Oehmichen, Axel and Yu, Simiao and Guo, Yike},
83 | journal = {ACM Multimedia},
84 | title = {{TensorLayer: A Versatile Library for Efficient Deep Learning Development}},
85 | url = {http://tensorlayer.org},
86 | year = {2017}
87 | }
88 |
89 | @inproceedings{tensorlayer2021,
90 | title={TensorLayer 3.0: A Deep Learning Library Compatible With Multiple Backends},
91 | author={Lai, Cheng and Han, Jiarong and Dong, Hao},
92 | booktitle={2021 IEEE International Conference on Multimedia \& Expo Workshops (ICMEW)},
93 | pages={1--3},
94 | year={2021},
95 | organization={IEEE}
96 | }
97 | ```
98 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | onnx<=1.11.0
2 | tensorlayerx>=0.5.6
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import absolute_import
5 | from setuptools import setup, find_packages
6 | import os
7 |
8 | MAJOR = 0
9 | MINOR = 0
10 | PATCH = 2
11 | PRE_RELEASE = ''
12 | # Use the following formatting: (major, minor, patch, prerelease)
13 | VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
14 |
15 | long_description = "tlx2onnx is a toolkit for converting trained model of TensorLayerX to ONNX. \n\n"
16 | long_description += "Usage: export(tlx_model, input_spec=input, path='model.onnx') \n"
17 | long_description += "GitHub: https://github.com/tensorlayer/TLX2ONNX \n"
18 | long_description += "Email: tensorlayer@gmail.com"
19 |
20 | def req_file(filename, folder=''):
21 | with open(os.path.join(folder, filename)) as f:
22 | content = f.readlines()
23 | return [x.strip() for x in content]
24 |
25 | setup(
26 | name="tlx2onnx",
27 | version='.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]),
28 | author="TLX2ONNX Contributors",
29 | author_email="tensorlayer@gmail.com",
30 | description="a toolkit for converting trained model of TensorLayerX to ONNX.",
31 | long_description=long_description,
32 | long_description_content_type="text/plain",
33 | url="https://github.com/tensorlayer/TLX2ONNX",
34 | packages=find_packages(),
35 | install_requires=req_file("requirements.txt"),
36 | classifiers=[
37 | # How mature is this project? Common values are
38 | # 1 - Planning 2 - Pre-Alpha 3 - Alpha 4 - Beta 5 - Production/Stable 6 - Mature 7 - Inactive
39 | 'Development Status :: 3 - Alpha',
40 |
41 | # Specify the Python versions you support here.
42 | "Programming Language :: Python :: 3",
43 | "Programming Language :: Python :: 3.5",
44 | "Programming Language :: Python :: 3.6",
45 | "Programming Language :: Python :: 3.7",
46 |
47 | # Indicate who your project is intended for
48 | "Intended Audience :: Developers",
49 | "Intended Audience :: Science/Research",
50 | "Intended Audience :: Information Technology",
51 |
52 | # Pick your license as you wish (should match "license" above)
53 | "License :: OSI Approved :: Apache Software License",
54 |
55 | # Indicate what your project relates to
56 | 'Topic :: Scientific/Engineering :: Artificial Intelligence',
57 |
58 | # Additional Settings
59 | "Operating System :: POSIX",
60 | "Operating System :: Microsoft :: Windows",
61 | ],
62 | license='Apache 2.0',
63 | )
64 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tensorlayer/TLX2ONNX/d07911698d173a987cb0ae929b910ff3eedc5c3d/tests/__init__.py
--------------------------------------------------------------------------------
/tests/export_application/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tensorlayer/TLX2ONNX/d07911698d173a987cb0ae929b910ff3eedc5c3d/tests/export_application/__init__.py
--------------------------------------------------------------------------------
/tests/export_application/test_export_vgg16.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import tensorlayerx as tlx
5 | from tlx2onnx import export
6 | import os
7 | import numpy as np
8 | from tensorlayerx import logging
9 | from tensorlayerx.files import assign_weights, maybe_download_and_extract
10 | from tensorlayerx.nn import (BatchNorm, Conv2d, Linear, Flatten, Sequential, MaxPool2d)
11 | from tensorlayerx.nn import Module
12 | import onnxruntime as rt
13 | from tests.export_application.imagenet_classes import class_names
14 | import tensorflow as tf
15 | import onnx
16 | from onnx import shape_inference
17 |
18 | __all__ = [
19 | 'VGG',
20 | 'vgg16'
21 | ]
22 |
23 | layer_names = [
24 | ['conv1_1', 'conv1_2'], 'pool1', ['conv2_1', 'conv2_2'], 'pool2',
25 | ['conv3_1', 'conv3_2', 'conv3_3', 'conv3_4'], 'pool3', ['conv4_1', 'conv4_2', 'conv4_3', 'conv4_4'], 'pool4',
26 | ['conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'], 'pool5', 'flatten', 'fc1_relu', 'fc2_relu', 'outputs'
27 | ]
28 |
29 | cfg = {
30 | 'A': [[64], 'M', [128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'],
31 | 'B': [[64, 64], 'M', [128, 128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'],
32 | 'D':
33 | [
34 | [64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M', [512, 512, 512], 'M', [512, 512, 512], 'M', 'F',
35 | 'fc1', 'fc2', 'O'
36 | ],
37 | 'E':
38 | [
39 | [64, 64], 'M', [128, 128], 'M', [256, 256, 256, 256], 'M', [512, 512, 512, 512], 'M', [512, 512, 512, 512],
40 | 'M', 'F', 'fc1', 'fc2', 'O'
41 | ],
42 | }
43 |
44 | mapped_cfg = {
45 | 'vgg11': 'A',
46 | 'vgg11_bn': 'A',
47 | 'vgg13': 'B',
48 | 'vgg13_bn': 'B',
49 | 'vgg16': 'D',
50 | 'vgg16_bn': 'D',
51 | 'vgg19': 'E',
52 | 'vgg19_bn': 'E'
53 | }
54 |
55 | model_urls = {
56 | 'vgg16': 'http://www.cs.toronto.edu/~frossard/vgg16/',
57 | }
58 |
59 | model_saved_name = {'vgg16': 'vgg16_weights.npz', 'vgg19': 'vgg19.npy'}
60 |
61 |
62 | class VGG(Module):
63 |
64 | def __init__(self, layer_type, batch_norm=False, end_with='outputs', name=None):
65 | super(VGG, self).__init__(name=name)
66 | self.end_with = end_with
67 |
68 | config = cfg[mapped_cfg[layer_type]]
69 | self.make_layer = make_layers(config, batch_norm, end_with)
70 |
71 | def forward(self, inputs):
72 | """
73 | inputs : tensor
74 | Shape [None, 224, 224, 3], value range [0, 1].
75 | """
76 |
77 | # inputs = inputs * 255 - tlx.convert_to_tensor(np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3]))
78 | out = self.make_layer(inputs)
79 |
80 |
81 | return out
82 |
83 |
84 | def make_layers(config, batch_norm=False, end_with='outputs'):
85 | layer_list = []
86 | is_end = False
87 | for layer_group_idx, layer_group in enumerate(config):
88 | if isinstance(layer_group, list):
89 | for idx, layer in enumerate(layer_group):
90 | layer_name = layer_names[layer_group_idx][idx]
91 | n_filter = layer
92 | if idx == 0:
93 | if layer_group_idx > 0:
94 | in_channels = config[layer_group_idx - 2][-1]
95 | else:
96 | in_channels = 3
97 | else:
98 | in_channels = layer_group[idx - 1]
99 | layer_list.append(
100 | Conv2d(
101 | out_channels=n_filter, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME',
102 | in_channels=in_channels, name=layer_name
103 | )
104 | )
105 | if batch_norm:
106 | layer_list.append(BatchNorm(num_features=n_filter))
107 | if layer_name == end_with:
108 | is_end = True
109 | break
110 | else:
111 | layer_name = layer_names[layer_group_idx]
112 | if layer_group == 'M':
113 | layer_list.append(MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME', name=layer_name))
114 | elif layer_group == 'O':
115 | layer_list.append(Linear(out_features=1000, in_features=4096, name=layer_name))
116 | elif layer_group == 'F':
117 | layer_list.append(Flatten(name='flatten'))
118 | elif layer_group == 'fc1':
119 | layer_list.append(Linear(out_features=4096, act=tlx.ReLU, in_features=512 * 7 * 7, name=layer_name))
120 | elif layer_group == 'fc2':
121 | layer_list.append(Linear(out_features=4096, act=tlx.ReLU, in_features=4096, name=layer_name))
122 | if layer_name == end_with:
123 | is_end = True
124 | if is_end:
125 | break
126 | return Sequential(layer_list)
127 |
128 |
129 | def restore_model(model, layer_type):
130 | logging.info("Restore pre-trained weights")
131 | # download weights
132 | maybe_download_and_extract(model_saved_name[layer_type], 'model', model_urls[layer_type])
133 | weights = []
134 | npz = np.load(os.path.join('model', model_saved_name[layer_type]), allow_pickle=True)
135 | # get weight list
136 | for val in sorted(npz.items()):
137 | logging.info(" Loading weights %s in %s" % (str(val[1].shape), val[0]))
138 | weights.append(val[1])
139 | if len(model.all_weights) == len(weights):
140 | break
141 | assign_weights(weights, model)
142 | del weights
143 |
144 |
145 | def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None):
146 | if mode == 'dynamic':
147 | model = VGG(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name)
148 | if pretrained:
149 | restore_model(model, layer_type='vgg16')
150 | return model
151 |
152 |
153 |
154 | input = tlx.nn.Input(shape=(1, 224, 224, 3))
155 | net = vgg16(pretrained=True)
156 | net.set_eval()
157 | onnx_model = export(net, input_spec=input, path='vgg.onnx')
158 |
159 | # Infer Model
160 | sess = rt.InferenceSession('vgg.onnx')
161 |
162 | input_name = sess.get_inputs()[0].name
163 | output_name = sess.get_outputs()[0].name
164 |
165 | # Preprocess input data
166 | img = tlx.vision.load_image('data/tiger.jpeg')
167 | img = tlx.vision.transforms.Resize((224, 224))(img).astype(np.float32) / 255
168 | inputs = img * 255 - tlx.convert_to_tensor(np.array([123.68, 116.779, 103.939], dtype=np.float32))
169 | input_data = inputs[np.newaxis, :, :, :]
170 | input_data = np.array(input_data, dtype=np.float32)
171 | # Infer output
172 | result = sess.run([output_name], {input_name: input_data})
173 |
174 | print(np.shape(result))
175 | result = np.squeeze(result, axis = (0, 1))
176 | probs = tf.nn.softmax(result).numpy()
177 | print(probs)
178 | preds = (np.argsort(probs)[::-1])[0:5]
179 | for p in preds:
180 | print(class_names[p], probs[p])
181 |
182 | # # Debug Infer
183 | # def get_tensor_shape(tensor):
184 | # dims = tensor.type.tensor_type.shape.dim
185 | # n = len(dims)
186 | # return [dims[i].dim_value for i in range(n)]
187 | #
188 | #
189 | # def runtime_infer(onnx_model):
190 | # graph = onnx_model.graph
191 | # graph.output.insert(0, graph.input[0])
192 | # for i, tensor in enumerate(graph.value_info):
193 | # graph.output.insert(i + 1, tensor)
194 | # model_file = "temp.onnx"
195 | # onnx.save(onnx_model, model_file)
196 | #
197 | # sess = rt.InferenceSession(model_file)
198 | # input_name = sess.get_inputs()[0].name
199 | # # preprocess input
200 | # img = tlx.vision.load_image('data/tiger.jpeg')
201 | # img = tlx.vision.transforms.Resize((224, 224))(img).astype(np.float32) / 255
202 | # inputs = img * 255 - tlx.convert_to_tensor(np.array([123.68, 116.779, 103.939], dtype=np.float32))
203 | # input_data = inputs[np.newaxis, :, :, :]
204 | # input_data = np.array(input_data, dtype=np.float32)
205 | #
206 | # outputs = {}
207 | # for out in sess.get_outputs():
208 | # tensor = sess.run([out.name], {input_name: input_data})
209 | # outputs[str(out.name)] = np.array(tensor[0]).shape
210 | # if out.name == '_inputlayer_1_node_00_t_t':
211 | # print(out.name, tensor, np.shape(tensor))
212 | # # os.remove(model_file)
213 | # return outputs
214 | #
215 | #
216 | # def infer_shapes(model_file, running_mode=False):
217 | # onnx_model = onnx.load(model_file)
218 | # onnx.checker.check_model(onnx_model)
219 | # inferred_onnx_model = shape_inference.infer_shapes(onnx_model)
220 | #
221 | # save_path = model_file[:-5] + "_new.onnx"
222 | # onnx.save(inferred_onnx_model, save_path)
223 | # print("Model is saved in:", save_path)
224 | #
225 | # outputs = {}
226 | # if running_mode:
227 | # outputs = runtime_infer(inferred_onnx_model)
228 | # else:
229 | # graph = inferred_onnx_model.graph
230 | # # only 1 input tensor
231 | # tensor = graph.input[0]
232 | # outputs[str(tensor.name)] = get_tensor_shape(tensor)
233 | # # process tensor
234 | # for tensor in graph.value_info:
235 | # outputs[str(tensor.name)] = get_tensor_shape(tensor)
236 | # # output tensor
237 | # for tensor in graph.output:
238 | # outputs[str(tensor.name)] = get_tensor_shape(tensor)
239 | # return outputs
240 | #
241 | # if __name__ == '__main__':
242 | # model_1 = "vgg.onnx"
243 | # outputs = infer_shapes(model_1, True)
244 | # print(outputs)
245 |
246 |
--------------------------------------------------------------------------------
/tests/test_activation.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import ReLU, LeakyReLU, ELU, Tanh, Softmax, Softplus, Sigmoid, ReLU6, \
9 | PRelu, Mish, Swish, LeakyReLU6
10 | from tlx2onnx.main import export
11 | import onnxruntime as rt
12 | import numpy as np
13 |
14 |
15 | class MLP(Module):
16 | def __init__(self):
17 | super(MLP, self).__init__()
18 | self.relu = ReLU()
19 | self.leakyrelu = LeakyReLU()
20 | self.elu = ELU()
21 | self.tanh = Tanh()
22 | self.softmax = Softmax()
23 | self.softplus = Softplus()
24 | self.sigmoid = Sigmoid()
25 | self.relu6 = ReLU6()
26 | self.prelu = PRelu()
27 | self.mish = Mish()
28 | self.swish = Swish()
29 | self.lrelu6 = LeakyReLU6()
30 |
31 | def forward(self, x):
32 | z = self.relu(x)
33 | z = self.leakyrelu(z)
34 | z = self.elu(z)
35 | z = self.tanh(z)
36 | z = self.softmax(z)
37 | z = self.softplus(z)
38 | z = self.sigmoid(z)
39 | z = self.relu6(z)
40 | z = self.prelu(z)
41 | z = self.mish(z)
42 | z = self.swish(z)
43 | z = self.lrelu6(z)
44 | return z
45 |
46 | net = MLP()
47 | net.set_eval()
48 | input = tlx.nn.Input(shape=(4, 5, 5, 3))
49 | onnx_model = export(net, input_spec=input, path='activation.onnx')
50 | print("tlx out", net(input))
51 |
52 | # Infer Model
53 | sess = rt.InferenceSession('activation.onnx')
54 |
55 | input_name = sess.get_inputs()[0].name
56 | output_name = sess.get_outputs()[0].name
57 |
58 | input_data = tlx.nn.Input(shape=(4, 5, 5, 3))
59 | input_data = np.array(input_data, dtype=np.float32)
60 |
61 | result = sess.run([output_name], {input_name: input_data})
62 | print("onnx out", result)
--------------------------------------------------------------------------------
/tests/test_adaptivepool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | # os.environ["TL_BACKEND"] = 'paddle'
7 | # os.environ["TL_BACKEND"] = 'torch'
8 | # os.environ["TL_BACKEND"] = 'mindspore'
9 | import tensorlayerx as tlx
10 | from tensorlayerx.nn import Module
11 | from tensorlayerx.nn import (AdaptiveAvgPool1d,AdaptiveAvgPool2d,AdaptiveAvgPool3d)
12 | from tensorlayerx.nn import (AdaptiveMaxPool1d,AdaptiveMaxPool2d,AdaptiveMaxPool3d)
13 | from tlx2onnx.main import export
14 | import onnxruntime as rt
15 | import numpy as np
16 |
17 |
18 | ############################################ test 2d ###########################################################
19 | class Adaptiveavgpool2d(Module):
20 |
21 | def __init__(self):
22 | super(Adaptiveavgpool2d, self).__init__()
23 | self.pool1 = AdaptiveAvgPool2d(output_size=16, data_format='channels_last')
24 |
25 | def forward(self, x):
26 | z = self.pool1(x)
27 | return z
28 |
29 | net = Adaptiveavgpool2d()
30 | input = tlx.nn.Input(shape=(1, 32, 32, 3))
31 | onnx_model = export(net, input_spec=input, path='Adaptiveavgpool2d.onnx')
32 |
33 | # Infer Model
34 | sess = rt.InferenceSession('Adaptiveavgpool2d.onnx')
35 |
36 | input_name = sess.get_inputs()[0].name
37 | output_name = sess.get_outputs()[0].name
38 |
39 | input_data = tlx.nn.Input(shape=(1, 32, 32, 3))
40 | input_data = np.array(input_data, dtype=np.float32)
41 |
42 | result = sess.run([output_name], {input_name: input_data})
43 | print('Adaptiveavgpool2d result', result[0].shape)
44 |
45 |
46 | class Adaptivemaxpool2d(Module):
47 |
48 | def __init__(self):
49 | super(Adaptivemaxpool2d, self).__init__()
50 | self.pool1 = AdaptiveMaxPool2d(output_size=16, data_format='channels_last')
51 |
52 | def forward(self, x):
53 | z = self.pool1(x)
54 | return z
55 |
56 |
57 | net = Adaptivemaxpool2d()
58 | input = tlx.nn.Input(shape=(1, 32, 32, 3))
59 | onnx_model = export(net, input_spec=input, path='Adaptivemaxpool2d.onnx')
60 |
61 | # Infer Model
62 | sess = rt.InferenceSession('Adaptivemaxpool2d.onnx')
63 |
64 | input_name = sess.get_inputs()[0].name
65 | output_name = sess.get_outputs()[0].name
66 |
67 | input_data = tlx.nn.Input(shape=(1, 32, 32, 3))
68 | input_data = np.array(input_data, dtype=np.float32)
69 |
70 | result = sess.run([output_name], {input_name: input_data})
71 | print('Adaptivemaxpool2d result', result[0].shape)
72 |
73 |
74 |
75 | ############################################ test 1d ###########################################################
76 | class Adaptiveavgpool1d(Module):
77 |
78 | def __init__(self):
79 | super(Adaptiveavgpool1d, self).__init__()
80 | self.pool1 = AdaptiveAvgPool1d(output_size=16, data_format='channels_last')
81 |
82 | def forward(self, x):
83 | z = self.pool1(x)
84 | return z
85 |
86 | net = Adaptiveavgpool1d()
87 | input = tlx.nn.Input(shape=(1, 32, 3))
88 | onnx_model_1d = export(net, input_spec=input, path='Adaptiveavgpool1d.onnx')
89 |
90 | # Infer Model
91 | sess = rt.InferenceSession('Adaptiveavgpool1d.onnx')
92 |
93 | input_name = sess.get_inputs()[0].name
94 | output_name = sess.get_outputs()[0].name
95 |
96 | input_data = tlx.nn.Input(shape=(1, 32, 3))
97 | input_data = np.array(input_data, dtype=np.float32)
98 |
99 | result = sess.run([output_name], {input_name: input_data})
100 | print('Adaptiveavgpool1d result', result[0].shape)
101 |
102 |
103 |
104 | class Adaptivemaxpool1d(Module):
105 |
106 | def __init__(self):
107 | super(Adaptivemaxpool1d, self).__init__()
108 | self.pool1 = AdaptiveMaxPool1d(output_size=16, data_format='channels_last')
109 |
110 | def forward(self, x):
111 | z = self.pool1(x)
112 | return z
113 |
114 | net = Adaptivemaxpool1d()
115 | input = tlx.nn.Input(shape=(1, 32, 3))
116 | onnx_model_1d = export(net, input_spec=input, path='Adaptivemaxpool1d.onnx')
117 |
118 | # Infer Model
119 | sess = rt.InferenceSession('Adaptivemaxpool1d.onnx')
120 |
121 | input_name = sess.get_inputs()[0].name
122 | output_name = sess.get_outputs()[0].name
123 |
124 | input_data = tlx.nn.Input(shape=(1, 32, 3))
125 | input_data = np.array(input_data, dtype=np.float32)
126 |
127 | result = sess.run([output_name], {input_name: input_data})
128 | print('Adaptivemaxpool1d result', result[0].shape)
129 |
130 |
131 | ############################################ test 3d ###########################################################
132 | class Adaptiveavgpool3d(Module):
133 |
134 | def __init__(self):
135 | super(Adaptiveavgpool3d, self).__init__()
136 | self.pool1 = AdaptiveAvgPool3d(output_size=16, data_format='channels_last')
137 |
138 | def forward(self, x):
139 | z = self.pool1(x)
140 | return z
141 |
142 | net = Adaptiveavgpool3d()
143 | input = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
144 | onnx_model_1d = export(net, input_spec=input, path='Adaptiveavgpool3d.onnx')
145 |
146 | # Infer Model
147 | sess = rt.InferenceSession('Adaptiveavgpool3d.onnx')
148 |
149 | input_name = sess.get_inputs()[0].name
150 | output_name = sess.get_outputs()[0].name
151 |
152 | input_data = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
153 | input_data = np.array(input_data, dtype=np.float32)
154 |
155 | result = sess.run([output_name], {input_name: input_data})
156 | print('Adaptiveavgpool3d result', result[0].shape)
157 |
158 |
159 |
160 | class Adaptivemaxpool3d(Module):
161 |
162 | def __init__(self):
163 | super(Adaptivemaxpool3d, self).__init__()
164 | self.pool1 = AdaptiveMaxPool3d(output_size=16, data_format='channels_last')
165 |
166 | def forward(self, x):
167 | z = self.pool1(x)
168 | return z
169 |
170 | net = Adaptivemaxpool3d()
171 | input = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
172 | onnx_model_1d = export(net, input_spec=input, path='Adaptivemaxpool3d.onnx')
173 |
174 | # Infer Model
175 | sess = rt.InferenceSession('Adaptivemaxpool3d.onnx')
176 |
177 | input_name = sess.get_inputs()[0].name
178 | output_name = sess.get_outputs()[0].name
179 |
180 | input_data = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
181 | input_data = np.array(input_data, dtype=np.float32)
182 |
183 | result = sess.run([output_name], {input_name: input_data})
184 | print('Adaptivemaxpool3d result', result[0].shape)
--------------------------------------------------------------------------------
/tests/test_batchnorm.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | # os.environ["TL_BACKEND"] = 'tensorflow'
6 | os.environ['TL_BACKEND'] = 'torch'
7 | import tensorlayerx as tlx
8 | from tensorlayerx.nn import Module
9 | from tensorlayerx.nn import Conv2d, BatchNorm2d
10 | from tlx2onnx.main import export
11 | import onnxruntime as rt
12 | import numpy as np
13 |
14 |
15 | class MLP(Module):
16 | def __init__(self):
17 | super(MLP, self).__init__()
18 | # weights init
19 | self.conv1 = Conv2d(out_channels=16, kernel_size=3, stride=1, padding=(2, 2), in_channels=3, data_format='channels_last', act = tlx.nn.ReLU)
20 | self.bn = BatchNorm2d(data_format='channels_last', act=tlx.nn.ReLU)
21 |
22 |
23 | def forward(self, x):
24 | x = self.conv1(x)
25 | x = self.bn(x)
26 | return x
27 |
28 | net = MLP()
29 | net.set_eval()
30 | input = tlx.nn.Input(shape=(4, 5, 5, 3))
31 | onnx_model = export(net, input_spec=input, path='batchnorm.onnx')
32 | print("tlx out", net(input))
33 |
34 | # Infer Model
35 | sess = rt.InferenceSession('batchnorm.onnx')
36 |
37 | input_name = sess.get_inputs()[0].name
38 | output_name = sess.get_outputs()[0].name
39 |
40 | input_data = tlx.nn.Input(shape=(4, 5, 5, 3))
41 | input_data = np.array(input_data, dtype=np.float32)
42 |
43 | result = sess.run([output_name], {input_name: input_data})
44 | print("onnx out", result)
45 |
--------------------------------------------------------------------------------
/tests/test_conv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | # os.environ["TL_BACKEND"] = 'paddle'
7 | # os.environ["TL_BACKEND"] = 'torch'
8 | # os.environ["TL_BACKEND"] = 'mindspore'
9 | import tensorlayerx as tlx
10 | from tensorlayerx.nn import Module
11 | from tensorlayerx.nn import (Conv2d, Conv1d, Conv3d)
12 | from tlx2onnx.main import export
13 | import onnxruntime as rt
14 | import numpy as np
15 |
16 |
17 | ############################################ test 2d ###########################################################
18 | class CNN(Module):
19 |
20 | def __init__(self):
21 | super(CNN, self).__init__()
22 | # weights init
23 | W_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
24 | b_init2 = tlx.nn.initializers.constant(value=0.1)
25 | self.conv1 = Conv2d(64, (5, 5), (1, 1), padding=(2,2), W_init=W_init, b_init=b_init2, name='conv1', in_channels=3, data_format='channels_last', act = tlx.nn.ReLU)
26 | def forward(self, x):
27 | z = self.conv1(x)
28 | return z
29 |
30 | net = CNN()
31 | input = tlx.nn.Input(shape=(1, 32, 32, 3))
32 | net.set_eval()
33 | output = net(input)
34 | print("conv2d tlx output", output)
35 | onnx_model = export(net, input_spec=input, path='conv2d_model.onnx')
36 |
37 | # Infer Model
38 | sess = rt.InferenceSession('conv2d_model.onnx')
39 |
40 | input_name = sess.get_inputs()[0].name
41 | output_name = sess.get_outputs()[0].name
42 |
43 | input_data = tlx.nn.Input(shape=(1, 32, 32, 3))
44 | input_data = np.array(input_data, dtype=np.float32)
45 |
46 | result = sess.run([output_name], {input_name: input_data})
47 | print("conv2d onnx output", result)
48 |
49 |
50 | ############################################ test 1d ###########################################################
51 | class CNN1d(Module):
52 |
53 | def __init__(self):
54 | super(CNN1d, self).__init__()
55 | # weights init
56 | W_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
57 | b_init2 = tlx.nn.initializers.constant(value=0.1)
58 | self.conv1 = Conv1d(64, 5, 1, padding=2, W_init=W_init, b_init=b_init2, name='conv1', in_channels=3, data_format='channels_last', act = tlx.nn.ReLU)
59 | def forward(self, x):
60 | z = self.conv1(x)
61 | return z
62 |
63 | net = CNN1d()
64 | input = tlx.nn.Input(shape=(1, 32, 3))
65 | net.set_eval()
66 | output = net(input)
67 | print("conv1d tlx output", output)
68 | onnx_model = export(net, input_spec=input, path='conv1d_model.onnx')
69 |
70 | # Infer Model
71 | sess = rt.InferenceSession('conv1d_model.onnx')
72 |
73 | input_name = sess.get_inputs()[0].name
74 | output_name = sess.get_outputs()[0].name
75 |
76 | input_data = tlx.nn.Input(shape=(1, 32, 3))
77 | input_data = np.array(input_data, dtype=np.float32)
78 |
79 | result = sess.run([output_name], {input_name: input_data})
80 | print("conv1d onnx output", result)
81 |
82 |
83 | ############################################ test 3d ###########################################################
84 | class CNN3d(Module):
85 |
86 | def __init__(self):
87 | super(CNN3d, self).__init__()
88 | # weights init
89 | W_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
90 | b_init2 = tlx.nn.initializers.constant(value=0.1)
91 | self.conv1 = Conv3d(64, 5, 1, padding=2, W_init=W_init, b_init=b_init2, name='conv1', in_channels=3, data_format='channels_last', act = tlx.nn.ReLU)
92 | def forward(self, x):
93 | z = self.conv1(x)
94 | return z
95 |
96 | net = CNN3d()
97 | input = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
98 | net.set_eval()
99 | output = net(input)
100 | print("conv3d tlx output", output)
101 | onnx_model = export(net, input_spec=input, path='conv3d_model.onnx')
102 |
103 | # Infer Model
104 | sess = rt.InferenceSession('conv3d_model.onnx')
105 |
106 | input_name = sess.get_inputs()[0].name
107 | output_name = sess.get_outputs()[0].name
108 |
109 | input_data = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
110 | input_data = np.array(input_data, dtype=np.float32)
111 |
112 | result = sess.run([output_name], {input_name: input_data})
113 | print("conv3d onnx output", result)
--------------------------------------------------------------------------------
/tests/test_custom_matmul.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import Linear, Dropout, Flatten, ReLU6
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 | class MatMul(tlx.nn.Module):
14 | def __init__(self):
15 | super(MatMul, self).__init__()
16 | self.matmul = tlx.ops.MatMul()
17 | self.transpose_X = False
18 | self.transpose_Y = False
19 | self._built = True
20 |
21 | def forward(self, a, b):
22 | z = self.matmul(a, b)
23 | if not self._nodes_fixed and self._build_graph:
24 | self._add_node([a, b], z)
25 | self._nodes_fixed = True
26 | return z
27 |
28 | class MLP(Module):
29 | def __init__(self):
30 | super(MLP, self).__init__()
31 | # weights init
32 | self.flatten = Flatten()
33 | self.line1 = Linear(in_features=32, out_features=64, act=tlx.nn.LeakyReLU(0.3))
34 | self.d1 = Dropout()
35 | self.line2 = Linear(in_features=64, out_features=128, b_init=None)
36 | self.relu6 = ReLU6()
37 | self.line3 = Linear(in_features=128, out_features=10, act=tlx.nn.ReLU)
38 | self.line4 = Linear(in_features=128, out_features=10)
39 | self.mat = MatMul()
40 |
41 | def forward(self, x):
42 | x = self.flatten(x)
43 | z = self.line1(x)
44 | z = self.d1(z)
45 | z = self.line2(z)
46 | z = self.relu6(z)
47 | z1 = self.line3(z)
48 | z2 = self.line4(z)
49 | z = self.mat(z1, z2)
50 | return z
51 |
52 | net = MLP()
53 | net.set_eval()
54 | input = tlx.nn.Input(shape=(10, 2, 2, 8))
55 | print("tlx output", net(input))
56 | onnx_model = export(net, input_spec=input, path='linear_model.onnx')
57 |
58 | # Infer Model
59 | sess = rt.InferenceSession('linear_model.onnx')
60 |
61 | input_name = sess.get_inputs()[0].name
62 | output_name = sess.get_outputs()[0].name
63 |
64 | input_data = tlx.nn.Input(shape=(10, 2, 2, 8))
65 | input_data = np.array(input_data, dtype=np.float32)
66 |
67 | result = sess.run([output_name], {input_name: input_data})
68 | print("onnx output", result)
--------------------------------------------------------------------------------
/tests/test_deconv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import Conv2d, ConvTranspose2d
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
14 | class MLP(Module):
15 | def __init__(self):
16 | super(MLP, self).__init__()
17 | # weights init
18 | self.conv1 = Conv2d(out_channels=16, kernel_size=3, stride=1, padding=(2, 2), in_channels=3, data_format='channels_last', act = tlx.nn.ReLU)
19 | self.deconv1 = ConvTranspose2d(out_channels=3, kernel_size=3, stride=1, padding=(2, 2), act=tlx.nn.ReLU, dilation=1, data_format='channels_last')
20 | self.conv2 = Conv2d(out_channels=16, kernel_size=3, stride=1, padding=(2, 2), in_channels=3, data_format='channels_last', act=tlx.nn.ReLU)
21 |
22 |
23 | def forward(self, x):
24 | x = self.conv1(x)
25 | x = self.deconv1(x)
26 | x = self.conv2(x)
27 | return x
28 |
29 | net = MLP()
30 | net.set_eval()
31 | input = tlx.nn.Input(shape=(4, 20, 20, 3))
32 | onnx_model = export(net, input_spec=input, path='deconv_model.onnx')
33 | print("tlx output", net(input))
34 |
35 | # Infer Model
36 | sess = rt.InferenceSession('deconv_model.onnx')
37 |
38 | input_name = sess.get_inputs()[0].name
39 | output_name = sess.get_outputs()[0].name
40 |
41 | input_data = tlx.nn.Input(shape=(4, 20, 20, 3))
42 | input_data = np.array(input_data, dtype=np.float32)
43 |
44 | result = sess.run([output_name], {input_name: input_data})
45 | print("onnx output", result)
--------------------------------------------------------------------------------
/tests/test_depthwiseconv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'torch'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import DepthwiseConv2d
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
14 | class MLP(Module):  # two stacked depthwise convolutions, NCHW ('channels_first')
15 |     def __init__(self):
16 |         super(MLP, self).__init__()
17 |         self.dpconv1 = DepthwiseConv2d(kernel_size = (2, 2), stride = (1, 1), dilation = 2, padding='SAME',  # SAME-padding path
18 |                                        depth_multiplier = 2, data_format='channels_first')
19 |
20 |         self.dpconv2 = DepthwiseConv2d(kernel_size=(2, 2), stride=(1, 1), dilation=2, padding='VALID',  # VALID-padding path
21 |                                        depth_multiplier=2, data_format='channels_first')
22 |     def forward(self, x):
23 |         x = self.dpconv1(x)
24 |         x = self.dpconv2(x)
25 |         return x
26 |
27 | net = MLP()
28 | net.set_eval()  # inference mode before export
29 | input = tlx.nn.Input(shape=(4, 3, 10, 10))
30 | onnx_model = export(net, input_spec=input, path='dwconv.onnx')
31 | print("tlx out", net(input))
32 |
33 | # Infer Model
34 | sess = rt.InferenceSession('dwconv.onnx')
35 |
36 | input_name = sess.get_inputs()[0].name
37 | output_name = sess.get_outputs()[0].name
38 |
39 | input_data = np.array(input, dtype=np.float32)  # same tensor as the tlx run, so outputs are comparable
40 |
41 | result = sess.run([output_name], {input_name: input_data})
42 | print("onnx out", result)
--------------------------------------------------------------------------------
/tests/test_embedding.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import OneHot, Embedding
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 | ################################### OneHot ##############################################
14 | class Model(Module):
15 |     def __init__(self):
16 |         super(Model, self).__init__()
17 |         self.onehot = OneHot(depth=10)  # maps each class id to a length-10 one-hot vector
18 |
19 |     def forward(self, x):
20 |         z = self.onehot(x)
21 |         return z
22 |
23 | net = Model()
24 | net.set_eval()
25 | input = tlx.nn.Input([10], dtype=tlx.int64)  # OneHot consumes integer class ids
26 | print("tlx output", net(input))
27 | onnx_model = export(net, input_spec=input, path='onehot.onnx')
28 |
29 | # Infer Model
30 | sess = rt.InferenceSession('onehot.onnx')
31 |
32 | input_name = sess.get_inputs()[0].name
33 | output_name = sess.get_outputs()[0].name
34 |
35 | input_data = np.array(input, dtype=np.int64)  # same indices as the tlx run
36 |
37 | result = sess.run([output_name], {input_name: input_data})
38 | print("onnx output", result)
39 |
40 | ###################################### Embedding #################################################
41 | class Model_E(Module):
42 |     def __init__(self):
43 |         super(Model_E, self).__init__()
44 |         self.embedding = Embedding(num_embeddings=1000, embedding_dim=50, name='embed')  # 1000-entry table, 50-d vectors
45 |
46 |     def forward(self, x):
47 |         z = self.embedding(x)
48 |         return z
49 |
50 | net = Model_E()
51 | net.set_eval()
52 | input = tlx.nn.Input([10, 100], dtype=tlx.int64)
53 | print("tlx output", net(input))
54 | onnx_model_e = export(net, input_spec=input, path='embedding.onnx')
55 |
56 | # Infer Model
57 | sess = rt.InferenceSession('embedding.onnx')
58 |
59 | input_name = sess.get_inputs()[0].name
60 | output_name = sess.get_outputs()[0].name
61 |
62 | input_data = np.array(input, dtype=np.int64)  # same indices as the tlx run
63 |
64 | result = sess.run([output_name], {input_name: input_data})
65 | print("onnx output", result)
66 |
--------------------------------------------------------------------------------
/tests/test_extend.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import ExpandDims, Tile
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
14 | class NET(Module):
15 |     def __init__(self):
16 |         super(NET, self).__init__()
17 |         self.expand = ExpandDims(axis=2)  # inserts a singleton dimension at axis 2
18 |
19 |     def forward(self, x):
20 |         x = self.expand(x)
21 |         return x
22 |
23 | net = NET()
24 | net.set_eval()
25 | input = tlx.nn.Input(shape=(10, 3, 5, 6))
26 | onnx_model = export(net, input_spec=input, path='extend.onnx')
27 | print("tlx out", net(input).shape)
28 |
29 | # Infer Model
30 | sess = rt.InferenceSession('extend.onnx')
31 |
32 | input_name = sess.get_inputs()[0].name
33 | output_name = sess.get_outputs()[0].name
34 |
35 | input_data = np.array(input, dtype=np.float32)  # same tensor as the tlx run
36 |
37 | result = sess.run([output_name], {input_name: input_data})
38 | print("onnx out", np.shape(result))  # only shapes are compared here
39 |
40 | ################################ Tile #############################################
41 | class Tile_M(Module):
42 |     def __init__(self):
43 |         super(Tile_M, self).__init__()
44 |         self.expand = Tile(multiples=[2, 3])  # repeats dim 0 twice and dim 1 three times
45 |
46 |     def forward(self, x):
47 |         x = self.expand(x)
48 |         return x
49 |
50 | net = Tile_M()
51 | net.set_eval()
52 | input = tlx.nn.Input(shape=(10, 9))
53 | tile_model = export(net, input_spec=input, path='tile.onnx')
54 | print("tlx out", net(input).shape)
55 |
56 | # Infer Model
57 | sess = rt.InferenceSession('tile.onnx')
58 |
59 | input_name = sess.get_inputs()[0].name
60 | output_name = sess.get_outputs()[0].name
61 |
62 | input_data = np.array(input, dtype=np.float32)
63 |
64 | result = sess.run([output_name], {input_name: input_data})
65 | print("onnx out", np.shape(result))
--------------------------------------------------------------------------------
/tests/test_globalpool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import GlobalMaxPool2d, GlobalAvgPool2d, Linear
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 | ################################# Test GlobalAvgPool2d ################################################
14 | class ModelAvg(Module):
15 | def __init__(self):
16 | super(ModelAvg, self).__init__()
17 | self.globalmax = GlobalAvgPool2d(data_format='channels_first')
18 | self.line = Linear(out_features=10)
19 |
20 | def forward(self, x):
21 | x = self.globalmax(x)
22 | x = self.line(x)
23 | return x
24 |
25 | net = ModelAvg()
26 | net.set_eval()
27 | input = tlx.nn.Input(shape=(5, 6, 3, 3))
28 | onnx_model_avg = export(net, input_spec=input, path='globalavg_model.onnx')
29 | print("tlx output", net(input))
30 |
31 | # Infer Model
32 | sess = rt.InferenceSession('globalavg_model.onnx')
33 |
34 | input_name = sess.get_inputs()[0].name
35 | output_name = sess.get_outputs()[0].name
36 |
37 | input_data = tlx.nn.Input(shape=(5, 6, 3, 3))
38 | input_data = np.array(input_data, dtype=np.float32)
39 |
40 | result = sess.run([output_name], {input_name: input_data})
41 | print("onnx output", result)
42 |
43 | ################################# Test GlobalMaxPool2d ################################################
44 | class ModelMax(Module):
45 | def __init__(self):
46 | super(ModelMax, self).__init__()
47 | self.globalmax = GlobalMaxPool2d(data_format='channels_first')
48 | self.line = Linear(out_features=10)
49 |
50 | def forward(self, x):
51 | x = self.globalmax(x)
52 | x = self.line(x)
53 | return x
54 |
55 | net = ModelMax()
56 | net.set_eval()
57 | input = tlx.nn.Input(shape=(5, 6, 3, 3))
58 | onnx_model_max = export(net, input_spec=input, path='globalmax_model.onnx')
59 | print("tlx output", net(input))
60 |
61 | # Infer Model
62 | sess = rt.InferenceSession('globalmax_model.onnx')
63 |
64 | input_name = sess.get_inputs()[0].name
65 | output_name = sess.get_outputs()[0].name
66 |
67 | input_data = tlx.nn.Input(shape=(5, 6, 3, 3))
68 | input_data = np.array(input_data, dtype=np.float32)
69 |
70 | result = sess.run([output_name], {input_name: input_data})
71 | print("onnx output", result)
--------------------------------------------------------------------------------
/tests/test_groupconv2d.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | # os.environ["TL_BACKEND"] = 'tensorflow'
6 | # os.environ["TL_BACKEND"] = 'paddle'
7 | os.environ["TL_BACKEND"] = 'torch'
8 | # os.environ["TL_BACKEND"] = 'mindspore'
9 | import tensorlayerx as tlx
10 | from tensorlayerx.nn import Module
11 | from tensorlayerx.nn import GroupConv2d
12 | from tlx2onnx.main import export
13 | import onnxruntime as rt
14 | import numpy as np
15 |
16 |
17 | ############################################ test 2d ###########################################################
18 | class CNN(Module):
19 |
20 |     def __init__(self):
21 |         super(CNN, self).__init__()
22 |         # weights init
23 |         W_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
24 |         b_init2 = tlx.nn.initializers.constant(value=0.1)
25 |         self.conv1 = GroupConv2d(  # 36 output channels split into 3 groups
26 |             36, (5, 5), (1, 1), n_group=3, padding=(2,2), W_init=W_init, b_init=b_init2, name='conv1',
27 |             in_channels=3, data_format='channels_last', act = tlx.nn.ReLU
28 |         )
29 |     def forward(self, x):
30 |         z = self.conv1(x)
31 |         return z
32 |
33 | net = CNN()
34 | input = tlx.nn.Input(shape=(1, 10, 10, 3))
35 | net.set_eval()
36 | output = net(input)
37 | print("groupconv2d tlx output", output)
38 | onnx_model = export(net, input_spec=input, path='groupconv2d_model.onnx')
39 |
40 | # Infer Model
41 | sess = rt.InferenceSession('groupconv2d_model.onnx')
42 |
43 | input_name = sess.get_inputs()[0].name
44 | output_name = sess.get_outputs()[0].name
45 |
46 | input_data = np.array(input, dtype=np.float32)  # same tensor as the tlx run, so outputs are comparable
47 |
48 | result = sess.run([output_name], {input_name: input_data})
49 | print("groupconv2d onnx output", result)
50 |
--------------------------------------------------------------------------------
/tests/test_layernorm.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import LayerNorm
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
14 | class NET(Module):
15 | def __init__(self):
16 | super(NET, self).__init__()
17 | self.layernorm = LayerNorm([50, 50, 32], act=tlx.nn.ReLU)
18 |
19 | def forward(self, x):
20 | x = self.layernorm(x)
21 | return x
22 |
23 | net = NET()
24 | print(type(net))
25 | net.set_eval()
26 | input = tlx.nn.Input(shape=(10, 50, 50, 32))
27 | onnx_model = export(net, input_spec=input, path='layernorm.onnx', enable_onnx_checker=False)
28 | print("tlx out", input)
29 |
30 | # Infer Model
31 | sess = rt.InferenceSession('layernorm.onnx')
32 |
33 | input_name = sess.get_inputs()[0].name
34 | output_name = sess.get_outputs()[0].name
35 |
36 | input_data = np.array(input, dtype=np.float32)
37 |
38 | result = sess.run([output_name], {input_name: input_data})
39 | print("onnx out", result)
40 |
--------------------------------------------------------------------------------
/tests/test_linear.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import Linear, Dropout, Flatten, ReLU6
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
14 | class MLP(Module):
15 | def __init__(self):
16 | super(MLP, self).__init__()
17 | # weights init
18 | self.flatten = Flatten()
19 | self.line1 = Linear(in_features=32, out_features=64, act=tlx.nn.LeakyReLU(0.3))
20 | self.d1 = Dropout()
21 | self.line2 = Linear(in_features=64, out_features=128, b_init=None, act=tlx.nn.ReLU)
22 | self.relu6 = ReLU6()
23 | self.line3 = Linear(in_features=128, out_features=10, act=tlx.nn.ReLU)
24 |
25 | def forward(self, x):
26 | x = self.flatten(x)
27 | z = self.line1(x)
28 | z = self.d1(z)
29 | z = self.line2(z)
30 | z = self.relu6(z)
31 | z = self.line3(z)
32 | return z
33 |
34 | net = MLP()
35 | input = tlx.nn.Input(shape=(3, 2, 2, 8))
36 | net.set_eval()
37 | output = net(input)
38 | print("tlx out", output)
39 | onnx_model = export(net, input_spec=input, path='linear_model.onnx')
40 |
41 | # Infer Model
42 | sess = rt.InferenceSession('linear_model.onnx')
43 |
44 | input_name = sess.get_inputs()[0].name
45 | output_name = sess.get_outputs()[0].name
46 |
47 | input_data = tlx.nn.Input(shape=(3, 2, 2, 8))
48 | input_data = np.array(input_data, dtype=np.float32)
49 |
50 | result = sess.run([output_name], {input_name: input_data})
51 | print('onnx out',result)
--------------------------------------------------------------------------------
/tests/test_mask_conv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import MaskedConv3d
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
14 | class MLP(Module):
15 |     def __init__(self):
16 |         # mask_type 'B' masked 3D convolution; SAME padding, stride 2 in every spatial dim
17 |         self.mask_conv = MaskedConv3d(mask_type='B', out_channels=32, kernel_size=(1, 1, 1), stride=(2, 2, 2), act=tlx.ReLU, name='conv3d_2',
18 |                                       in_channels=3, padding='SAME')
19 |
20 |     def forward(self, x):
21 |         x = self.mask_conv(x)
22 |         return x
23 |
24 | net = MLP()
25 | input = tlx.nn.Input(shape=(5, 10, 10, 10, 3))
26 | net.set_eval()
27 | output = net(input)
28 | print("tlx out", output)
29 | onnx_model = export(net, input_spec=input, path='maskconv.onnx')
30 |
31 | # Infer Model
32 | sess = rt.InferenceSession('maskconv.onnx')
33 |
34 | input_name = sess.get_inputs()[0].name
35 | output_name = sess.get_outputs()[0].name
36 |
37 | input_data = np.array(input, dtype=np.float32)  # same tensor as the tlx run, so outputs are comparable
38 |
39 | result = sess.run([output_name], {input_name: input_data})
40 | print('onnx out', result)
--------------------------------------------------------------------------------
/tests/test_merge.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import Linear, Concat, Elementwise
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 | class CustomModel(Module):
14 |     def __init__(self):
15 |         super(CustomModel, self).__init__(name="custom")
16 |         self.linear1 = Linear(in_features=20, out_features=10, act=tlx.ReLU, name='relu1_1')
17 |         self.linear2 = Linear(in_features=20, out_features=10, act=tlx.ReLU, name='relu2_1')
18 |         self.concat = Concat(concat_dim=1, name='concat_layer')  # joins the two branches along dim 1
19 |
20 |     def forward(self, inputs):
21 |         d1 = self.linear1(inputs)
22 |         d2 = self.linear2(inputs)
23 |         outputs = self.concat([d1, d2])
24 |         return outputs
25 |
26 | net = CustomModel()
27 | input = tlx.nn.Input(shape=(3, 20), init=tlx.initializers.RandomNormal())
28 | net.set_eval()
29 | output = net(input)
30 | print("tlx out", output)
31 | onnx_model = export(net, input_spec=input, path='concat.onnx')
32 |
33 | # Infer Model
34 | sess = rt.InferenceSession('concat.onnx')
35 |
36 | input_name = sess.get_inputs()[0].name
37 | output_name = sess.get_outputs()[0].name
38 |
39 | input_data = np.array(input, dtype=np.float32)  # same tensor as the tlx run
40 |
41 | result = sess.run([output_name], {input_name: input_data})
42 | print('onnx out', result)
43 |
44 | ##################################### Elementwise ###################################################
45 | class CustomModel2(Module):
46 |     def __init__(self):
47 |         super(CustomModel2, self).__init__(name="custom")
48 |         self.linear1 = Linear(in_features=10, out_features=10, act=tlx.ReLU, name='relu1_1')
49 |         self.linear2 = Linear(in_features=10, out_features=10, act=tlx.ReLU, name='relu2_1')
50 |         self.linear3 = Linear(in_features=10, out_features=10, act=tlx.ReLU, name='relu3_1')
51 |         self.element = Elementwise(combine_fn=tlx.matmul, name='concat')  # NOTE(review): combines via matmul despite the 'concat' name
52 |
53 |     def forward(self, inputs):
54 |         d1 = self.linear1(inputs)
55 |         d2 = self.linear2(inputs)
56 |         d3 = self.linear3(inputs)
57 |         outputs = self.element([d1, d2, d3])
58 |         return outputs
59 |
60 | net = CustomModel2()
61 | input = tlx.nn.Input(shape=(10, 10), init=tlx.initializers.RandomNormal())
62 | net.set_eval()
63 | output = net(input)
64 | print("tlx out", output)
65 | onnx_model2 = export(net, input_spec=input, path='elementwise.onnx')
66 |
67 | # Infer Model
68 | sess = rt.InferenceSession('elementwise.onnx')
69 |
70 | input_name = sess.get_inputs()[0].name
71 | output_name = sess.get_outputs()[0].name
72 |
73 | input_data = np.array(input, dtype=np.float32)
74 |
75 | result = sess.run([output_name], {input_name: input_data})
76 | print('onnx out', result)
--------------------------------------------------------------------------------
/tests/test_noise.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import GaussianNoise
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 | class CustomModel(Module):
14 |     def __init__(self):
15 |         super(CustomModel, self).__init__(name="custom")
16 |         self.noise = GaussianNoise()  # in eval mode noise layers are typically pass-through — outputs should match; TODO confirm
17 |
18 |     def forward(self, inputs):
19 |         x = self.noise(inputs)
20 |         return x
21 |
22 | net = CustomModel()
23 | input = tlx.nn.Input(shape=(3, 20), init=tlx.initializers.RandomNormal())
24 | net.set_eval()
25 | output = net(input)
26 | print("tlx out", output)
27 | onnx_model = export(net, input_spec=input, path='noise.onnx')
28 |
29 | # Infer Model
30 | sess = rt.InferenceSession('noise.onnx')
31 |
32 | input_name = sess.get_inputs()[0].name
33 | output_name = sess.get_outputs()[0].name
34 |
35 | input_data = np.array(input, dtype=np.float32)
36 |
37 | result = sess.run([output_name], {input_name: input_data})
38 | print('onnx out', result)
--------------------------------------------------------------------------------
/tests/test_onnx.py:
--------------------------------------------------------------------------------
1 | import onnx
2 | import numpy as np
3 | from onnx import helper
4 | from onnx import TensorProto
5 | import onnxruntime as rt
6 |
7 | def network_construct():
8 |     # Builds and saves a tiny ONNX graph: Cast(float->double) followed by MaxPool.
9 |     X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 3, 3])
10 |     Y = helper.make_tensor_value_info('Y', TensorProto.DOUBLE, [1, 1, 2, 2])
11 |     X1 = helper.make_tensor_value_info('X1', TensorProto.DOUBLE, [1, 1, 3, 3])
12 |
13 |     node_def0 = helper.make_node('Cast', ['X'], ['X1'], to=TensorProto.DOUBLE)
14 |
15 |     # Make MaxPool Node
16 |     node_def = onnx.helper.make_node(
17 |         'MaxPool',
18 |         inputs=['X1'],
19 |         outputs=['Y'],
20 |         kernel_shape=[2, 2],
21 |         strides=[2, 2],
22 |         pads=[1, 1, 1, 1]  # pad order: top, left, bottom, right
23 |     )
24 |
25 |     # Make Graph
26 |     graph_def = helper.make_graph(
27 |         name='test-MaxPool',
28 |         inputs=[X],
29 |         outputs=[Y],
30 |         value_info=[X1],
31 |         nodes=[node_def0, node_def]
32 |     )
33 |
34 |     # Make model
35 |     model_def = helper.make_model(
36 |         graph_def,
37 |         producer_name='yang'
38 |     )
39 |
40 |     # Check & Save Model
41 |     onnx.checker.check_model(model_def)
42 |     onnx.save(model_def, 'MaxPool.onnx')
43 |
44 | def model_infer():
45 |     # Runs the saved MaxPool.onnx on a fixed 3x3 input and prints the pooled result.
46 |     # Infer Model
47 |     sess = rt.InferenceSession('MaxPool.onnx')
48 |
49 |     input_name = sess.get_inputs()[0].name
50 |     output_name = sess.get_outputs()[0].name
51 |
52 |     input_data = [[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]  # NCHW: 1x1x3x3
53 |     input_data = np.array(input_data, dtype=np.float32)
54 |
55 |     result = sess.run([output_name], {input_name: input_data})
56 |     print(result)
57 |
58 | def main():
59 |     network_construct()
60 |     # model_infer()  # inference step is opt-in; construct/check only by default
61 |
62 | if __name__ == '__main__':
63 |     main()
64 |
--------------------------------------------------------------------------------
/tests/test_padding.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import PadLayer, ZeroPad1d, ZeroPad2d, ZeroPad3d
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 | class CustomModel(Module):
14 |     def __init__(self):
15 |         super(CustomModel, self).__init__(name="custom")
16 |         self.pad = PadLayer([[1, 2], [3, 4], [5, 6], [7, 8]], "REFLECT", name='inpad')  # per-dim (before, after) reflect padding
17 |         self.pad2d = ZeroPad2d(padding=((2, 2), (3, 3)), data_format='channels_last')
18 |
19 |     def forward(self, inputs):
20 |         x = self.pad(inputs)
21 |         x = self.pad2d(x)
22 |         return x
23 |
24 | net = CustomModel()
25 | input = tlx.nn.Input(shape=(5, 5, 10, 10), init=tlx.initializers.RandomNormal())
26 | net.set_eval()
27 | output = net(input)
28 | print("tlx out", output.shape)
29 | onnx_model = export(net, input_spec=input, path='padding.onnx')
30 |
31 | # Infer Model
32 | sess = rt.InferenceSession('padding.onnx')
33 |
34 | input_name = sess.get_inputs()[0].name
35 | output_name = sess.get_outputs()[0].name
36 |
37 | input_data = np.array(input, dtype=np.float32)
38 |
39 | result = sess.run([output_name], {input_name: input_data})
40 | print('onnx out', np.shape(result))  # only shapes are compared here
--------------------------------------------------------------------------------
/tests/test_pool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | # os.environ["TL_BACKEND"] = 'paddle'
7 | # os.environ["TL_BACKEND"] = 'torch'
8 | # os.environ["TL_BACKEND"] = 'mindspore'
9 | import tensorlayerx as tlx
10 | from tensorlayerx.nn import Module
11 | from tensorlayerx.nn import (Conv1d, MaxPool1d, AvgPool1d)
12 | from tensorlayerx.nn import (Conv2d, MaxPool2d, AvgPool2d)
13 | from tensorlayerx.nn import (Conv3d, MaxPool3d, AvgPool3d)
14 | from tlx2onnx.main import export
15 | import onnxruntime as rt
16 | import numpy as np
17 |
18 |
19 | ############################################ test 2d ###########################################################
20 | class CNN(Module):
21 |
22 | def __init__(self):
23 | super(CNN, self).__init__()
24 | # weights init
25 | W_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
26 | b_init2 = tlx.nn.initializers.constant(value=0.1)
27 | self.conv1 = Conv2d(64, (3, 3), (1, 1), padding=(2, 2), W_init=W_init, b_init=b_init2, name='conv1', in_channels=3, data_format='channels_last', act = tlx.nn.ReLU)
28 | self.pool1 = MaxPool2d(kernel_size=(3, 3), stride=(1, 1), padding='SAME', data_format='channels_last')
29 | self.conv2 = Conv2d(128, (3, 3), (1, 1), padding=(2, 2), W_init=W_init, b_init=b_init2, name='conv2', in_channels=64, data_format='channels_last')
30 | self.pool2 = AvgPool2d(kernel_size=(3, 3), stride=(1, 1), padding='SAME', data_format='channels_last')
31 | def forward(self, x):
32 | z = self.conv1(x)
33 | z = self.pool1(z)
34 | z = self.conv2(z)
35 | z = self.pool2(z)
36 | return z
37 |
# Export the 2D CNN to ONNX and run it with ONNX Runtime.
net = CNN()
net.set_eval()
input = tlx.nn.Input(shape=(1, 32, 32, 3))
print("tlx output", net(input))
onnx_model = export(net, input_spec=input, path='conv_model.onnx')

# Infer Model
sess = rt.InferenceSession('conv_model.onnx')

input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# NOTE(review): a fresh tlx Input is created here, so the printed TLX and ONNX
# outputs come from different tensors and are not directly comparable — confirm intended.
input_data = tlx.nn.Input(shape=(1, 32, 32, 3))
input_data = np.array(input_data, dtype=np.float32)

result = sess.run([output_name], {input_name: input_data})
print("onnx output", result)
55 |
56 | ############################################ test 1d ###########################################################
class CNN1d(Module):
    """1D test net: Conv1d -> MaxPool1d -> Conv1d -> AvgPool1d, channels-last."""

    def __init__(self):
        super(CNN1d, self).__init__()
        # Shared weight/bias initializers for both conv layers.
        w_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
        bias_init = tlx.nn.initializers.constant(value=0.1)
        self.conv1 = Conv1d(64, 3, 1, padding=2, W_init=w_init, b_init=bias_init, name='conv1', in_channels=3, data_format='channels_last', act=tlx.nn.ReLU)
        self.pool1 = MaxPool1d(kernel_size=3, stride=1, padding='SAME', data_format='channels_last')
        self.conv2 = Conv1d(128, 3, 1, padding=2, W_init=w_init, b_init=bias_init, name='conv2', in_channels=64, data_format='channels_last')
        self.pool2 = AvgPool1d(kernel_size=3, stride=1, padding='SAME', data_format='channels_last')

    def forward(self, x):
        # Run the four layers in order.
        for layer in (self.conv1, self.pool1, self.conv2, self.pool2):
            x = layer(x)
        return x
74 |
# Export the 1D CNN to ONNX and run it with ONNX Runtime.
# NOTE(review): unlike the 2D case above, net.set_eval() is not called — confirm intended.
net = CNN1d()
input = tlx.nn.Input(shape=(1, 32, 3))
onnx_model_1d = export(net, input_spec=input, path='conv_model_1d.onnx')

# Infer Model
sess = rt.InferenceSession('conv_model_1d.onnx')

input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# A fresh tensor is generated for ONNX inference (not the export tensor).
input_data = tlx.nn.Input(shape=(1, 32, 3))
input_data = np.array(input_data, dtype=np.float32)

result = sess.run([output_name], {input_name: input_data})
print(result)
90 |
91 |
92 | ############################################ test 3d ###########################################################
class CNN(Module):
    """3D test net: Conv3d -> MaxPool3d -> Conv3d -> AvgPool3d, channels-last.

    Note: intentionally reuses the name ``CNN``, shadowing the 2D class above.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Shared weight/bias initializers for both conv layers.
        w_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
        bias_init = tlx.nn.initializers.constant(value=0.1)
        self.conv1 = Conv3d(64, (3, 3, 3), (1, 1, 1), padding=(2, 2, 2), W_init=w_init, b_init=bias_init, name='conv1', in_channels=3, data_format='channels_last', act=tlx.nn.ReLU)
        self.pool1 = MaxPool3d(kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME', data_format='channels_last')
        self.conv2 = Conv3d(128, (3, 3, 3), (1, 1, 1), padding=(2, 2, 2), W_init=w_init, b_init=bias_init, name='conv2', in_channels=64, data_format='channels_last')
        self.pool2 = AvgPool3d(kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME', data_format='channels_last')

    def forward(self, x):
        # Run the four layers in order.
        for layer in (self.conv1, self.pool1, self.conv2, self.pool2):
            x = layer(x)
        return x
110 |
# Export the 3D CNN to ONNX and run it with ONNX Runtime.
# NOTE(review): net.set_eval() is not called here — confirm intended.
net = CNN()
input = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
onnx_model_3d = export(net, input_spec=input, path='conv_model_3d.onnx')

# Infer Model
sess = rt.InferenceSession('conv_model_3d.onnx')

input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# A fresh tensor is generated for ONNX inference (not the export tensor).
input_data = tlx.nn.Input(shape=(1, 32, 32, 32, 3))
input_data = np.array(input_data, dtype=np.float32)

result = sess.run([output_name], {input_name: input_data})
print(result)
126 |
--------------------------------------------------------------------------------
/tests/test_rnn.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | import os
4 | # os.environ["TL_BACKEND"] = 'tensorflow'
5 | # os.environ["TL_BACKEND"] = 'paddle'
6 | os.environ["TL_BACKEND"] = 'torch'
7 | # os.environ["TL_BACKEND"] = 'mindspore'
8 | import tensorlayerx as tlx
9 | from tensorlayerx.nn import Module
10 | from tensorlayerx.nn import RNN, LSTM, GRU, Linear
11 | from tlx2onnx.main import export
12 | import onnxruntime as rt
13 | import numpy as np
14 | tlx.set_seed(42)
15 |
class Rnn(Module):
    """4-layer unidirectional ReLU RNN; forward returns only the output sequence."""

    def __init__(self):
        super(Rnn, self).__init__()
        self.rnn = RNN(input_size=5, hidden_size=5, act='relu', bidirectional=False, num_layers=4)

    def forward(self, x):
        outputs, _states = self.rnn(x)
        return outputs
# Export the RNN to ONNX and compare TLX vs ONNX Runtime on the same tensor.
model = Rnn()
input = tlx.nn.Input(shape=[1, 5, 5])
model.set_eval()
output = model(input)
print("RNN tlx output", output)
onnx_model = export(model, input_spec=input, path='rnn.onnx')

sess = rt.InferenceSession('rnn.onnx')
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# The same tensor is fed to ONNX Runtime, so the two outputs should match.
input_data = np.array(input, dtype=np.float32)
result = sess.run([output_name], {input_name: input_data})
print("RNN onnx output", result)
print("==========================================================================================================")
39 |
40 |
class Lstm(Module):
    """4-layer bidirectional LSTM; forward returns only the output sequence."""

    def __init__(self):
        super(Lstm, self).__init__()
        self.rnn = LSTM(input_size=5, hidden_size=5, bidirectional=True, num_layers=4)

    def forward(self, x):
        outputs, _states = self.rnn(x)
        return outputs
# Export the LSTM to ONNX and compare TLX vs ONNX Runtime on the same tensor.
model = Lstm()
input = tlx.nn.Input(shape=[1, 5, 5])
model.set_eval()
output = model(input)
print("LSTM tlx output", output)
onnx_model = export(model, input_spec=input, path='lstm.onnx')

sess = rt.InferenceSession('lstm.onnx')
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# The same tensor is fed to ONNX Runtime, so the two outputs should match.
input_data = np.array(input, dtype=np.float32)
result = sess.run([output_name], {input_name: input_data})
print("LSTM onnx output", result)
print("==========================================================================================================")
64 |
class Gru(Module):
    """4-layer bidirectional GRU; forward returns only the output sequence."""

    def __init__(self):
        super(Gru, self).__init__()
        self.rnn = GRU(input_size=5, hidden_size=5, bidirectional=True, num_layers=4)

    def forward(self, x):
        outputs, _states = self.rnn(x)
        return outputs
73 |
# Export the GRU to ONNX and compare TLX vs ONNX Runtime on the same tensor.
model = Gru()
input = tlx.nn.Input(shape=[1, 5, 5])
model.set_eval()
output = model(input)
print("GRU tlx output", output)
onnx_model = export(model, input_spec=input, path='gru.onnx')

sess = rt.InferenceSession('gru.onnx')
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# The same tensor is fed to ONNX Runtime, so the two outputs should match.
input_data = np.array(input, dtype=np.float32)
result = sess.run([output_name], {input_name: input_data})
print("GRU onnx output", result)
--------------------------------------------------------------------------------
/tests/test_sampling.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'torch'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import UpSampling2d, DownSampling2d
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
class MLP(Module):
    """Bilinear 2x upsample followed by 2x downsample, channels-first layout."""

    def __init__(self):
        super(MLP, self).__init__()
        self.upsampling = UpSampling2d(scale=(2, 2), method='bilinear', data_format='channels_first')
        self.downsampling = DownSampling2d(scale=(2, 2), method='bilinear', data_format='channels_first')

    def forward(self, x):
        # Up then back down; spatial size should round-trip.
        return self.downsampling(self.upsampling(x))
24 |
25 |
# Export the resampling model to ONNX and compare output shapes.
net = MLP()
input = tlx.nn.Input(shape=(3, 3, 5, 5), init=tlx.initializers.RandomNormal())
net.set_eval()
output = net(input)
print("tlx out", output.shape)
onnx_model = export(net, input_spec=input, path='sampling.onnx')

# Infer Model
sess = rt.InferenceSession('sampling.onnx')

input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# Same tensor as used for export, so shapes should agree.
input_data = np.array(input, dtype=np.float32)

result = sess.run([output_name], {input_name: input_data})
print('onnx out', np.shape(result))
--------------------------------------------------------------------------------
/tests/test_scale.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'torch'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import Scale
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
class CustomModel(Module):
    """Applies a single learnable Scale layer initialized at 0.5."""

    def __init__(self):
        super(CustomModel, self).__init__(name="custom")
        self.scale = Scale(init_scale=0.5)

    def forward(self, inputs):
        return self.scale(inputs)
21 |
# Export the Scale model to ONNX and compare outputs on the same tensor.
# NOTE(review): filename 'sacle.onnx' looks like a typo for 'scale.onnx';
# it is used consistently below, so behavior is unaffected.
net = CustomModel()
input = tlx.nn.Input(shape=(3, 20), init=tlx.initializers.RandomNormal())
net.set_eval()
output = net(input)
print("tlx out", output)
onnx_model = export(net, input_spec=input, path='sacle.onnx')

# Infer Model
sess = rt.InferenceSession('sacle.onnx')

input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

input_data = np.array(input, dtype=np.float32)

result = sess.run([output_name], {input_name: input_data})
print('onnx out', result)
--------------------------------------------------------------------------------
/tests/test_shape.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import Transpose, Reshape
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
13 |
class MLP(Module):
    """Identity transpose, axis permutation (2,0,1,3), then reshape to (2, 3, 16)."""

    def __init__(self):
        super(MLP, self).__init__()
        self.trans1 = Transpose(perm=[0, 1, 2, 3])  # no-op permutation
        self.trans2 = Transpose(perm=[2, 0, 1, 3])
        self.reshpe = Reshape(shape=(2, 3, 16))

    def forward(self, x):
        out = self.trans1(x)
        out = self.trans2(out)
        return self.reshpe(out)
26 |
# Export the shape-op model to ONNX and run it with ONNX Runtime.
net = MLP()
net.set_eval()
input = tlx.nn.Input(shape=(3, 2, 2, 8))
print("tlx output", net(input))
onnx_model = export(net, input_spec=input, path='shape_op.onnx')

# Infer Model
sess = rt.InferenceSession('shape_op.onnx')

input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# NOTE(review): a fresh tensor is generated here, so TLX and ONNX printed
# outputs are not element-wise comparable — confirm intended.
input_data = tlx.nn.Input(shape=(3, 2, 2, 8))
input_data = np.array(input_data, dtype=np.float32)

result = sess.run([output_name], {input_name: input_data})
print("onnx output", result)
--------------------------------------------------------------------------------
/tests/test_stack.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | os.environ["TL_BACKEND"] = 'tensorflow'
6 | import tensorlayerx as tlx
7 | from tensorlayerx.nn import Module
8 | from tensorlayerx.nn import Linear, Stack, UnStack
9 | from tlx2onnx.main import export
10 | import onnxruntime as rt
11 | import numpy as np
12 |
class CustomModel(Module):
    """Stacks three Linear branches, unstacks them, and stacks them again."""

    def __init__(self):
        super(CustomModel, self).__init__(name="custom")

        self.l1 = Linear(10, name='dense1')
        self.l2 = Linear(10, name='dense2')
        self.l3 = Linear(10, name='dense3')
        self.stack = Stack(axis=1)
        self.unstack = UnStack(axis=1)
        self.stack1 = Stack(axis=1)

    def forward(self, inputs):
        # Three parallel linear branches over the same input.
        branches = [layer(inputs) for layer in (self.l1, self.l2, self.l3)]
        stacked = self.stack(branches)
        # Round-trip through UnStack/Stack along the same axis.
        p1, p2, p3 = self.unstack(stacked)
        return self.stack1([p1, p2, p3])
32 |
# Export the stack/unstack model to ONNX and compare outputs on the same tensor.
net = CustomModel()
input = tlx.nn.Input(shape=(10, 784), init=tlx.initializers.RandomNormal())
net.set_eval()
output = net(input)
print("tlx out", output)
onnx_model = export(net, input_spec=input, path='stack.onnx')

# Infer Model
sess = rt.InferenceSession('stack.onnx')

input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

input_data = np.array(input, dtype=np.float32)

result = sess.run([output_name], {input_name: input_data})
print('onnx out', result)
--------------------------------------------------------------------------------
/tests/test_subpixelconv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | import os
4 | # os.environ["TL_BACKEND"] = 'tensorflow'
5 | # os.environ["TL_BACKEND"] = 'paddle'
6 | os.environ["TL_BACKEND"] = 'torch'
7 | # os.environ["TL_BACKEND"] = 'mindspore'
8 | import tensorlayerx as tlx
9 | from tensorlayerx.nn import Module
10 | from tensorlayerx.nn import SubpixelConv2d
11 | from tlx2onnx.main import export
12 | import onnxruntime as rt
13 | import numpy as np
14 | tlx.set_seed(42)
15 |
class conv(Module):
    """Single SubpixelConv2d (scale 2, channels-last) with ReLU activation."""

    def __init__(self):
        super(conv, self).__init__()
        self.conv = SubpixelConv2d(scale=2, data_format="channels_last", act=tlx.ReLU)

    def forward(self, x):
        return self.conv(x)
26 |
# Export the SubpixelConv2d model and compare TLX vs ONNX Runtime on the same tensor.
model = conv()
input = tlx.nn.Input(shape= (1, 2, 2, 8), init=tlx.nn.initializers.HeUniform())
model.set_eval()
output = model(input)
print("SubpixelConv2d tlx output", output)
# opset 11 is requested explicitly — presumably required by the ops this
# layer maps to; verify against the mapper's minimum opset.
onnx_model = export(model, input_spec=input, path='SubpixelConv2d.onnx', opset_version = 11)

sess = rt.InferenceSession('SubpixelConv2d.onnx')
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

input_data = np.array(input, dtype=np.float32)
result = sess.run([output_name], {input_name: input_data})
print("SubpixelConv2d onnx output", result)
--------------------------------------------------------------------------------
/tests/test_topology.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import tensorlayerx as tlx
5 | from tensorlayerx.nn import Module
6 | from tensorlayerx.nn import Linear, Conv2d, BatchNorm2d, MaxPool2d, Flatten
7 | from tlx2onnx.topology import construct_topology
8 |
class CNN(Module):
    """Conv/pool/linear trunk with two linear heads concatenated at the output."""

    def __init__(self):
        super(CNN, self).__init__()
        # Shared initializers: one for convs, one for linears, one for biases.
        conv_init = tlx.nn.initializers.truncated_normal(stddev=5e-2)
        fc_init = tlx.nn.initializers.truncated_normal(stddev=0.04)
        bias_init = tlx.nn.initializers.constant(value=0.1)

        self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=conv_init, b_init=None, name='conv1', in_channels=3, act=tlx.ReLU)
        self.bn = BatchNorm2d(num_features=64, act=tlx.ReLU)
        self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')

        self.conv2 = Conv2d(
            64, (5, 5), (1, 1), padding='SAME', act=tlx.ReLU, W_init=conv_init, b_init=None, name='conv2', in_channels=64
        )
        self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')

        self.flatten = Flatten(name='flatten')
        self.linear1 = Linear(384, act=tlx.ReLU, W_init=fc_init, b_init=bias_init, name='linear1relu', in_features=2304)
        self.linear2 = Linear(192, act=tlx.ReLU, W_init=fc_init, b_init=bias_init, name='linear2relu', in_features=384)
        self.linear3 = Linear(10, act=None, W_init=fc_init, name='output1', in_features=192)
        self.linear4 = Linear(20, act=None, W_init=fc_init, name='output2', in_features=192)
        self.concat = tlx.nn.Concat(name='concat')

    def forward(self, x):
        # Shared trunk.
        for layer in (self.conv1, self.bn, self.maxpool1, self.conv2,
                      self.maxpool2, self.flatten, self.linear1, self.linear2):
            x = layer(x)
        # Two heads joined by Concat.
        return self.concat([self.linear3(x), self.linear4(x)])
48 |
# Build the model, run it once, then inspect the constructed layer topology.
model = CNN()
inputs = tlx.nn.Input(shape=(3, 24, 24, 3))
outputs = model(inputs)

# memory maps node keys (in traversal order) to per-layer records; the first
# entry describes the graph input, the last entry the graph output.
memory = construct_topology(model, inputs)
input_shape = memory[next(iter(memory))]['out_tensors']
output_shape = memory[list(memory.keys())[-1]]['out_tensors']
input_name = memory[next(iter(memory))]['out_nodes_name']
output_name = memory[list(memory.keys())[-1]]['out_nodes_name']

for m in memory.keys():
    print(m, memory[m])
--------------------------------------------------------------------------------
/tlx2onnx/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
# Package version components, combined into the canonical version strings below.
MAJOR = 0
MINOR = 0
PATCH = 1
PRE_RELEASE = ''
# Use the following formatting: (major, minor, patch, prerelease)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)

# '0.0.1' — the numeric part only.
__shortversion__ = '.'.join(map(str, VERSION[:3]))
# Numeric part plus any pre-release suffix (empty here).
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
__author__ = "TensorLayerX Contributors"
__producer__ = "tlx2onnx"
__description__ = 'This package converts TensorlayerX models into ONNX for use with any inference engine supporting ONNX.'
__repository_url__ = 'https://github.com/tensorlayer/TLX2ONNX'
__download_url__ = 'https://github.com/tensorlayer/TLX2ONNX'
__license__ = 'apache'
__keywords__ = 'tensorlayerx, onnx, deep learning'

# Public entry point: tlx2onnx.export
from .main import export
22 |
23 |
--------------------------------------------------------------------------------
/tlx2onnx/common/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | # onnx function
5 | from .onnx_tool import order_repeated_field
6 | from .onnx_tool import make_node
7 | from .onnx_tool import make_graph
8 |
9 |
10 | # preprocessing
11 | from .preprocessing import *
12 |
13 | # utils
14 | from .utils import logging
15 |
--------------------------------------------------------------------------------
/tlx2onnx/common/onnx_tool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper
5 |
def order_repeated_field(repeated_proto, key_name, order):
    """Sort *repeated_proto* in place so its elements follow *order*.

    Parameters
    ----------
    repeated_proto : mutable sequence supporting ``.sort`` (e.g. a protobuf
        repeated field such as ``node.attribute``).
    key_name : str
        Attribute read from each element and used as the sort key.
    order : iterable
        Desired key order; every element's key must appear in it.

    Raises
    ------
    ValueError
        If an element's key is not present in *order*.
    """
    # Precompute key -> position once (first occurrence wins, matching
    # list.index); the old code called list.index inside the sort key,
    # which was O(n*m) instead of O(n + m).
    position = {}
    for idx, key in enumerate(order):
        position.setdefault(key, idx)

    def sort_key(item):
        key = getattr(item, key_name)
        try:
            return position[key]
        except KeyError:
            raise ValueError("{!r} is not in order".format(key)) from None

    repeated_proto.sort(key=sort_key)
9 |
def make_node(op_type, inputs, outputs, name=None, doc_string=None, domain=None, **kwargs):
    """Build an ONNX node and return ``(node, outputs)``.

    Attributes are reordered to match the order of ``kwargs``; when there is
    exactly one output its name is returned unwrapped instead of as a list.
    """
    node = helper.make_node(op_type, inputs, outputs, name, doc_string, domain, **kwargs)
    if doc_string == '':
        node.doc_string = ''
    order_repeated_field(node.attribute, 'name', kwargs.keys())
    out = outputs[0] if len(outputs) == 1 else outputs
    return node, out
18 |
def make_graph(*args, doc_string=None, **kwargs):
    """Thin wrapper over ``helper.make_graph`` that normalizes an empty doc_string."""
    graph = helper.make_graph(*args, doc_string=doc_string, **kwargs)
    if doc_string == '':
        graph.doc_string = ''
    return graph
--------------------------------------------------------------------------------
/tlx2onnx/common/preprocessing.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import numpy as np
5 | import tensorlayerx as tlx
6 | from tlx2onnx.op_mapper.op_mapper import OpMapper
7 | from onnx import helper, numpy_helper
8 |
def transpose_shape(shape, perm):
    """Return the shape that ``np.transpose`` of a *shape*-shaped array with
    permutation *perm* would have, as a tuple.

    The old implementation allocated ``np.ones(shape)`` — O(prod(shape))
    memory and time — just to read the transposed array's ``.shape``.
    Permuting the dimensions directly is O(len(shape)).
    """
    return tuple(shape[axis] for axis in perm)
11 |
def to_numpy(tensor):
    """Convert a backend tensor to a NumPy array via TensorLayerX."""
    return tlx.convert_to_numpy(tensor)
14 |
def convert_padding(padding, input_shape, output_shape, kernel_shape, strides, dilations, spatial, data_format):
    """Translate a TLX padding spec into ONNX Conv/Pool padding.

    Returns either an explicit pads list ``[begin..., end...]`` or an ONNX
    ``auto_pad`` string (``"SAME_UPPER"`` / ``"VALID"``).
    """
    dilations = [1] * spatial if dilations is None else dilations

    if isinstance(padding, str):
        if padding == "VALID":
            return "VALID"
        if padding == "SAME":
            if data_format == "channels_last":
                input_shape = make_shape_channels_first(input_shape)
                output_shape = make_shape_channels_first(output_shape)

            # Unknown spatial dims: defer to ONNX auto padding.
            if any(input_shape[i + 2] == -1 or output_shape[i + 2] == -1 for i in range(spatial)):
                return "SAME_UPPER"

            pads = [0] * (spatial * 2)
            for i in range(spatial):
                needed = (
                    (output_shape[i + 2] - 1) * strides[i]
                    + dilations[i] * (kernel_shape[i] - 1) + 1
                    - input_shape[i + 2]
                )
                needed = max(needed, 0)
                pads[i] = needed // 2                      # begin side
                pads[i + spatial] = needed - needed // 2   # end side gets the extra
            return pads
    elif isinstance(padding, int):
        # Same symmetric pad on every spatial dim.
        return [padding] * (spatial * 2)
    elif isinstance(padding, tuple):
        # Per-dim symmetric pads: repeat for the begin and end sides.
        return list(padding) * 2
51 |
def convert_w(w, data_format, spatial, w_name):
    """Convert a backend conv kernel to an ONNX initializer named *w_name*.

    ONNX Conv expects kernels laid out as (M, C, *spatial); backends that
    store kernels differently are transposed here.
    """
    w = tlx.convert_to_numpy(w)
    if tlx.BACKEND == 'tensorflow':
        # TF kernels are (*spatial, C, M); move M and C to the front.
        if spatial == 2:
            w = np.transpose(w, axes=[3, 2, 0, 1])
        elif spatial == 1:
            w = np.transpose(w, axes=[2, 1, 0])
        elif spatial == 3:
            w = np.transpose(w, axes=[4, 3, 0, 1, 2])
        return numpy_helper.from_array(w, name=w_name)
    elif tlx.BACKEND == 'mindspore':
        # NOTE(review): only the 2D channels-last case is remapped here;
        # other mindspore layouts fall through unchanged — confirm intended.
        if spatial == 2 and data_format == 'channels_last':
            w = np.transpose(w, axes=[3, 0, 1, 2])
            return numpy_helper.from_array(w, name=w_name)
    # Backends already in (M, C, *spatial) order need no transpose.
    return numpy_helper.from_array(w, name=w_name)
67 |
def convert_b(b, b_name):
    """Convert a backend bias tensor to an ONNX initializer named *b_name*."""
    b = tlx.convert_to_numpy(b)
    return numpy_helper.from_array(b, name=b_name)
71 |
def _convert_tlx_act(op_type, inputs, outputs, **attrs):
    """Invoke the registered opset-1 mapper for *op_type* in fused-activation
    mode (``node=None``), passing *inputs*/*outputs* plus any extra ONNX
    attributes.

    Shared implementation behind the ``convert_tlx_*`` helpers below, which
    previously repeated this lookup seven times (and discarded an unpacked
    ``kw`` value each time).
    """
    map_func, _registered_kw = OpMapper.OPSETS[op_type][1]
    kw = {"inputs": inputs, "outputs": outputs}
    kw.update(attrs)
    return map_func(node=None, **kw)


def convert_tlx_relu(inputs, outputs, act=None):
    """Map a TLX ReLU activation to an ONNX Relu node."""
    return _convert_tlx_act('ReLU', inputs, outputs)


def convert_tlx_elu(inputs, outputs, act=None):
    """Map a TLX ELU activation (carries its own alpha) to an ONNX Elu node."""
    return _convert_tlx_act('ELU', inputs, outputs, alpha=act.alpha)


def convert_tlx_tanh(inputs, outputs, act=None):
    """Map a TLX Tanh activation to an ONNX Tanh node."""
    return _convert_tlx_act('Tanh', inputs, outputs)


def convert_tlx_sigmoid(inputs, outputs, act=None):
    """Map a TLX Sigmoid activation to an ONNX Sigmoid node."""
    return _convert_tlx_act('Sigmoid', inputs, outputs)


def convert_tlx_lrelu(inputs, outputs, act=None):
    """Map a TLX LeakyReLU activation to an ONNX LeakyRelu node."""
    return _convert_tlx_act('LeakyReLU', inputs, outputs, alpha=act.negative_slope)


def convert_tlx_softplus(inputs, outputs, act=None):
    """Map a TLX Softplus activation to an ONNX Softplus node."""
    return _convert_tlx_act('Softplus', inputs, outputs)


def convert_tlx_softmax(inputs, outputs, act=None):
    """Map a TLX Softmax activation to an ONNX Softmax node."""
    return _convert_tlx_act('Softmax', inputs, outputs)


# Maps a TLX activation class name to the converter that emits its ONNX node.
tlx_act_2_onnx = {
    "ReLU" : convert_tlx_relu,
    "ELU" : convert_tlx_elu,
    "Tanh" : convert_tlx_tanh,
    "Sigmoid": convert_tlx_sigmoid,
    "LeakyReLU" : convert_tlx_lrelu,
    "Softplus" : convert_tlx_softplus,
    "Softmax": convert_tlx_softmax,
}
139 |
def make_shape_channels_first(shape):
    """Makes a (N, ..., C) shape into (N, C, ...).

    Works on both tuples and lists; the result has the same type as *shape*.
    """
    batch = shape[:1]
    channels = shape[-1:]
    spatial = shape[1:-1]
    return batch + channels + spatial
144 |
145 |
def make_shape_channels_last(shape):
    """Makes a (N, C, ...) shape into (N, ..., C).

    Bug fix: the spatial part is ``shape[2:]``, not ``shape[1:-1]``. The old
    code returned ``shape[:1] + shape[1:-1] + shape[1:2]``, i.e. it kept the
    channel dim in place and duplicated it at the end — (1, 3, 32, 32)
    became (1, 3, 32, 3) instead of (1, 32, 32, 3).
    """
    return shape[:1] + shape[2:] + shape[1:2]
150 |
def get_channels_first_permutation(spatial):
    """Returns a permutation to make a (N, ..., C) array into (N, C, ...)."""
    perm = [0, spatial + 1]          # batch, then the trailing channel axis
    perm.extend(range(1, spatial + 1))  # then the spatial axes in order
    return perm
155 |
def get_channels_last_permutation(spatial):
    """Returns a permutation to make a (N, C, ...) array into (N, ..., C)."""
    # batch, the spatial axes, then the channel axis moved to the back
    return [0, *range(2, spatial + 2), 1]
160 |
def squeeze_axes(spatial):
    """Indices of the spatial axes in a channels-first layout (N, C, ...)."""
    return [axis for axis in range(2, spatial + 2)]
--------------------------------------------------------------------------------
/tlx2onnx/common/utils.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import time, sys
5 |
# Numeric level -> display name, most severe first.
levels = {0: 'ERROR', 1: 'WARNING', 2: 'INFO', 3: 'DEBUG'}


class logging():
    """Minimal leveled logger that prints timestamped messages to stdout."""

    # Messages with a level above this are suppressed (default: INFO).
    log_level = 2

    @staticmethod
    def log(level=2, message="", use_color=False):
        if logging.log_level < level:
            return
        stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        text = "{} [{}]\t{}".format(stamp, levels[level], message)
        if use_color:
            # Red-on-black ANSI highlighting for warnings/errors.
            text = "\033[1;31;40m" + text + "\033[0m"
        # utf-8 -> latin1 round-trip kept from the original implementation,
        # presumably to survive consoles with a non-UTF-8 encoding.
        print(text.encode("utf-8").decode("latin1"))
        sys.stdout.flush()

    @staticmethod
    def debug(message="", use_color=False):
        logging.log(level=3, message=message, use_color=use_color)

    @staticmethod
    def info(message="", use_color=False):
        logging.log(level=2, message=message, use_color=use_color)

    @staticmethod
    def warning(message="", use_color=True):
        logging.log(level=1, message=message, use_color=use_color)

    @staticmethod
    def error(message="", use_color=True, exit=True):
        logging.log(level=0, message=message, use_color=use_color)
        if exit:
            sys.exit(-1)
--------------------------------------------------------------------------------
/tlx2onnx/main.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import tensorlayerx as tlx
5 | from onnx import helper
6 | from .topology import construct_topology
7 | import onnx
8 | from .op_mapper.op_mapper import OpMapper
9 | from .common import make_graph, logging
10 | from .op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
11 |
def export(model, input_spec, path=None, enable_onnx_checker=True, opset_version = 9, dynamic_axes = None ,auto_update_opset=True):
    """
    Export a TensorLayerX model to an ONNX file.

    Parameters
    ----------
    model : object
        TensorLayerX instantiate the net object.
    input_spec : tensor
        TensorLayerX Input.
    path : string
        ONNX file saving path
    enable_onnx_checker : bool
        Whether to enable ONNX model checker.
    opset_version : int
        The version of the default (ai.onnx) opset to target. Must be >= 7 and <= 17.
    dynamic_axes : list or tuple
        To specify axes of tensors as dynamic.
        By default the exported model will have fixed shapes of all input and output tensors.
    auto_update_opset : bool
        When True, the opset version is raised automatically to the minimum
        required by the mapped layers; otherwise the requested version is
        only validated.

    Returns
    -------
    ONNX model file

    Examples
    ---------
    >>> class NET(Module):
    >>> net = NET()
    >>> net.set_eval()
    >>> input = tlx.nn.Input([10, 50, 50, 32], name='input')
    >>> onnx_model = export(net, input_spec=input, path='vgg.onnx')

    """


    # Trace the model into an ordered dict of per-layer records; the first
    # entry describes the graph input, the last the graph output.
    memory = construct_topology(model, input_spec)
    input_shape = memory[next(iter(memory))]['out_tensors'][0]
    output_shape = memory[list(memory.keys())[-1]]['out_tensors'][0]
    input_name = memory[next(iter(memory))]['out_nodes_name'][0]
    output_name = memory[list(memory.keys())[-1]]['out_nodes_name'][0]
    input_dtype = memory[next(iter(memory))]['in_dtype']
    output_dtype = memory[list(memory.keys())[-1]]['out_dtype']
    onnx_nodes = []
    onnx_values = []
    onnx_weights = []
    # Pick/validate the opset before mapping any layer.
    if auto_update_opset:
        opset_version = OpMapper.update_opset_version(memory, opset_version)
    else:
        OpMapper.check_support_version(memory, opset_version)

    # Map every layer except the Input layer(s) to ONNX nodes/values/weights.
    for key in memory.keys():
        if memory[key]['node'].layer.__class__.__name__ not in tlx.nn.inputs.__all__:
            onnx_node, onnx_value, onnx_weight =OpMapper.mapping(memory[key], opset_version)
            onnx_nodes.extend(onnx_node)
            onnx_values.extend(onnx_value)
            onnx_weights.extend(onnx_weight)
        else:
            pass

    # Make Graph
    # With dynamic axes, intermediate value_info (which pins fixed shapes)
    # is omitted from the graph.
    if dynamic_axes is None:
        graph = make_graph(
            name='tlx-graph-export',
            inputs=[helper.make_tensor_value_info(input_name, NP_TYPE_TO_TENSOR_TYPE[input_dtype], shape=input_shape)],
            outputs=[helper.make_tensor_value_info(output_name, NP_TYPE_TO_TENSOR_TYPE[output_dtype], shape=output_shape)],
            initializer=onnx_weights,
            value_info=onnx_values,
            nodes=onnx_nodes
        )
    else:
        graph = make_graph(
            name='tlx-graph-export',
            inputs=[helper.make_tensor_value_info(input_name, NP_TYPE_TO_TENSOR_TYPE[input_dtype], shape=input_shape)],
            outputs=[helper.make_tensor_value_info(output_name, NP_TYPE_TO_TENSOR_TYPE[output_dtype], shape=output_shape)],
            initializer=onnx_weights,
            nodes=onnx_nodes
        )
    # Make model
    model_def = helper.make_model(
        graph,
        producer_name='onnx-mode'
    )
    # Mark the requested input/output axes as dynamic ('?') after the fact.
    if dynamic_axes is not None:
        for i in dynamic_axes:
            model_def.graph.input[0].type.tensor_type.shape.dim[i].dim_param = '?'
            model_def.graph.output[0].type.tensor_type.shape.dim[i].dim_param = '?'
    if enable_onnx_checker:
        onnx.checker.check_model(model_def)
    onnx.save(model_def, path)
    logging.info("ONNX model saved in {}".format(path))
    return model_def
102 |
103 |
104 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from . import datatype_mapping
5 | from . import activation
6 | from . import math
7 | from . import tensor
8 | from . import op_mapper
9 |
10 | from .nn import *
11 | from .math import *
12 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/activation.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper, TensorProto
5 | from .op_mapper import OpMapper
6 | from ..common import make_node, to_numpy
7 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | import numpy as np
9 |
10 | __all__ = ['Relu', 'LeakyReLU', 'ELU', 'Tanh', 'Sigmoid', 'Softmax', 'Softplus', 'ReLU6', 'PRelu',
11 | 'Mish', 'Swish', 'LeakyReLU6']
12 |
@OpMapper(["ReLU"])
class Relu():
    """Maps a TLX ReLU layer to the ONNX Relu op."""

    @classmethod
    def version_1(cls, node, **kwargs):
        op_type = 'Relu'
        if node is None:
            # Fused-activation path: caller supplies inputs/outputs directly.
            return make_node(op_type, **kwargs)
        onnx_node, onnx_value, onnx_init = [], [], []
        x_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        relu_node, out = make_node(op_type, inputs=[x_name], outputs=[out_name])
        onnx_node.append(relu_node)
        onnx_value.append(
            helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
        )
        return onnx_node, onnx_value, onnx_init
33 |
34 |
@OpMapper(["LeakyReLU"])
class LeakyReLU():
    """Maps a TLX LeakyReLU layer to the ONNX LeakyRelu op."""

    @classmethod
    def version_1(cls, node, **kwargs):
        if node is None:
            # Fused-activation path: caller supplies inputs/outputs/alpha.
            return make_node('LeakyRelu', **kwargs)
        onnx_node, onnx_value, onnx_init = [], [], []
        x_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        slope = node['node'].layer.negative_slope
        lrelu_node, out = make_node('LeakyRelu', inputs=[x_name], outputs=[out_name], alpha=slope)
        onnx_node.append(lrelu_node)
        onnx_value.append(
            helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
        )
        return onnx_node, onnx_value, onnx_init
54 |
55 |
56 | @OpMapper(["LeakyReLU6"])
57 | class LeakyReLU6():
58 |
59 | @classmethod
60 | def version_1(cls, node, **kwargs):
61 | onnx_node = []
62 | onnx_value = []
63 | onnx_init = []
64 | if node is not None:
65 | x_name = node['in_nodes_name'][0]
66 | out_name = node['out_nodes_name'][0]
67 | out_shape = node['out_tensors'][0]
68 | alpha = node['node'].layer.alpha
69 | l_value = helper.make_tensor_value_info(out_name + 'lrelu', NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
70 | onnx_value.append(l_value)
71 | l_node, out = make_node('LeakyRelu', inputs=[x_name], outputs=[out_name + 'lrelu'], alpha=alpha)
72 | onnx_node.append(l_node)
73 |
74 | value = helper.make_tensor_value_info(out_name, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
75 | onnx_value.append(value)
76 | max = np.array(6).astype(node['dtype'])
77 | max_value = numpy_helper.from_array(max, name='max')
78 | onnx_init.append(max_value)
79 | min_node, out = make_node('Clip', inputs=[out, "", 'max'], outputs=[out_name])
80 | onnx_node.append(min_node)
81 | return onnx_node, onnx_value, onnx_init
82 | return make_node('LeakyRelu', **kwargs)
83 |
84 |
85 | @OpMapper(["ELU"])
86 | class ELU():
87 |
88 | @classmethod
89 | def version_1(cls, node, **kwargs):
90 | onnx_node = []
91 | onnx_value = []
92 | onnx_init = []
93 | if node is not None:
94 | x_name = node['in_nodes_name'][0]
95 | out_name = node['out_nodes_name'][0]
96 | out_shape = node['out_tensors'][0]
97 | alpha = node['node'].layer.alpha
98 | e_node, out = make_node('Elu', inputs=[x_name], outputs=[out_name], alpha=alpha)
99 | value = helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
100 | onnx_node.append(e_node)
101 | onnx_value.append(value)
102 | return onnx_node, onnx_value, onnx_init
103 | return make_node('Elu', **kwargs)
104 |
105 |
106 | @OpMapper(["Tanh"])
107 | class Tanh():
108 |
109 | @classmethod
110 | def version_1(cls, node, **kwargs):
111 | onnx_node = []
112 | onnx_value = []
113 | onnx_init = []
114 | if node is not None:
115 | x_name = node['in_nodes_name'][0]
116 | out_name = node['out_nodes_name'][0]
117 | out_shape = node['out_tensors'][0]
118 | t_node, out = make_node('Tanh', inputs=[x_name], outputs=[out_name])
119 | value = helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
120 | onnx_node.append(t_node)
121 | onnx_value.append(value)
122 | return onnx_node, onnx_value, onnx_init
123 | return make_node('Tanh', **kwargs)
124 |
125 |
126 | @OpMapper(["Sigmoid"])
127 | class Sigmoid():
128 |
129 | @classmethod
130 | def version_1(cls, node, **kwargs):
131 | onnx_node = []
132 | onnx_value = []
133 | onnx_init = []
134 | if node is not None:
135 | x_name = node['in_nodes_name'][0]
136 | out_name = node['out_nodes_name'][0]
137 | out_shape = node['out_tensors'][0]
138 | s_node, out = make_node('Sigmoid', inputs=[x_name], outputs=[out_name])
139 | value = helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
140 | onnx_node.append(s_node)
141 | onnx_value.append(value)
142 | return onnx_node, onnx_value, onnx_init
143 | return make_node('Sigmoid', **kwargs)
144 |
145 |
146 | @OpMapper(["Softmax"])
147 | class Softmax():
148 |
149 | @classmethod
150 | def version_1(cls, node, **kwargs):
151 | onnx_node = []
152 | onnx_value = []
153 | onnx_init = []
154 | if node is not None:
155 | x_name = node['in_nodes_name'][0]
156 | out_name = node['out_nodes_name'][0]
157 | out_shape = node['out_tensors'][0]
158 | axis = node['node'].layer.axis
159 | s_node, out = make_node('Softmax', inputs=[x_name], outputs=[out_name], axis=axis)
160 | value = helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
161 | onnx_node.append(s_node)
162 | onnx_value.append(value)
163 | return onnx_node, onnx_value, onnx_init
164 | return make_node('Softmax', **kwargs)
165 |
166 |
167 | @OpMapper(["Softplus"])
168 | class Softplus():
169 |
170 | @classmethod
171 | def version_1(cls, node, **kwargs):
172 | onnx_node = []
173 | onnx_value = []
174 | onnx_init = []
175 | if node is not None:
176 | x_name = node['in_nodes_name'][0]
177 | out_name = node['out_nodes_name'][0]
178 | out_shape = node['out_tensors'][0]
179 | s_node, out = make_node('Softplus', inputs=[x_name], outputs=[out_name])
180 | value = helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], out_shape)
181 | onnx_node.append(s_node)
182 | onnx_value.append(value)
183 | return onnx_node, onnx_value, onnx_init
184 | return make_node('Softplus', **kwargs)
185 |
186 |
187 | @OpMapper(["ReLU6"])
188 | class ReLU6():
189 |
190 | @classmethod
191 | def version_1(cls, node, **kwargs):
192 |
193 | onnx_node = []
194 | onnx_value = []
195 | onnx_init = []
196 |
197 | x = node['in_nodes_name'][0]
198 | dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
199 |
200 | relu_out = helper.make_tensor_value_info(node['in_nodes_name'][0] + 'r', dtype, shape=node['in_tensors'][0])
201 | onnx_value.append(relu_out)
202 | relu_node, out = make_node('Relu', [x], [node['in_nodes_name'][0] + 'r'])
203 | onnx_node.append(relu_node)
204 |
205 | out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], dtype, shape=node['out_tensors'][0])
206 | onnx_value.append(out_v)
207 |
208 | max_v = np.array(6).astype(node['dtype'])
209 | max_value = numpy_helper.from_array(max_v, name='max_v')
210 | onnx_init.append(max_value)
211 | min_node, out = make_node('Clip', inputs=[out, "", 'max_v'], outputs=node['out_nodes_name'])
212 | onnx_node.append(min_node)
213 |
214 | return onnx_node, onnx_value, onnx_init
215 |
216 |
217 | @OpMapper(["PRelu"])
218 | class PRelu():
219 |
220 | @classmethod
221 | def version_1(cls, node, **kwargs):
222 |
223 | onnx_node = []
224 | onnx_value = []
225 | onnx_init = []
226 | dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
227 | # get input, output
228 | x = node['in_nodes_name'][0]
229 | y = node['out_nodes_name'][0]
230 | y_shape = node['out_tensors'][0]
231 | out = helper.make_tensor_value_info(y, dtype, shape=y_shape)
232 | onnx_value.append(out)
233 | # get train weights
234 | slope_v = node['node'].layer.alpha
235 | slope_n = node['node'].layer.__class__.__name__ + '/alpha'
236 | weights = numpy_helper.from_array(arr=to_numpy(slope_v), name=slope_n)
237 | onnx_init.append(weights)
238 | # make prelu node
239 | p_node, out = make_node('PRelu', inputs=[x, slope_n], outputs=[y])
240 | onnx_node.append(p_node)
241 | return onnx_node, onnx_value, onnx_init
242 |
243 |
244 | @OpMapper(["Mish"])
245 | class Mish():
246 |
247 | @classmethod
248 | def version_1(cls, node, **kwargs):
249 |
250 | onnx_node = []
251 | onnx_value = []
252 | onnx_init = []
253 | dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
254 | # get input, output
255 | x = node['in_nodes_name'][0]
256 | y = node['out_nodes_name'][0]
257 | y_shape = node['out_tensors'][0]
258 | # make softplus node
259 | s_value = helper.make_tensor_value_info(y + '_softplus', dtype, y_shape)
260 | onnx_value.append(s_value)
261 | s_node, out = make_node('Softplus', inputs=[x], outputs=[y + '_softplus'])
262 | onnx_node.append(s_node)
263 | # make tanh node
264 | t_value = helper.make_tensor_value_info(y + '_tanh', dtype, y_shape)
265 | onnx_value.append(t_value)
266 | t_node, out = make_node('Tanh', inputs=[out], outputs=[y + '_tanh'])
267 | onnx_node.append(t_node)
268 | # make matmul
269 | out_v = helper.make_tensor_value_info(y, dtype, shape=y_shape)
270 | onnx_value.append(out_v)
271 | o_node, _ = make_node('Mul', inputs=[x, out], outputs=[y])
272 | onnx_node.append(o_node)
273 | return onnx_node, onnx_value, onnx_init
274 |
275 |
276 | @OpMapper(["Swish"])
277 | class Swish():
278 |
279 | @classmethod
280 | def version_1(cls, node, **kwargs):
281 |
282 | onnx_node = []
283 | onnx_value = []
284 | onnx_init = []
285 | dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
286 | # get input, output
287 | x = node['in_nodes_name'][0]
288 | y = node['out_nodes_name'][0]
289 | y_shape = node['out_tensors'][0]
290 | # make softplus node
291 | s_value = helper.make_tensor_value_info(y + '_sigmoid', dtype, y_shape)
292 | onnx_value.append(s_value)
293 | s_node, out = make_node('Sigmoid', inputs=[x], outputs=[y + '_sigmoid'])
294 | onnx_node.append(s_node)
295 | # make matmul
296 | out_v = helper.make_tensor_value_info(y, dtype, shape=y_shape)
297 | onnx_value.append(out_v)
298 | o_node, _ = make_node('Mul', inputs=[x, out], outputs=[y])
299 | onnx_node.append(o_node)
300 | return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/datatype_mapping.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 | from __future__ import unicode_literals
7 |
8 | from onnx import TensorProto, SequenceProto
9 | from typing import Text, Any
10 | import numpy as np # type: ignore
11 |
# Mapping from ONNX TensorProto element types to NumPy dtypes.
TENSOR_TYPE_TO_NP_TYPE = {
    TensorProto.FLOAT: np.dtype('float32'),
    TensorProto.UINT8: np.dtype('uint8'),
    TensorProto.INT8: np.dtype('int8'),
    TensorProto.UINT16: np.dtype('uint16'),
    TensorProto.INT16: np.dtype('int16'),
    TensorProto.INT32: np.dtype('int32'),
    TensorProto.INT64: np.dtype('int64'),
    TensorProto.BOOL: np.dtype('bool'),
    TensorProto.FLOAT16: np.dtype('float16'),
    TensorProto.DOUBLE: np.dtype('float64'),
    TensorProto.COMPLEX64: np.dtype('complex64'),
    TensorProto.COMPLEX128: np.dtype('complex128'),
    TensorProto.UINT32: np.dtype('uint32'),
    TensorProto.UINT64: np.dtype('uint64'),
    # BUG FIX: `np.object` was a deprecated alias for the builtin `object`
    # and was removed in NumPy 1.24, which made this module fail to import
    # on modern NumPy; the builtin produces the identical dtype.
    TensorProto.STRING: np.dtype(object)
}

# Inverse mapping: NumPy dtype -> ONNX TensorProto element type.
NP_TYPE_TO_TENSOR_TYPE = {v: k for k, v in TENSOR_TYPE_TO_NP_TYPE.items()}

# Element type each TensorProto type is physically stored as in the proto.
TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE = {
    TensorProto.FLOAT: TensorProto.FLOAT,
    TensorProto.UINT8: TensorProto.INT32,
    TensorProto.INT8: TensorProto.INT32,
    TensorProto.UINT16: TensorProto.INT32,
    TensorProto.INT16: TensorProto.INT32,
    TensorProto.INT32: TensorProto.INT32,
    TensorProto.INT64: TensorProto.INT64,
    TensorProto.BOOL: TensorProto.INT32,
    TensorProto.FLOAT16: TensorProto.UINT16,
    TensorProto.BFLOAT16: TensorProto.UINT16,
    TensorProto.DOUBLE: TensorProto.DOUBLE,
    TensorProto.COMPLEX64: TensorProto.FLOAT,
    TensorProto.COMPLEX128: TensorProto.DOUBLE,
    TensorProto.UINT32: TensorProto.UINT32,
    TensorProto.UINT64: TensorProto.UINT64,
    TensorProto.STRING: TensorProto.STRING,
}

# Proto field that holds the raw values for each storage element type.
STORAGE_TENSOR_TYPE_TO_FIELD = {
    TensorProto.FLOAT: 'float_data',
    TensorProto.INT32: 'int32_data',
    TensorProto.INT64: 'int64_data',
    TensorProto.UINT16: 'int32_data',
    TensorProto.DOUBLE: 'double_data',
    TensorProto.COMPLEX64: 'float_data',
    TensorProto.COMPLEX128: 'double_data',
    TensorProto.UINT32: 'uint64_data',
    TensorProto.UINT64: 'uint64_data',
    TensorProto.STRING: 'string_data',
    TensorProto.BOOL: 'int32_data',
}

# SequenceProto element kind -> field holding the values.
STORAGE_ELEMENT_TYPE_TO_FIELD = {
    SequenceProto.TENSOR: 'tensor_values',
    SequenceProto.SPARSE_TENSOR: 'sparse_tensor_values',
    SequenceProto.SEQUENCE: 'sequence_values',
    SequenceProto.MAP: 'map_values'
}

# Dtype name string (e.g. from framework dtypes) -> TensorProto element type.
STR_TYPE_TO_TENSOR_TYPE = {
    'float32': TensorProto.FLOAT,
    'uint8': TensorProto.UINT8,
    'int8': TensorProto.INT8,
    'uint16': TensorProto.UINT16,
    'int16': TensorProto.INT16,
    'int32': TensorProto.INT32,
    'int64': TensorProto.INT64,
    'bool': TensorProto.BOOL,
    'float16': TensorProto.FLOAT16,
    'float64': TensorProto.DOUBLE,
    'complex64': TensorProto.COMPLEX64,
    'complex128': TensorProto.COMPLEX128,
    'uint32': TensorProto.UINT32,
    'uint64': TensorProto.UINT64,
}
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/math/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from . import matmul
5 | from . import add
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/math/add.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, TensorProto
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, transpose_shape
7 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 |
@OpMapper('Add')
class Add():
    # supports v7-v12

    @classmethod
    def version_7(cls, node, **kwargs):
        """Map an elementwise TLX add onto the ONNX ``Add`` operator."""
        # The topology pass records exactly two inputs for a binary add.
        lhs = node['in_nodes_name'][0]
        rhs = node['in_nodes_name'][1]
        result = node['out_nodes_name'][0]

        add_node, _ = make_node('Add', inputs=[lhs, rhs], outputs=[result])
        # No extra value infos or initializers are needed for Add.
        return [add_node], [], []
30 |
31 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/math/matmul.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, TensorProto
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, transpose_shape
7 |
8 |
@OpMapper('MatMul')
class MatMul():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX matmul onto the ONNX ``MatMul`` operator.

        MatMul-1 only accepts float tensors, so float64 inputs are cast to
        float32 before the multiply and the result is cast back to double
        afterwards. If the layer carries ``transpose_X`` / ``transpose_Y``
        flags, a Transpose node swapping the last two axes is inserted for
        the corresponding input.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        x = node['in_nodes_name'][0]
        y = node['in_nodes_name'][1]

        if hasattr(node['node'], 'transpose_X'):
            # Permutation that swaps the last two axes of X.
            perm = list(range(len(node['in_tensors'][0])))
            perm[-1], perm[-2] = perm[-2], perm[-1]
            if node['dtype'] == 'float64':
                # Cast double input down to float for MatMul-1.
                cxv = helper.make_tensor_value_info(node['in_nodes_name'][0] + '_cast', TensorProto.FLOAT,
                                                    shape=node['in_tensors'][0])
                onnx_value.append(cxv)
                cxn, x = make_node('Cast', inputs=[x], outputs=[node['in_nodes_name'][0] + '_cast'], to=TensorProto.FLOAT)
                onnx_node.append(cxn)
            cxtv = helper.make_tensor_value_info(node['in_nodes_name'][0] + '_t', TensorProto.FLOAT,
                                                 shape=transpose_shape(node['in_tensors'][0], perm))
            onnx_value.append(cxtv)
            # BUG FIX: outputs was built as [['in_nodes_name'][0]] + '_t',
            # which indexes a literal list (yielding the string
            # 'in_nodes_name') and then raises TypeError on list + str;
            # the intent was the input tensor's name suffixed with '_t'.
            cxt, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + '_t'], perm=perm)
            onnx_node.append(cxt)

        if hasattr(node['node'], 'transpose_Y'):
            # Permutation that swaps the last two axes of Y.
            perm = list(range(len(node['in_tensors'][1])))
            perm[-1], perm[-2] = perm[-2], perm[-1]
            if node['dtype'] == 'float64':
                # Cast double input down to float for MatMul-1.
                cyv = helper.make_tensor_value_info(node['in_nodes_name'][1] + '_cast', TensorProto.FLOAT,
                                                    shape=node['in_tensors'][1])
                onnx_value.append(cyv)
                cyn, y = make_node('Cast', inputs=[y], outputs=[node['in_nodes_name'][1] + '_cast'], to=TensorProto.FLOAT)
                onnx_node.append(cyn)
            cytv = helper.make_tensor_value_info(node['in_nodes_name'][1] + '_t', TensorProto.FLOAT,
                                                 shape=transpose_shape(node['in_tensors'][1], perm))
            onnx_value.append(cytv)
            cyt, y = make_node('Transpose', inputs=[y], outputs=[node['in_nodes_name'][1] + '_t'], perm=perm)
            onnx_node.append(cyt)

        if node['dtype'] == 'float64':
            m_out = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'm', TensorProto.FLOAT, shape=node['out_tensors'][0])
            onnx_value.append(m_out)
            # BUG FIX: outputs must be a list of tensor names, not a bare
            # string; a bare string is treated as a sequence of characters.
            mat, m_o = make_node('MatMul', inputs=[x, y], outputs=[node['out_nodes_name'][0] + 'm'])
            onnx_node.append(mat)

            # Cast the float result back to double for the layer output.
            out = helper.make_tensor_value_info(node['out_nodes_name'][0], TensorProto.DOUBLE, shape=node['out_tensors'][0])
            onnx_value.append(out)
            # BUG FIX: inputs must also be a list of names (was a bare string).
            o_node, _ = make_node('Cast', inputs=[m_o], to=TensorProto.DOUBLE, outputs=node['out_nodes_name'])
            onnx_node.append(o_node)
        else:
            out = helper.make_tensor_value_info(node['out_nodes_name'][0], TensorProto.FLOAT, shape=node['out_tensors'][0])
            onnx_value.append(out)
            o_node, _ = make_node('MatMul', inputs=[x, y], outputs=node['out_nodes_name'])
            onnx_node.append(o_node)
        return onnx_node, onnx_value, onnx_init
82 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from .linear import *
5 | from .dropout import *
6 | from .flatten import *
7 | from .conv import *
8 | from .normalization import *
9 | from .deconv import *
10 | from .globalpool import *
11 | from .shape import *
12 | from .embedding import *
13 | from .pool import *
14 | from .adaptivepool import *
15 | from .dwconv import *
16 | from .extend import *
17 | from .rnn import *
18 | from .resampling import *
19 | from .merge import *
20 | from .noise import *
21 | from .padding import *
22 | from .scale import *
23 | from .stack import *
24 | from .subpixelconv import *
25 | from .mask_conv import *
26 | from .groupconv import *
27 |
28 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/adaptivepool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper
5 | from collections import OrderedDict
6 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
7 | from tlx2onnx.op_mapper.op_mapper import OpMapper
8 | from tlx2onnx.common import make_node
9 | from tlx2onnx.common import make_shape_channels_first, get_channels_first_permutation,get_channels_last_permutation
10 |
def cal_stride_and_kernel(input_shape, output_size, spatial):
    """Derive pooling strides/kernels that emulate adaptive pooling.

    ``input_shape`` is channels-first (N, C, *spatial dims) and
    ``output_size`` gives the target extent of each spatial dimension.
    Returns a ``(strides, kernel_shape)`` pair of lists, one entry per
    spatial dimension.
    """
    spatial_dims = input_shape[2:]
    strides, kernels = [], []
    for axis in range(spatial):
        step = int(spatial_dims[axis] / output_size[axis])
        strides.append(step)
        # Window sized so the last window ends exactly at the input edge.
        kernels.append(spatial_dims[axis] - (output_size[axis] - 1) * step)
    return strides, kernels
21 |
22 | @OpMapper(["AdaptiveMaxPool1d", "AdaptiveMaxPool2d", "AdaptiveMaxPool3d", "AdaptiveAvgPool1d", "AdaptiveAvgPool2d", "AdaptiveAvgPool3d"])
23 | class AdaptivePool():
24 | # suppport v1-v11
25 |
26 | @classmethod
27 | def version_1(cls, node, **kwargs):
28 | onnx_node = []
29 | onnx_value = []
30 | onnx_init = []
31 |
32 | attr_dict = OrderedDict()
33 | # get in_node_name out_node_nmae
34 | x_name = node['in_nodes_name'][0]
35 | out_name = node['out_nodes_name'][0]
36 | x_shape = node['in_tensors'][0]
37 | out_shape = node['out_tensors'][0]
38 |
39 | #### get data_type
40 | data_type = node['dtype']
41 | tensor_type = NP_TYPE_TO_TENSOR_TYPE[data_type]
42 |
43 | # get cur_node_layer node_index
44 | layer = node['node'].layer
45 | layer_name = layer.__class__.__name__
46 | spatial = int(layer_name[-2])
47 | layer_type = layer_name[-9:-2]
48 | if layer_type == "MaxPool":
49 | Op_name = "MaxPool"
50 | elif layer_type == "AvgPool":
51 | Op_name = "AveragePool"
52 |
53 |
54 | # get output size
55 | output_size = layer.output_size
56 | if isinstance(output_size, int):
57 | output_size = (output_size, ) * spatial
58 |
59 | # insert pool attr
60 | data_format = node['attr']['data_format']
61 | attr_dict["auto_pad"] = "VALID"
62 |
63 |
64 | if data_format == 'channels_last':
65 | permutation = get_channels_first_permutation(spatial)
66 | x_shape_t = make_shape_channels_first(x_shape)
67 | strides, kernel_shape = cal_stride_and_kernel(input_shape=x_shape_t, output_size=output_size, spatial=spatial)
68 | attr_dict["strides"] = strides
69 | attr_dict["kernel_shape"] = kernel_shape
70 | # insert transpose op: NHWC -> NCHW
71 | transpose_value = helper.make_tensor_value_info(x_name+'_t', tensor_type, shape=x_shape_t)
72 | onnx_value.append(transpose_value)
73 | transpose_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name+'_t'], perm = permutation)
74 | onnx_node.append(transpose_node)
75 |
76 | attr_dict["inputs"] = [out]
77 | attr_dict["outputs"] = [out+'_t']
78 | maxpool_node, out = make_node(Op_name, **attr_dict)
79 | onnx_node.append(maxpool_node)
80 | out_shape_t = make_shape_channels_first(out_shape)
81 | maxpool_value = helper.make_tensor_value_info(out, tensor_type, shape=out_shape_t)
82 | onnx_value.append(maxpool_value)
83 |
84 | # insert transpose op: NCHW -> NHWC
85 | permutation = get_channels_last_permutation(spatial)
86 | transpose_node, out = make_node('Transpose', inputs=[out], outputs=[out_name], perm=permutation)
87 | onnx_node.append(transpose_node)
88 | transpose_value = helper.make_tensor_value_info(out_name, tensor_type, shape=out_shape)
89 | onnx_value.append(transpose_value)
90 | return onnx_node, onnx_value, onnx_init
91 |
92 | elif data_format == 'channels_first':
93 |
94 | attr_dict["inputs"] = [x_name]
95 | attr_dict["outputs"] = [out_name]
96 | strides, kernel_shape = cal_stride_and_kernel(input_shape=x_shape, output_size=output_size,
97 | spatial=spatial)
98 | attr_dict["strides"] = strides
99 | attr_dict["kernel_shape"] = kernel_shape
100 | maxpool_node, out = make_node(Op_name, **attr_dict)
101 | onnx_node.append(maxpool_node)
102 | maxpool_value = helper.make_tensor_value_info(out, tensor_type, out_shape)
103 | onnx_value.append(maxpool_value)
104 | return onnx_node, onnx_value, onnx_init
105 |
106 | else:
107 | raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))
108 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/conv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | import numpy as np
4 | from onnx import helper, numpy_helper
5 | from collections import OrderedDict
6 | import tensorlayerx as tlx
7 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | from tlx2onnx.op_mapper.op_mapper import OpMapper
9 | from tlx2onnx.common import make_node
10 | from tlx2onnx.common import make_shape_channels_first, get_channels_first_permutation,get_channels_last_permutation
11 | from tlx2onnx.common import convert_padding, convert_w, tlx_act_2_onnx, convert_b
12 |
13 | @OpMapper(["Conv1d", "Conv2d", "Conv3d"])
14 | class Conv():
15 | # suppport v1-v13
16 |
17 | @classmethod
18 | def any_version(cls, node, opset, **kwargs):
19 | """
20 | Parameters
21 | ----------
22 | node:node dict {node: node,
23 | in_tensors: node inputs,
24 | out_tensors: node outputs,
25 | in_nodes_name: node inputs name,
26 | out_nodes_name: node outputs name}
27 | Returns
28 | -------
29 | """
30 | Op_name = 'Conv'
31 | onnx_node, onnx_value, onnx_init = [], [], []
32 | attr_dict = OrderedDict()
33 |
34 | #### get data_type
35 | data_type = node['dtype']
36 | tensor_type = NP_TYPE_TO_TENSOR_TYPE[data_type]
37 | #### get in_node_name out_node_nmae
38 | x_name = node['in_nodes_name'][0]
39 | out_name = node['out_nodes_name'][0]
40 | x_shape = node['in_tensors'][0]
41 | out_shape = node['out_tensors'][0]
42 |
43 | #### get cur_node_layer node_index
44 | layer = node['node'].layer
45 | layer_type = layer.__class__.__name__
46 | spatial = int(layer_type[-2])
47 | node_name = layer.name
48 | #### get layer_param
49 | layer_param = layer.all_weights
50 |
51 | #### get layer_act_type
52 | layer_act = layer.act.__class__.__name__
53 |
54 | #### conv inputs
55 | w = None
56 | b = None
57 | if len(layer_param) == 1:
58 | w = layer_param[0]
59 | elif len(layer_param) == 2:
60 | w = layer_param[0]
61 | b = layer_param[1]
62 |
63 | #### insert conv attr
64 | kernel_size = node['attr']['kernel_size']
65 | if isinstance(kernel_size, int):
66 | kernel_size = [kernel_size]
67 | attr_dict["kernel_shape"] = kernel_size
68 | dilations = node['attr']['dilation']
69 | if isinstance(dilations, int):
70 | dilations = [dilations,]
71 | attr_dict["dilations"] = dilations
72 | strides = node['attr']['stride']
73 | if isinstance(strides, int):
74 | strides = [strides]
75 | attr_dict["strides"] = strides
76 | data_format = node['attr']['data_format']
77 | paddding = node['attr']['padding']
78 | attr_dict["group"] = 1
79 | attr_dict["outputs"] = [out_name]
80 |
81 | ####convert padding
82 | pads = convert_padding(
83 | paddding, x_shape, out_shape, attr_dict["kernel_shape"], attr_dict["strides"],
84 | attr_dict["dilations"], spatial, data_format
85 | )
86 | if isinstance(pads, str):
87 | attr_dict["auto_pad"] = pads
88 | else:
89 | attr_dict["pads"] = pads
90 |
91 | if data_format == 'channels_last':
92 | permutation = get_channels_first_permutation(spatial)
93 | x_shape_t = make_shape_channels_first(x_shape)
94 | # insert transpose op: NHWC -> NCHW
95 | transpose_value = helper.make_tensor_value_info(x_name+'_t', tensor_type, shape=x_shape_t)
96 | onnx_value.append(transpose_value)
97 | transpose_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name+'_t'], perm = permutation)
98 | onnx_node.append(transpose_node)
99 | # convert w
100 | w_name = node_name + '_w'
101 | w_init = convert_w(w, data_format, spatial, w_name)
102 | onnx_init.append(w_init)
103 | attr_dict["inputs"] = [out, w_name]
104 |
105 | #### convert b
106 | if b is not None:
107 | b_name = node_name + '_b'
108 | b_init = convert_b(b, b_name)
109 | onnx_init.append(b_init)
110 | attr_dict["inputs"] = [out, w_name, b_name]
111 |
112 | attr_dict["outputs"] = [out + "_t"]
113 | conv_node, out = make_node(Op_name, **attr_dict)
114 | onnx_node.append(conv_node)
115 | out_shape_t = make_shape_channels_first(out_shape)
116 | conv_value = helper.make_tensor_value_info(out, tensor_type, shape=out_shape_t)
117 | onnx_value.append(conv_value)
118 | # insert transpose op: NCHW -> NHWC and insert act node
119 |
120 | if layer_act != 'NoneType':
121 | act_convert = tlx_act_2_onnx[layer_act]
122 | act_input = out_name + "_act"
123 | act_out = out_name
124 | # insert transpose op
125 | permutation = get_channels_last_permutation(spatial)
126 | transpose_node, out = make_node('Transpose', inputs=[out], outputs=[act_input], perm = permutation)
127 | onnx_node.append(transpose_node)
128 | transpose_value = helper.make_tensor_value_info(act_input, tensor_type, shape = out_shape)
129 | onnx_value.append(transpose_value)
130 | # 如果layer存在act,需要新增一个act node 和 对应act输入的act input info, 并且要更新 conv的outputs 为 act的inputs, 此时act的outputs是整个layer的outputs
131 | act_node, _ = act_convert([out], [act_out])
132 | act_input_value_info = helper.make_tensor_value_info(act_out, tensor_type, out_shape)
133 | onnx_value.append(act_input_value_info)
134 | onnx_node.append(act_node)
135 | return onnx_node, onnx_value, onnx_init
136 | else:
137 | permutation = get_channels_last_permutation(spatial)
138 | transpose_node, out = make_node('Transpose', inputs=[out], outputs=[out_name], perm=permutation)
139 | onnx_node.append(transpose_node)
140 | transpose_value = helper.make_tensor_value_info(out_name, tensor_type, shape=out_shape)
141 | onnx_value.append(transpose_value)
142 | return onnx_node, onnx_value, onnx_init
143 |
144 |
145 | elif data_format == 'channels_first':
146 |
147 | #### convert w
148 | w_name = node_name + '_w'
149 | w_init = convert_w(w, data_format, spatial, w_name)
150 | onnx_init.append(w_init)
151 | attr_dict["inputs"] = [x_name, w_name]
152 |
153 | #### convert b
154 | if b is not None:
155 | b_name = node_name + '_b'
156 | b_init = convert_b(b, b_name)
157 | onnx_init.append(b_init)
158 | attr_dict["inputs"] = [x_name, w_name, b_name]
159 |
160 | #### make act node
161 | if layer_act != 'NoneType':
162 | act_convert = tlx_act_2_onnx[layer_act]
163 | act_input = out_name + "_act"
164 | act_out = out_name
165 | attr_dict["outputs"] = [act_input]
166 | conv_node, out = make_node(Op_name, **attr_dict)
167 | onnx_node.append(conv_node)
168 | conv_value = helper.make_tensor_value_info(out, tensor_type, shape = out_shape)
169 | onnx_value.append(conv_value)
170 | #insert act node
171 | act_node, out = act_convert([act_input], [act_out])
172 | act_input_value_info = helper.make_tensor_value_info(out, tensor_type, out_shape)
173 | onnx_value.append(act_input_value_info)
174 | onnx_node.append(act_node)
175 | return onnx_node, onnx_value, onnx_init
176 | else:
177 | conv_node, out = make_node(Op_name, **attr_dict)
178 | onnx_node.append(conv_node)
179 | conv_value = helper.make_tensor_value_info(out, tensor_type, out_shape)
180 | onnx_value.append(conv_value)
181 | return onnx_node, onnx_value, onnx_init
182 | else:
183 | raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))
184 |
185 | @classmethod
186 | def version_1(cls, node, **kwargs):
187 |
188 | return cls.any_version(node, 1, **kwargs)
189 |
190 |
191 | @classmethod
192 | def version_11(cls, node, **kwargs):
193 |
194 | return cls.any_version( node, 11, **kwargs)
195 |
196 | @classmethod
197 | def version_13(cls, node, **kwargs):
198 |
199 | return cls.any_version(node, 13, **kwargs)
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/deconv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, to_numpy
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | from ...common import tlx_act_2_onnx, convert_padding, make_shape_channels_first, convert_w, \
9 | get_channels_last_permutation, get_channels_first_permutation
10 |
@OpMapper(['ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d'])
class ConvTranspose():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX ConvTranspose1d/2d/3d layer to an ONNX ConvTranspose node.

        ``node`` is a topology dict: 'node' (TLX layer wrapper), 'attr'
        (layer hyper-parameters), 'in_tensors'/'out_tensors' (shapes),
        'in_nodes_name'/'out_nodes_name' (tensor names), 'dtype'.

        Returns (onnx_node, onnx_value, onnx_init): the node list, the
        value_info list and the initializer list for the exported graph.

        ONNX convolution ops are channels_first only, so channels_last
        inputs are transposed to channels_first before the ConvTranspose
        and transposed back afterwards.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        x = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_shape = node['out_tensors'][0]
        # spatial rank from the trailing digit of the class name,
        # e.g. ConvTranspose2d -> 2
        spatial = int(node['node'].layer.__class__.__name__[-2])

        y = node['node'].layer.name + '/weights'
        weights_value = node['node'].layer.filters

        attr_dict = {}
        attr_dict['dilations'] = dilations = node['attr']['dilation']
        attr_dict['kernel_shape'] = kernel_shape = node['attr']['kernel_size']
        attr_dict['strides'] = strides = node['attr']['stride']
        pads = node['attr']['padding']
        data_format = node['attr']['data_format']

        if data_format == 'channels_last':
            # channels_last: convert weights to ONNX layout and insert a
            # Transpose node that moves the input to channels_first
            x_temp_shape = make_shape_channels_first(x_shape)
            out_temp_shape = make_shape_channels_first(out_shape)
            weights = convert_w(weights_value, data_format, spatial, y)
            onnx_init.append(weights)
            t_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 't', NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=x_temp_shape)
            onnx_value.append(t_x)
            tx_node, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + 't'], perm=get_channels_first_permutation(spatial))
            onnx_node.append(tx_node)
        else:
            # channels_first: export the weights as-is
            weights = numpy_helper.from_array(arr=to_numpy(weights_value), name=y)
            onnx_init.append(weights)

        # Build padding: convert_padding returns either an auto_pad string
        # (e.g. 'SAME_UPPER') or an explicit pads list
        pads = convert_padding(
            pads, x_shape, out_shape, kernel_shape, strides,
            dilations, spatial, data_format
        )
        if isinstance(pads, str):
            attr_dict["auto_pad"] = pads
        else:
            attr_dict["pads"] = pads

        # Optional bias becomes the third input of the ConvTranspose node
        if node['node'].layer.b_init is not None:
            b = numpy_helper.from_array(arr=to_numpy(node['node'].layer.biases), name=node['node'].layer.name + '/b')
            onnx_init.append(b)
            b_name = node['node'].layer.name + '/b'
            input_list = [x, y, b_name]
        else:
            input_list = [x, y]

        if data_format == 'channels_first':
            if node['node'].layer.act is not None:
                # Build ConvTranspose into a temporary output ('de') that
                # feeds the activation node
                de_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'de', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                     shape=out_shape)
                onnx_value.append(de_v)
                ct_node, out = make_node('ConvTranspose', inputs=input_list,
                                         outputs=[node['out_nodes_name'][0] + 'de'], **attr_dict)
                onnx_node.append(ct_node)

                act_op = node['node'].layer.act.__class__.__name__
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_shape)
                onnx_value.append(out_v)
                # Using Opmapper: activation mapped via shared tlx->onnx table
                act_node, _ = tlx_act_2_onnx[act_op]([out], node['out_nodes_name'], node['node'].layer.act)
                onnx_node.append(act_node)
            else:
                # no activation: ConvTranspose writes the layer output directly
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_shape) #
                onnx_value.append(out_v)
                ct_node, out = make_node('ConvTranspose', inputs=input_list,
                                         outputs=node['out_nodes_name'], **attr_dict)
                onnx_node.append(ct_node)
        elif data_format == 'channels_last':
            if node['node'].layer.act is not None:
                # Build ConvTranspose in channels_first layout; output ('ct')
                # is temporary and still needs a Transpose back afterwards
                ct_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'ct', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                     shape=out_temp_shape)
                onnx_value.append(ct_v)
                ct_node, out = make_node('ConvTranspose', inputs=input_list,
                                         outputs=[node['out_nodes_name'][0] + 'ct'], **attr_dict)
                onnx_node.append(ct_node)

                act_op = node['node'].layer.act.__class__.__name__
                act_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'a', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_temp_shape)
                onnx_value.append(act_v)
                # Using Opmapper: activation applied while still channels_first
                act_node, out = tlx_act_2_onnx[act_op]([out], [node['out_nodes_name'][0] + 'a'], node['node'].layer.act)
                onnx_node.append(act_node)
            else:
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'ct', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_temp_shape)
                onnx_value.append(out_v)
                o_node, out = make_node('ConvTranspose', inputs=input_list,
                                        outputs=[node['out_nodes_name'][0] + 'ct'], **attr_dict)
                onnx_node.append(o_node)

            # transpose the (post-activation) result back to channels_last
            t_out = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_shape)
            onnx_value.append(t_out)
            tout_node, _ = make_node('Transpose', inputs=[out], outputs=node['out_nodes_name'], perm=get_channels_last_permutation(spatial))
            onnx_node.append(tout_node)
        else:
            raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))

        return onnx_node, onnx_value, onnx_init
126 |
127 |
128 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/dropout.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 |
@OpMapper('Dropout')
class Dropout():
    # supports v1-v15

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX Dropout layer to ONNX.

        In eval mode (``is_train`` False) dropout is a no-op, so an Identity
        node is emitted. In training mode a Dropout node is emitted with the
        drop probability carried in the ``ratio`` attribute — the attribute
        form used by ONNX Dropout up to opset 11, which matches this
        registered ``version_1`` mapper.

        Returns (onnx_node, onnx_value, onnx_init).
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        x = node['in_nodes_name'][0]
        # FIX: the probability must be the `ratio` attribute, not an extra
        # *input name* — the previous str(p) input referenced a non-existent
        # tensor (literally named e.g. "0.5") and produced an invalid graph.
        dropout_prob = float(node['node'].layer.p)
        dropout_mode = node['node'].layer.is_train
        ONNX_TYPE = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]

        if dropout_mode == False:
            # inference: dropout is the identity
            y_v = helper.make_tensor_value_info(node['out_nodes_name'][0], ONNX_TYPE, shape=node['out_tensors'][0])
            onnx_value.append(y_v)
            o_node, _ = make_node('Identity', inputs=[x], outputs=node['out_nodes_name'])
            onnx_node.append(o_node)
        elif dropout_mode == True:
            # training: emit Dropout with ratio as an attribute
            y_v = helper.make_tensor_value_info(node['out_nodes_name'][0], ONNX_TYPE, shape=node['out_tensors'][0])
            onnx_value.append(y_v)
            o_node, _ = make_node('Dropout', inputs=[x], outputs=node['out_nodes_name'], ratio=dropout_prob)
            onnx_node.append(o_node)
        else:
            raise Exception("Unexpected situation happend")

        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/dwconv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from collections import OrderedDict
6 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
7 | from tlx2onnx.op_mapper.op_mapper import OpMapper
8 | from tlx2onnx.common import make_node, convert_w, convert_padding, to_numpy
9 | from tlx2onnx.common import make_shape_channels_first, get_channels_first_permutation,tlx_act_2_onnx,get_channels_last_permutation
10 |
@OpMapper(["DepthwiseConv2d"])
class DepthwiseConv():
    # suppport v1-v13

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX DepthwiseConv2d layer to ONNX.

        Depthwise-separable convolution is emitted as two Conv nodes: a
        grouped Conv (group == in_channels) for the depthwise step, then a
        1x1 Conv (group == 1) for the pointwise step. The bias (if any) is
        attached to the pointwise Conv, and the optional activation follows
        it. channels_last inputs are transposed to channels_first first,
        and the final result transposed back.

        Returns (onnx_node, onnx_value, onnx_init).
        """
        onnx_node, onnx_value, onnx_init = [], [], []
        depth_dict = OrderedDict()   # attributes for the depthwise Conv
        point_dict = OrderedDict()   # attributes for the pointwise Conv

        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        # get input output
        x_name = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_shape = node['out_tensors'][0]
        out_name = node['out_nodes_name'][0]

        # get common operation
        layer = node['node'].layer
        # spatial rank from the trailing digit of the class name (always 2 here)
        spatial = int(layer.__class__.__name__[-2])
        act_op = layer.act.__class__.__name__
        data_format = layer.data_format

        # trainable weights: separate filters for the depthwise and
        # pointwise steps, each exported as its own initializer
        depth_filters = layer.filters
        depth_name = layer.name + '_depth_w'
        point_filter = layer.point_filter
        point_name = layer.name + '_point_w'
        depth_init = convert_w(depth_filters, data_format, spatial, depth_name)
        onnx_init.append(depth_init)
        point_init = convert_w(point_filter, data_format, spatial, point_name)
        onnx_init.append(point_init)
        if layer.b_init:
            b_name = layer.name + '_b'
            b = numpy_helper.from_array(arr=to_numpy(node['node'].layer.b), name=b_name)
            onnx_init.append(b)

        # constant value (hyper-parameters); the pointwise Conv is always 1x1
        depth_dict['kernel_shape'] = kernel_size = layer.kernel_size
        point_dict['kernel_shape'] = [1, 1]
        depth_dict['strides'] = stride = layer.stride
        padding = layer.padding
        depth_dict['dilations'] = dilation = layer.dilation
        depth_multiplier = layer.depth_multiplier
        in_channels = layer.in_channels


        ####convert padding
        # intermediate (depthwise-output) shape: output spatial dims with the
        # input channel count — assumes depth_multiplier keeps channels; TODO confirm
        if data_format == 'channels_last':
            depth_shape = out_shape[0:3] + [x_shape[3]]
        else:
            depth_shape = x_shape[0:2] + out_shape[2:]
        depth_pads = convert_padding(padding, x_shape, depth_shape, kernel_size, stride, dilation, spatial, data_format)
        point_pads = convert_padding(padding, depth_shape, out_shape, (1, 1), (1, 1), (1, 1), spatial, data_format)
        if isinstance(depth_pads, str):
            depth_dict['auto_pad'] = depth_pads
        else:
            depth_dict['pads'] = depth_pads

        if isinstance(point_pads, str):
            point_dict['auto_pad'] = point_pads
        else:
            point_dict['pads'] = point_pads

        if data_format == 'channels_last':
            # transpose input to channels_first, since ONNX Conv is
            # channels_first only
            permutation = get_channels_first_permutation(spatial)
            x_t = make_shape_channels_first(x_shape)

            t_value = helper.make_tensor_value_info(x_name+'_t', dtype, shape=x_t)
            onnx_value.append(t_value)
            t_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name+'_t'], perm = permutation)
            onnx_node.append(t_node)

            # make depthwise: grouped Conv with group == in_channels
            depth_shape_temp = out_shape[0:3] + [x_t[1]]
            depthwise_out_shape = make_shape_channels_first(depth_shape_temp)
            depth_out = helper.make_tensor_value_info(layer.name + 'depth_out', dtype, shape=depthwise_out_shape)
            onnx_value.append(depth_out)
            depth_node, out = make_node('Conv', inputs=[out, depth_name], outputs=[layer.name + 'depth_out'],
                                        group= in_channels, **depth_dict)
            onnx_node.append(depth_node)

            # make pointwise: ordinary 1x1 Conv mixing channels
            point_out = helper.make_tensor_value_info(layer.name + 'point_out', dtype, shape=make_shape_channels_first(out_shape))
            onnx_value.append(point_out)
            # make bias (bias rides on the pointwise Conv)
            if layer.b_init:
                point_inputs = [out, point_name, b_name]
            else:
                point_inputs = [out, point_name]
            point_node, out = make_node('Conv', inputs=point_inputs, outputs=[layer.name + 'point_out'],
                                        group=1, **point_dict)
            onnx_node.append(point_node)

            # make activation (still in channels_first layout)
            if node['node'].layer.act is not None:
                act_out = helper.make_tensor_value_info(layer.name + 'act_out', dtype, shape=make_shape_channels_first(out_shape))
                onnx_value.append(act_out)
                act_node, out = tlx_act_2_onnx[act_op]([out], [layer.name + 'act_out'], layer.act)
                onnx_node.append(act_node)

            # Convert the result to channel last
            permutation = get_channels_last_permutation(spatial)
            transpose_node, out = make_node('Transpose', inputs=[out], outputs=[out_name], perm=permutation)
            onnx_node.append(transpose_node)
            transpose_value = helper.make_tensor_value_info(out_name, dtype, shape=out_shape)
            onnx_value.append(transpose_value)

        elif data_format == 'channels_first':
            # make depthwise: no transposes needed in this layout
            depthwise_out_shape = x_shape[0:2] + out_shape[2:]
            depth_out = helper.make_tensor_value_info(layer.name + 'depth_out', dtype, shape=depthwise_out_shape)
            onnx_value.append(depth_out)
            depth_node, out = make_node('Conv', inputs=[x_name, depth_name], outputs=[layer.name + 'depth_out'],
                                        group=in_channels, **depth_dict)
            onnx_node.append(depth_node)

            # make activation
            if node['node'].layer.act is not None:
                # make pointwise (intermediate name; activation writes out_name)
                point_out = helper.make_tensor_value_info(layer.name + 'point_out', dtype,
                                                          shape=out_shape)
                onnx_value.append(point_out)
                # make bias
                if layer.b_init:
                    point_inputs = [out, point_name, b_name]
                else:
                    point_inputs = [out, point_name]
                point_node, out = make_node('Conv', inputs=point_inputs, outputs=[layer.name + 'point_out'],
                                            group=1, **point_dict)
                onnx_node.append(point_node)
                act_out = helper.make_tensor_value_info(out_name, dtype, shape=out_shape)
                onnx_value.append(act_out)
                act_node, out = tlx_act_2_onnx[act_op]([out], [out_name], layer.act)
                onnx_node.append(act_node)
            else:
                # make pointwise (writes the layer output directly)
                point_out = helper.make_tensor_value_info(out_name, dtype, shape=out_shape)
                onnx_value.append(point_out)
                # make bias
                if layer.b_init:
                    point_inputs = [out, point_name, b_name]
                else:
                    point_inputs = [out, point_name]
                point_node, out = make_node('Conv', inputs=point_inputs, outputs=[out_name],
                                            group=1, **point_dict)
                onnx_node.append(point_node)
        else:
            raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))
        return onnx_node, onnx_value, onnx_init
161 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/embedding.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, TensorProto, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, to_numpy
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | import numpy as np
9 |
10 |
@OpMapper('OneHot')
class OneHot():
    # supports v9-v11

    @classmethod
    def version_9(cls, node, **kwargs):
        """Map a TLX OneHot layer to an ONNX OneHot node.

        ONNX OneHot requires integer indices, so non-int64 inputs are first
        cast to int64. The int64 one-hot result is then cast back to the
        node's declared dtype so the runtime output matches the value_info
        emitted for the graph output.

        Returns (onnx_node, onnx_value, onnx_init).
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input, output
        x = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        y = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        out_dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        out_v = helper.make_tensor_value_info(y, out_dtype, shape=out_shape)
        onnx_value.append(out_v)

        # layer attributes
        _depth = node['node'].layer.depth
        _on_value = node['node'].layer.on_value
        _off_value = node['node'].layer.off_value
        axis = node['node'].layer.axis

        # constant inputs consumed by the OneHot node
        values = numpy_helper.from_array(np.array([_off_value, _on_value], dtype=np.int64), name='values')
        onnx_init.append(values)
        depth = numpy_helper.from_array(np.array(_depth, dtype=np.int64), name='depth')
        onnx_init.append(depth)

        if node['dtype'] != np.int64:
            # ONNX OneHot only accepts integer indices: cast the input first
            cxv = helper.make_tensor_value_info(x + '_cast', TensorProto.INT64, shape=x_shape)
            onnx_value.append(cxv)
            cxn, x = make_node('Cast', inputs=[x], outputs=[x + '_cast'], to=TensorProto.INT64)
            onnx_node.append(cxn)

        x_hot = helper.make_tensor_value_info(y + '_hot', TensorProto.INT64, shape=out_shape)
        onnx_value.append(x_hot)
        oht_node, out = make_node('OneHot', inputs=[x, 'depth', 'values'], outputs=[y + '_hot'], axis=axis)
        onnx_node.append(oht_node)

        # FIX: cast the one-hot result back to the node's declared dtype and
        # always produce `y` — the previous code hard-coded FLOAT, which
        # mismatched the graph output's value_info for non-float dtypes.
        out_node, _ = make_node('Cast', inputs=[out], outputs=[y], to=out_dtype)
        onnx_node.append(out_node)

        return onnx_node, onnx_value, onnx_init
64 |
65 |
@OpMapper('Embedding')
class Embedding():
    # supports v1-v15

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX Embedding layer to an ONNX Gather over the embedding matrix."""
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input, output
        in_name = node['in_nodes_name'][0]
        in_shape = node['in_tensors'][0]
        out_name = node['out_nodes_name'][0]
        result_shape = node['out_tensors'][0]

        # declare the layer output
        out_info = helper.make_tensor_value_info(
            out_name, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=result_shape)
        onnx_value.append(out_info)

        # export the embedding matrix as an initializer
        table = node['node'].layer.embeddings
        onnx_init.append(numpy_helper.from_array(arr=to_numpy(table), name='embeddings'))

        # row lookup: Gather(embeddings, indices)
        gather_node, _ = make_node('Gather', inputs=['embeddings', in_name], outputs=[out_name])
        onnx_node.append(gather_node)

        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/extend.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
6 | from tlx2onnx.op_mapper.op_mapper import OpMapper
7 | from tlx2onnx.common import make_node
8 | import numpy as np
9 |
10 |
@OpMapper(["ExpandDims"])
class ExpandDims():
    # suppport v1-v13

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map TLX ExpandDims to ONNX Expand + Reshape.

        The input is first expanded with a leading singleton dimension, then
        reshaped so the singleton dimension ends up at the requested axis.
        """
        onnx_node, onnx_value, onnx_init = [], [], []
        x_name = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        axis = node['node'].layer.axis

        # Step 1: expand with the singleton at the front only.
        expanded = [1] + x_shape
        onnx_init.append(numpy_helper.from_array(np.asarray(expanded, dtype=np.int64), name='shape'))
        onnx_value.append(helper.make_tensor_value_info(out_name + '_e', dtype, expanded))
        expand_node, out = make_node('Expand', inputs=[x_name, 'shape'], outputs=[out_name + '_e'])
        onnx_node.append(expand_node)

        # Step 2: reshape so the singleton sits at `axis`
        # (negative axes other than -1 work via Python slicing).
        if axis in (-1, len(out_shape) - 1):
            target = x_shape + [1]
        else:
            target = x_shape[0:axis] + [1] + x_shape[axis:]
        onnx_init.append(numpy_helper.from_array(np.asarray(target, dtype=np.int64), name='r_shape'))
        reshape_node, out = make_node('Reshape', inputs=[out, 'r_shape'], outputs=[out_name])
        onnx_node.append(reshape_node)
        return onnx_node, onnx_value, onnx_init
43 |
44 |
@OpMapper(["Tile"])
class Tile():
    # suppport v1-v13

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map TLX Tile to an ONNX Tile node; `multiples` becomes an initializer."""
        onnx_node, onnx_value, onnx_init = [], [], []
        in_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        result_shape = node['out_tensors'][0]

        # repetition counts as an int64 initializer
        reps = np.asarray(node['node'].layer.multiples, dtype=np.int64)
        onnx_init.append(numpy_helper.from_array(reps, name='multiples'))

        tile_node, out = make_node('Tile', inputs=[in_name, 'multiples'], outputs=[out_name])
        onnx_node.append(tile_node)
        onnx_value.append(
            helper.make_tensor_value_info(out, NP_TYPE_TO_TENSOR_TYPE[node['dtype']], result_shape))
        return onnx_node, onnx_value, onnx_init
63 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/flatten.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 |
@OpMapper('Flatten')
class Flatten():
    # supports v1-v15

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX Flatten layer to a single ONNX Flatten node."""
        onnx_node, onnx_value, onnx_init = [], [], []

        in_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]

        # declare the flattened output
        onnx_value.append(
            helper.make_tensor_value_info(out_name, NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                          shape=node['out_tensors'][0]))
        flat_node, _ = make_node('Flatten', inputs=[in_name], outputs=node['out_nodes_name'])
        onnx_node.append(flat_node)

        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/globalpool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, make_shape_channels_first, get_channels_first_permutation, squeeze_axes
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | import numpy as np
9 |
@OpMapper(['GlobalMaxPool1d', 'GlobalMaxPool2d', 'GlobalMaxPool3d'])
class GlobalMaxPool():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map TLX GlobalMaxPool1d/2d/3d to ONNX GlobalMaxPool + Squeeze.

        ONNX GlobalMaxPool keeps the reduced spatial dimensions as size 1,
        so a Squeeze node removes them to match the TLX (N, C) output.
        channels_last inputs are transposed to channels_first before
        pooling; no transpose back is needed since the spatial dims vanish.

        Returns (onnx_node, onnx_value, onnx_init).
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input, output
        x = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]

        # attributes; spatial rank from the trailing digit of the class name
        data_format = node['attr']['data_format']
        spatial = int(node['node'].layer.__class__.__name__[-2])

        # pooled (pre-squeeze) shape: (N, C) followed by `spatial` ones.
        # FIX: the previous code hard-coded two trailing 1s, which was wrong
        # for the 1d and 3d variants this mapper is registered for.
        out_shape = node['out_tensors'][0]
        out_shape_temp = [out_shape[0], out_shape[1]] + [1] * spatial
        out_temp_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'temp', dtype,
                                                   shape=out_shape_temp)
        onnx_value.append(out_temp_v)
        # final (squeezed) output
        out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], dtype, shape=out_shape)
        onnx_value.append(out_v)

        if data_format == 'channels_last':
            # transpose the input to channels_first before pooling
            x_shape = make_shape_channels_first(x_shape)
            t_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 't', dtype, shape=x_shape)
            onnx_value.append(t_x)
            tx_node, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + 't'],
                                   perm=get_channels_first_permutation(spatial))
            onnx_node.append(tx_node)
        elif data_format != 'channels_first':
            raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))

        # make Global MaxPool (spatial dims kept as size-1)
        gmp_node, x = make_node('GlobalMaxPool',
                                inputs=[x],
                                outputs=[node['out_nodes_name'][0] + 'temp']
                                )
        onnx_node.append(gmp_node)

        # squeeze the size-1 spatial dimensions; axes passed as an input
        # tensor (the opset-13 Squeeze form)
        axes = np.array(squeeze_axes(spatial)).astype(np.int64)
        axes_value = numpy_helper.from_array(axes, name='axes')
        onnx_init.append(axes_value)
        sq_node, _ = make_node('Squeeze', inputs=[x, 'axes'], outputs=node['out_nodes_name'])
        onnx_node.append(sq_node)
        return onnx_node, onnx_value, onnx_init
63 |
64 |
@OpMapper(['GlobalAvgPool1d', 'GlobalAvgPool2d', 'GlobalAvgPool3d'])
class GlobalAvgPool():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map TLX GlobalAvgPool1d/2d/3d to ONNX GlobalAveragePool + Squeeze.

        ONNX GlobalAveragePool keeps the reduced spatial dimensions as size
        1, so a Squeeze node removes them to match the TLX (N, C) output.
        channels_last inputs are transposed to channels_first before
        pooling; no transpose back is needed since the spatial dims vanish.

        Returns (onnx_node, onnx_value, onnx_init).
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input, output
        x = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]

        # attributes; spatial rank from the trailing digit of the class name
        data_format = node['attr']['data_format']
        spatial = int(node['node'].layer.__class__.__name__[-2])

        # pooled (pre-squeeze) shape: (N, C) followed by `spatial` ones.
        # FIX: the previous code hard-coded two trailing 1s, which was wrong
        # for the 1d and 3d variants this mapper is registered for.
        out_shape = node['out_tensors'][0]
        out_shape_temp = [out_shape[0], out_shape[1]] + [1] * spatial
        out_temp_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'temp', dtype,
                                                   shape=out_shape_temp)
        onnx_value.append(out_temp_v)
        # final (squeezed) output
        out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], dtype, shape=out_shape)
        onnx_value.append(out_v)

        if data_format == 'channels_last':
            # transpose the input to channels_first before pooling
            x_shape = make_shape_channels_first(x_shape)
            t_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 't', dtype, shape=x_shape)
            onnx_value.append(t_x)
            tx_node, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + 't'],
                                   perm=get_channels_first_permutation(spatial))
            onnx_node.append(tx_node)
        elif data_format != 'channels_first':
            raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))

        # make Global AveragePool (spatial dims kept as size-1)
        gap_node, x = make_node('GlobalAveragePool',
                                inputs=[x],
                                outputs=[node['out_nodes_name'][0] + 'temp']
                                )
        onnx_node.append(gap_node)

        # squeeze the size-1 spatial dimensions; axes passed as an input
        # tensor (the opset-13 Squeeze form)
        axes = np.array(squeeze_axes(spatial)).astype(np.int64)
        axes_value = numpy_helper.from_array(axes, name='axes')
        onnx_init.append(axes_value)
        sq_node, _ = make_node('Squeeze', inputs=[x, 'axes'], outputs=node['out_nodes_name'])
        onnx_node.append(sq_node)
        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/groupconv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | import numpy as np
4 | from onnx import helper, numpy_helper
5 | from collections import OrderedDict
6 | import tensorlayerx as tlx
7 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | from tlx2onnx.op_mapper.op_mapper import OpMapper
9 | from tlx2onnx.common import make_node
10 | from tlx2onnx.common import make_shape_channels_first, get_channels_first_permutation,get_channels_last_permutation
11 | from tlx2onnx.common import convert_padding, convert_w, tlx_act_2_onnx, convert_b
12 |
@OpMapper(["GroupConv2d"])
class Conv():
    # suppport v1-v13

    @classmethod
    def any_version(cls, node, opset, **kwargs):
        """Map a TLX GroupConv2d layer to an ONNX Conv node with `group` set.

        Parameters
        ----------
        node : dict
            Topology entry: {node: node,
                             in_tensors: node inputs,
                             out_tensors: node outputs,
                             in_nodes_name: node inputs name,
                             out_nodes_name: node outputs name}
        opset : int
            Requested opset (the emitted attributes are identical for the
            registered versions 1/11/13).

        Returns
        -------
        (onnx_node, onnx_value, onnx_init) lists for the exported graph.
        """
        Op_name = 'Conv'
        onnx_node, onnx_value, onnx_init = [], [], []
        attr_dict = OrderedDict()

        #### get data_type
        data_type = node['dtype']
        tensor_type = NP_TYPE_TO_TENSOR_TYPE[data_type]
        #### get in_node_name out_node_nmae
        x_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_shape = node['out_tensors'][0]

        #### get cur_node_layer node_index
        layer = node['node'].layer
        layer_type = layer.__class__.__name__
        # spatial rank from the trailing digit of the class name (2 here)
        spatial = int(layer_type[-2])
        node_name = layer.name
        #### get layer_param
        layer_param = layer.all_weights

        #### get layer_act_type ('NoneType' means no activation)
        layer_act = layer.act.__class__.__name__

        #### conv inputs: weight always first, optional bias second
        w = None
        b = None
        if len(layer_param) == 1:
            w = layer_param[0]
        elif len(layer_param) == 2:
            w = layer_param[0]
            b = layer_param[1]

        #### insert conv attr (normalize scalars to lists as ONNX expects)
        kernel_size = node['attr']['kernel_size']
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size]
        attr_dict["kernel_shape"] = kernel_size
        dilations = node['attr']['dilation']
        if isinstance(dilations, int):
            dilations = [dilations,]
        attr_dict["dilations"] = dilations
        strides = node['attr']['stride']
        if isinstance(strides, int):
            strides = [strides]
        attr_dict["strides"] = strides
        data_format = node['attr']['data_format']
        paddding = node['attr']['padding']
        attr_dict["group"] = layer.n_group
        attr_dict["outputs"] = [out_name]

        ####convert padding: returns either an auto_pad string or a pads list
        pads = convert_padding(
            paddding, x_shape, out_shape, attr_dict["kernel_shape"], attr_dict["strides"],
            attr_dict["dilations"], spatial, data_format
        )
        if isinstance(pads, str):
            attr_dict["auto_pad"] = pads
        else:
            attr_dict["pads"] = pads

        if data_format == 'channels_last':
            permutation = get_channels_first_permutation(spatial)
            x_shape_t = make_shape_channels_first(x_shape)
            # insert transpose op: NHWC -> NCHW
            transpose_value = helper.make_tensor_value_info(x_name+'_t', tensor_type, shape=x_shape_t)
            onnx_value.append(transpose_value)
            transpose_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name+'_t'], perm = permutation)
            onnx_node.append(transpose_node)
            # convert w to ONNX layout and register it as an initializer
            w_name = node_name + '_w'
            w_init = convert_w(w, data_format, spatial, w_name)
            onnx_init.append(w_init)
            attr_dict["inputs"] = [out, w_name]

            #### convert b (appended as the optional third Conv input)
            if b is not None:
                b_name = node_name + '_b'
                b_init = convert_b(b, b_name)
                onnx_init.append(b_init)
                attr_dict["inputs"] = [out, w_name, b_name]

            # Conv writes a temporary channels_first output
            attr_dict["outputs"] = [out + "_t"]
            conv_node, out = make_node(Op_name, **attr_dict)
            onnx_node.append(conv_node)
            out_shape_t = make_shape_channels_first(out_shape)
            conv_value = helper.make_tensor_value_info(out, tensor_type, shape=out_shape_t)
            onnx_value.append(conv_value)
            # insert transpose op: NCHW -> NHWC and insert act node

            if layer_act != 'NoneType':
                act_convert = tlx_act_2_onnx[layer_act]
                act_input = out_name + "_act"
                act_out = out_name
                # insert transpose op
                permutation = get_channels_last_permutation(spatial)
                transpose_node, out = make_node('Transpose', inputs=[out], outputs=[act_input], perm = permutation)
                onnx_node.append(transpose_node)
                transpose_value = helper.make_tensor_value_info(act_input, tensor_type, shape = out_shape)
                onnx_value.append(transpose_value)
                # If the layer has an act, add an act node plus a value_info
                # for the act's input; the conv's outputs become the act's
                # inputs, and the act's outputs are the whole layer's outputs.
                act_node, _ = act_convert([out], [act_out])
                act_input_value_info = helper.make_tensor_value_info(act_out, tensor_type, out_shape)
                onnx_value.append(act_input_value_info)
                onnx_node.append(act_node)
                return onnx_node, onnx_value, onnx_init
            else:
                # no activation: transpose directly into the layer output name
                permutation = get_channels_last_permutation(spatial)
                transpose_node, out = make_node('Transpose', inputs=[out], outputs=[out_name], perm=permutation)
                onnx_node.append(transpose_node)
                transpose_value = helper.make_tensor_value_info(out_name, tensor_type, shape=out_shape)
                onnx_value.append(transpose_value)
                return onnx_node, onnx_value, onnx_init


        elif data_format == 'channels_first':

            #### convert w
            w_name = node_name + '_w'
            w_init = convert_w(w, data_format, spatial, w_name)
            onnx_init.append(w_init)
            attr_dict["inputs"] = [x_name, w_name]

            #### convert b
            if b is not None:
                b_name = node_name + '_b'
                b_init = convert_b(b, b_name)
                onnx_init.append(b_init)
                attr_dict["inputs"] = [x_name, w_name, b_name]

            #### make act node (Conv feeds a temporary name, act writes out_name)
            if layer_act != 'NoneType':
                act_convert = tlx_act_2_onnx[layer_act]
                act_input = out_name + "_act"
                act_out = out_name
                attr_dict["outputs"] = [act_input]
                conv_node, out = make_node(Op_name, **attr_dict)
                onnx_node.append(conv_node)
                conv_value = helper.make_tensor_value_info(out, tensor_type, shape = out_shape)
                onnx_value.append(conv_value)
                #insert act node
                act_node, out = act_convert([act_input], [act_out])
                act_input_value_info = helper.make_tensor_value_info(out, tensor_type, out_shape)
                onnx_value.append(act_input_value_info)
                onnx_node.append(act_node)
                return onnx_node, onnx_value, onnx_init
            else:
                conv_node, out = make_node(Op_name, **attr_dict)
                onnx_node.append(conv_node)
                conv_value = helper.make_tensor_value_info(out, tensor_type, out_shape)
                onnx_value.append(conv_value)
                return onnx_node, onnx_value, onnx_init
        else:
            raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))

    @classmethod
    def version_1(cls, node, **kwargs):
        # opset 1 entry point; delegates to the shared implementation
        return cls.any_version(node, 1, **kwargs)


    @classmethod
    def version_11(cls, node, **kwargs):
        # opset 11 entry point; delegates to the shared implementation
        return cls.any_version( node, 11, **kwargs)

    @classmethod
    def version_13(cls, node, **kwargs):
        # opset 13 entry point; delegates to the shared implementation
        return cls.any_version(node, 13, **kwargs)
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/linear.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, TensorProto, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, to_numpy
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | from ...common import tlx_act_2_onnx
9 | import numpy as np
10 |
11 |
@OpMapper('Linear')
class Linear():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX Linear (fully connected) layer to ONNX nodes.

        Depending on the layer configuration this emits:
        MatMul [-> Add bias] [-> activation] [-> Cast back to input dtype].
        Returns the (nodes, value_infos, initializers) triple consumed by
        the graph builder.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input tensor name and weight initializer name
        x = node['in_nodes_name'][0]
        # TODO How to compatible multiple framework parameter names
        y = node['node'].layer.name + '/weights'
        # assumes layer.weights is laid out for x @ W (in_features, out_features) -- TODO confirm per backend
        weights = numpy_helper.from_array(arr=to_numpy(node['node'].layer.weights), name=y)
        onnx_init.append(weights)

        # Cast x type to float32 so MatMul/Add run in float32
        if str(node['dtype']) != 'float32':
            c_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 'c', TensorProto.FLOAT, shape=node['in_tensors'][0])
            onnx_value.append(c_x)
            c_node, x = make_node('Cast', inputs=[x], outputs=[node['in_nodes_name'][0] + 'c'], to=TensorProto.FLOAT)
            onnx_node.append(c_node)

        if node['node'].layer.b_init is not None:
            # Build multiplication (intermediate tensor gets an 'm' suffix)
            m_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'm', TensorProto.FLOAT, shape=node['out_tensors'][0])
            onnx_value.append(m_v)
            m_node, out = make_node('MatMul', inputs=[x, y], outputs=[node['out_nodes_name'][0] + 'm'])
            onnx_node.append(m_node)

            if node['node'].layer.act is not None:
                # Build addition (bias initializer cast to float32 to match MatMul output)
                b_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'b', TensorProto.FLOAT, shape=node['out_tensors'][0])
                onnx_value.append(b_v)
                b = numpy_helper.from_array(arr=to_numpy(node['node'].layer.biases).astype(np.float32), name=node['node'].layer.name + '/b')
                onnx_init.append(b)
                b_node, out = make_node('Add', inputs=[out, node['node'].layer.name + '/b'], outputs=[node['out_nodes_name'][0] + 'b'])
                onnx_node.append(b_node)
                # Build activation (writes the final output name)
                act_op = node['node'].layer.act.__class__.__name__
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], TensorProto.FLOAT, shape=node['out_tensors'][0])
                onnx_value.append(out_v)
                # Using Opmapper
                # NOTE(review): the activation's output name is discarded here, so in the
                # non-float32 case below `out` is still the Add output -- confirm intended.
                act_node, _ = tlx_act_2_onnx[act_op]([out], node['out_nodes_name'], node['node'].layer.act)
                onnx_node.append(act_node)

            else:
                # Build addition directly into the final output name
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], TensorProto.FLOAT, shape=node['out_tensors'][0])
                onnx_value.append(out_v)
                b = numpy_helper.from_array(arr=to_numpy(node['node'].layer.biases).astype(np.float32), name=node['node'].layer.name + '/b')
                onnx_init.append(b)
                o_node, _ = make_node('Add', inputs=[out, node['node'].layer.name + '/b'], outputs=node['out_nodes_name'])
                onnx_node.append(o_node)

        else:
            if node['node'].layer.act is not None:
                # Build multiplication, then activation into the final output name
                act_op = node['node'].layer.act.__class__.__name__
                m_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'm', TensorProto.FLOAT, shape=node['out_tensors'][0])
                onnx_value.append(m_v)
                m_node, out = make_node('MatMul', inputs=[x, y], outputs=[node['out_nodes_name'][0] + 'm'])
                onnx_node.append(m_node)
                # Build activation
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], TensorProto.FLOAT, shape=node['out_tensors'][0])
                onnx_value.append(out_v)
                act_node, out = tlx_act_2_onnx[act_op]([out], node['out_nodes_name'], node['node'].layer.act)
                onnx_node.append(act_node)
            else:
                # Bias-free, activation-free: a single MatMul into the final output name
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], TensorProto.FLOAT, shape=node['out_tensors'][0])
                onnx_value.append(out_v)
                o_node, out = make_node('MatMul', inputs=[x, y], outputs=node['out_nodes_name'])
                onnx_node.append(o_node)

        # Cast the result back to the original dtype when the input was not float32.
        # NOTE(review): every branch above already wrote node['out_nodes_name'] as its
        # final output, and this Cast reuses that same name for its own output --
        # looks like a tensor-name collision in the exported graph for non-float32
        # dtypes; confirm against the graph builder.
        if str(node['dtype']) != 'float32':
            out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                  shape=node['out_tensors'][0])
            onnx_value.append(out_v)
            c_node, out = make_node('Cast', inputs=[out], outputs=node['out_nodes_name'],
                                    to=NP_TYPE_TO_TENSOR_TYPE[node['dtype']])
            onnx_node.append(c_node)

        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/mask_conv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, to_numpy
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | from ...common import tlx_act_2_onnx, convert_padding, make_shape_channels_first, convert_w, \
9 | get_channels_last_permutation, get_channels_first_permutation
10 |
@OpMapper(['MaskedConv3d'])
class MaskedConv3d():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX MaskedConv3d layer to an ONNX Conv node.

        The layer's masked kernel is exported as the Conv weight
        initializer. channels_last inputs are transposed to channels-first
        before the Conv and the result is transposed back afterwards.
        Returns the (nodes, value_infos, initializers) triple.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        x = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_shape = node['out_tensors'][0]
        # number of spatial dims parsed from the class name ('3' in 'MaskedConv3d')
        spatial = int(node['node'].layer.__class__.__name__[-2])

        # make weights: export the mask-applied kernel, not the raw kernel
        y = node['node'].layer.name + '/kernel'
        weights_value = node['node'].layer.masked_kernel

        attr_dict = {}
        attr_dict['dilations'] = dilations = node['attr']['dilation']
        attr_dict['kernel_shape'] = kernel_shape = node['attr']['kernel_size']
        attr_dict['strides'] = strides = node['attr']['stride']
        pads = node['attr']['padding']
        data_format = node['attr']['data_format']

        if data_format == 'channels_last':
            # channels last: convert weights layout and transpose the input to channels first
            x_shape_temp = make_shape_channels_first(x_shape)
            out_temp_shape = make_shape_channels_first(out_shape)
            weights = convert_w(weights_value, data_format, spatial, y)
            onnx_init.append(weights)
            t_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 't', NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=x_shape_temp)
            onnx_value.append(t_x)
            tx_node, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + 't'], perm=get_channels_first_permutation(spatial))
            onnx_node.append(tx_node)
        else:
            # Build weights (already channels-first, export as-is)
            weights = numpy_helper.from_array(arr=to_numpy(weights_value), name=y)
            onnx_init.append(weights)

        # Build padding: either an 'auto_pad' string or an explicit 'pads' list
        pads = convert_padding(
            pads, x_shape, out_shape, kernel_shape, strides,
            dilations, spatial, data_format
        )
        if isinstance(pads, str):
            attr_dict["auto_pad"] = pads
        else:
            attr_dict["pads"] = pads

        # Optional bias initializer appended as the Conv's third input
        if node['node'].layer.b_init is not None:
            b = numpy_helper.from_array(arr=to_numpy(node['node'].layer.bias), name=node['node'].layer.name + '/b')
            onnx_init.append(b)
            b_name = node['node'].layer.name + '/b'
            input_list = [x, y, b_name]
        else:
            input_list = [x, y]

        if data_format == 'channels_first':
            if node['node'].layer.act is not None:
                # Build Conv into a temporary 'de'-suffixed tensor, then activation
                de_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'de', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                     shape=out_shape)
                onnx_value.append(de_v)
                ct_node, out = make_node('Conv', inputs=input_list,
                                         outputs=[node['out_nodes_name'][0] + 'de'], **attr_dict)
                onnx_node.append(ct_node)

                act_op = node['node'].layer.act.__class__.__name__
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_shape)
                onnx_value.append(out_v)
                # Using Opmapper
                act_node, _ = tlx_act_2_onnx[act_op]([out], node['out_nodes_name'], node['node'].layer.act)
                onnx_node.append(act_node)
            else:
                # No activation: Conv writes the final output name directly
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_shape) #
                onnx_value.append(out_v)
                ct_node, out = make_node('Conv', inputs=input_list,
                                         outputs=node['out_nodes_name'], **attr_dict)
                onnx_node.append(ct_node)
        elif data_format == 'channels_last':
            if node['node'].layer.act is not None:
                # Build Conv ('ct' suffix), then activation ('a' suffix), both in channels-first layout
                ct_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'ct', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                     shape=out_temp_shape)
                onnx_value.append(ct_v)
                ct_node, out = make_node('Conv', inputs=input_list,
                                         outputs=[node['out_nodes_name'][0] + 'ct'], **attr_dict)
                onnx_node.append(ct_node)

                act_op = node['node'].layer.act.__class__.__name__
                act_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'a', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_temp_shape)
                onnx_value.append(act_v)
                # Using Opmapper
                act_node, out = tlx_act_2_onnx[act_op]([out], [node['out_nodes_name'][0] + 'a'], node['node'].layer.act)
                onnx_node.append(act_node)
            else:
                out_v = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'ct', NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                                      shape=out_temp_shape)
                onnx_value.append(out_v)
                o_node, out = make_node('Conv', inputs=input_list,
                                        outputs=[node['out_nodes_name'][0] + 'ct'], **attr_dict)
                onnx_node.append(o_node)

            # Transpose the channels-first result back to channels_last for the final output
            t_out = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_shape)
            onnx_value.append(t_out)
            tout_node, _ = make_node('Transpose', inputs=[out], outputs=node['out_nodes_name'], perm=get_channels_last_permutation(spatial))
            onnx_node.append(tout_node)
        else:
            raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))

        return onnx_node, onnx_value, onnx_init
127 |
128 |
129 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/merge.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | import numpy as np
9 |
@OpMapper(['Concat'])
class Concat():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX Concat layer to a single ONNX Concat node."""
        nodes, values, inits = [], [], []
        # graph bookkeeping: input names, output name/shape, tensor dtype
        input_names = list(node['in_nodes_name'])
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        tensor_dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        # the TLX layer carries the concatenation axis
        axis = node['node'].layer.concat_dim
        # declare the output tensor and emit the Concat node
        values.append(helper.make_tensor_value_info(out_name, tensor_dtype, shape=out_shape))
        concat_node, _ = make_node('Concat', inputs=input_names, outputs=node['out_nodes_name'], axis=axis)
        nodes.append(concat_node)
        return nodes, values, inits
32 |
33 |
@OpMapper(['Elementwise'])
class Elementwise():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX Elementwise layer to a chain of binary ONNX nodes.

        The layer's ``combine_fn`` is applied pairwise left-to-right over
        the inputs: ((in0 op in1) op in2) op ... . Only the final node uses
        the real output name; intermediates get an index suffix.

        Fix: removed a stray debug ``print`` of the mapped operator name,
        and replaced ``np.arange`` with a plain ``range`` for the loop index.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []
        # get inputs outputs
        in_name = node['in_nodes_name']
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        layer = node['node'].layer
        combine_fn_name = cls.fn_dict(str(layer.combine_fn.__name__))
        # declare the final output tensor
        out_v = helper.make_tensor_value_info(out_name, dtype, shape=out_shape)
        onnx_value.append(out_v)

        # Fold the inputs pairwise with the mapped binary operator.
        out = in_name[0]
        for i in range(1, len(in_name)):
            is_last = i == len(in_name) - 1
            target = [out_name] if is_last else [out_name + str(i)]
            out_node, out = make_node(combine_fn_name, inputs=[out, in_name[i]], outputs=target)
            onnx_node.append(out_node)
        return onnx_node, onnx_value, onnx_init

    @staticmethod
    def fn_dict(fn):
        """Translate a TLX combine_fn name into the ONNX operator type.

        More operator operations can be added from here; raises KeyError
        for unsupported combine functions.
        """
        _dict = {
            'matmul': 'MatMul',
            'add': 'Add',
        }
        return _dict[fn]
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/noise.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 |
@OpMapper(['GaussianNoise'])
class GaussianNoise():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX GaussianNoise layer to ONNX RandomNormal + Add.

        Emits a RandomNormal node drawing noise with the layer's mean,
        stddev and seed, then adds that noise to the layer input.

        Fixes: the noise tensor's value_info was registered under
        ``out_name + '_r'`` while the RandomNormal node actually produced
        ``out_name + 'r'`` -- both now use the same name. RandomNormal also
        takes no inputs, so an empty list is passed instead of ``''``.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []
        # get inputs outputs
        in_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]

        layer = node['node'].layer
        mean = layer.mean
        scale = layer.stddev
        seed = layer.seed
        # make random normal node; one shared name for the node output and its value_info
        noise_name = out_name + '_r'
        r_out = helper.make_tensor_value_info(noise_name, dtype, shape=out_shape)
        onnx_value.append(r_out)
        r_node, out = make_node('RandomNormal', inputs=[], outputs=[noise_name],
                                dtype=dtype, mean=mean, scale=scale, seed=seed, shape=out_shape)
        onnx_node.append(r_node)

        # add the generated noise to the layer input
        a_out = helper.make_tensor_value_info(out_name, dtype, shape=out_shape)
        onnx_value.append(a_out)
        a_node, out = make_node('Add', inputs=[in_name, out], outputs=[out_name])
        onnx_node.append(a_node)
        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/normalization.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
6 | from tlx2onnx.op_mapper.op_mapper import OpMapper
7 | from tlx2onnx.common import make_node, to_numpy, make_shape_channels_first, make_shape_channels_last, \
8 | get_channels_first_permutation, get_channels_last_permutation, tlx_act_2_onnx
9 |
10 |
@OpMapper(['BatchNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d'])
class BatchNorm():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map TLX BatchNorm layers to an ONNX BatchNormalization node.

        channels_last inputs are transposed to channels-first, normalized,
        (optionally activated) and transposed back. Returns the
        (nodes, value_infos, initializers) triple.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input , output, data_format
        x = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]

        out_shape = node['out_tensors'][0]
        out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                              shape=node['out_tensors'][0])
        onnx_value.append(out_v)

        data_format = node['attr']['data_format']
        # number of spatial dims parsed from the class name ('1'/'2'/'3').
        # NOTE(review): for the plain 'BatchNorm' class name this is int('r')
        # and raises ValueError -- confirm plain BatchNorm is handled upstream.
        spatial = int(node['node'].layer.__class__.__name__[-2])
        # get parameters.
        # NOTE(review): the variable names are cross-wired -- the '/beta'
        # initializer holds layer.gamma and '/gamma' holds layer.beta. Since
        # BatchNormalization expects inputs (X, scale=gamma, bias=beta, mean, var)
        # and the call below passes [x, beta_name, gamma_name, ...], the ARRAYS
        # land in the correct positions; only the graph names are misleading.
        beta_name = node['node'].layer.name + '/beta'
        beta_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.gamma), name=beta_name)
        onnx_init.append(beta_weight)

        gamma_name = node['node'].layer.name + '/gamma'
        gamma_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.beta), name=gamma_name)
        onnx_init.append(gamma_weight)

        mean_name = node['node'].layer.name + '/mean'
        mean_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.moving_mean), name=mean_name)
        onnx_init.append(mean_weight)

        var_name = node['node'].layer.name + '/var'
        var_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.moving_var), name=var_name)
        onnx_init.append(var_weight)

        if data_format == 'channels_last':
            # channels last: transpose input to channels-first before normalizing
            x_shape = make_shape_channels_first(x_shape)
            out_temp_shape = make_shape_channels_first(out_shape)
            # make channels transpose
            t_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 't',
                                                NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=x_shape)
            onnx_value.append(t_x)
            tx_node, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + 't'],
                                   perm=get_channels_first_permutation(spatial))
            onnx_node.append(tx_node)
            # make batch normalization (intermediate tensor gets a 'bn' suffix)
            out_temp = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'bn',
                                                     NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_temp_shape)
            onnx_value.append(out_temp)
            bn_node, out = make_node('BatchNormalization',
                                     inputs=[node['in_nodes_name'][0] + 't', beta_name, gamma_name, mean_name, var_name],
                                     outputs=[node['out_nodes_name'][0] + 'bn']
                                     )
            onnx_node.append(bn_node)

            # optional activation in channels-first layout.
            # NOTE(review): no value_info is registered for the 'act' tensor -- confirm intended.
            if node['node'].layer.act is not None:
                act_op = node['node'].layer.act.__class__.__name__
                act_node, out = tlx_act_2_onnx[act_op]([out], [node['out_nodes_name'][0] + 'act'], node['node'].layer.act)
                onnx_node.append(act_node)

            # make channels transpose back to channels_last for the final output
            t_out = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_shape)
            onnx_value.append(t_out)
            tout_node, _ = make_node('Transpose', inputs=[out], outputs=node['out_nodes_name'], perm=get_channels_last_permutation(spatial))
            onnx_node.append(tout_node)


        elif data_format == 'channels_first':
            if node['node'].layer.act is None:
                # no activation: BatchNormalization writes the final output name
                bn_node, out = make_node('BatchNormalization',
                                         inputs=[x, beta_name, gamma_name, mean_name, var_name],
                                         outputs=node['out_nodes_name']
                                         )
                onnx_node.append(bn_node)
            else:
                # normalization into a 'bn'-suffixed tensor, then activation
                bn_node, out = make_node('BatchNormalization',
                                         inputs=[x, beta_name, gamma_name, mean_name, var_name],
                                         outputs=[node['out_nodes_name'][0] + 'bn']
                                         )
                onnx_node.append(bn_node)
                act_op = node['node'].layer.act.__class__.__name__
                act_node, out = tlx_act_2_onnx[act_op]([out], node['out_nodes_name'], node['node'].layer.act)
                onnx_node.append(act_node)

        return onnx_node, onnx_value, onnx_init
100 |
101 |
@OpMapper(['LayerNorm'])
class LayerNorm():
    # supports v17

    @classmethod
    def version_17(cls, node, **kwargs):
        """Map a TLX LayerNorm layer to an ONNX LayerNormalization node.

        The input is transposed to channels-first, normalized, optionally
        activated, and transposed back. Returns the
        (nodes, value_infos, initializers) triple.

        Fix: ONNX LayerNormalization takes inputs in the order
        (X, Scale, B) where Scale is the multiplicative gamma and B the
        additive beta; the inputs were previously passed as
        (X, beta, gamma), swapping the two parameter tensors.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input , output
        x = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]

        out_shape = node['out_tensors'][0]
        out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                              shape=node['out_tensors'][0])
        onnx_value.append(out_v)

        # NOTE(review): the transposes below hard-code a 2-D spatial layout;
        # confirm LayerNorm inputs are always 4-D channels_last here.
        spatial = 2
        # get parameters (beta = additive shift, gamma = multiplicative scale)
        beta_name = node['node'].layer.name + '/beta'
        beta_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.beta), name=beta_name)
        onnx_init.append(beta_weight)

        gamma_name = node['node'].layer.name + '/gamma'
        gamma_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.gamma), name=gamma_name)
        onnx_init.append(gamma_weight)

        epsilon = node['node'].layer.epsilon

        # transpose input to channels-first
        x_shape = make_shape_channels_first(x_shape)
        out_temp_shape = make_shape_channels_first(out_shape)
        t_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 't',
                                            NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=x_shape)
        onnx_value.append(t_x)
        tx_node, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + 't'],
                               perm=get_channels_first_permutation(spatial))
        onnx_node.append(tx_node)
        # make layer normalization; input order is (X, Scale=gamma, B=beta)
        out_temp = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'bn',
                                                 NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_temp_shape)
        onnx_value.append(out_temp)
        ln_node, out = make_node('LayerNormalization',
                                 inputs=[node['in_nodes_name'][0] + 't', gamma_name, beta_name],
                                 outputs=[node['out_nodes_name'][0] + 'bn'], epsilon=epsilon
                                 )
        onnx_node.append(ln_node)

        # optional activation in channels-first layout
        if node['node'].layer.act is not None:
            act_op = node['node'].layer.act.__class__.__name__
            act_node, out = tlx_act_2_onnx[act_op]([out], [node['out_nodes_name'][0] + 'act'], node['node'].layer.act)
            onnx_node.append(act_node)

        # transpose the result back to channels_last for the final output
        t_out = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_shape)
        onnx_value.append(t_out)
        tout_node, _ = make_node('Transpose', inputs=[out], outputs=node['out_nodes_name'], perm=get_channels_last_permutation(spatial))
        onnx_node.append(tout_node)
        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/padding.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | import numpy as np
9 |
@OpMapper(['PadLayer'])
class PadLayer():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX PadLayer to an ONNX Pad node.

        Builds the constant fill value and the `pads` tensor as graph
        initializers, then emits a single Pad node. `pads` must be a 1-D
        tensor of shape [2 * input_rank] in the ONNX order
        [x1_begin, x2_begin, ..., x1_end, x2_end, ...].

        Fix: the per-axis (begin, end) pairs are now flattened without
        augmented assignment on ``layer.padding[0]``, which mutated the
        layer's own padding attribute in place when the pairs were lists.
        """
        onnx_node, onnx_value, onnx_init = [], [], []
        # get inputs outputs
        in_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        layer = node['node'].layer
        # constant fill value, only consumed in 'constant' mode
        value = np.array(layer.constant_values).astype(node['dtype'])
        c_value = numpy_helper.from_array(value, name=layer.name + 'value')
        onnx_init.append(c_value)
        # processing mode.
        # NOTE(review): ONNX has no SYMMETRIC mode; 'edge' replicates the border
        # rather than mirroring it -- confirm this approximation is acceptable.
        mode_dict = {"CONSTANT": 'constant', "REFLECT": 'reflect',"SYMMETRIC": 'edge'}
        mode = mode_dict[layer.mode]
        # Flatten ((b1, e1), (b2, e2), ...) without mutating layer.padding,
        # then regroup begins-first / ends-last as ONNX requires.
        pads_temp = [p for pair in layer.padding for p in pair]
        begins = pads_temp[0::2]
        ends = pads_temp[1::2]
        pads = np.array(begins + ends).astype(np.int64)
        p_value = numpy_helper.from_array(pads, name=layer.name + 'pads')
        onnx_init.append(p_value)
        # make nodes
        v_out = helper.make_tensor_value_info(out_name, dtype, shape=out_shape)
        onnx_value.append(v_out)

        if mode == 'constant':
            p_node, out = make_node('Pad', inputs=[in_name, layer.name + 'pads', layer.name + 'value'], outputs=[out_name], mode='constant')
            onnx_node.append(p_node)
        else:
            p_node, out = make_node('Pad', inputs=[in_name, layer.name + 'pads'], outputs=[out_name], mode=mode)
            onnx_node.append(p_node)

        return onnx_node, onnx_value, onnx_init
56 |
57 |
@OpMapper(['ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d'])
class ZeroPad():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map TLX ZeroPadXd layers to a constant-mode ONNX Pad node."""
        onnx_node, onnx_value, onnx_init = [], [], []
        # graph bookkeeping: input/output names, output shape, tensor dtype
        layer = node['node'].layer
        in_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]

        # expand the layer padding to one (begin, end) pair per axis
        pads_temp = convert_padding(layer.padding, layer.data_format)
        # regroup to the ONNX order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
        half = len(pads_temp) // 2
        begins = [pads_temp[2 * i] for i in range(half)]
        ends = [pads_temp[2 * i + 1] for i in range(half)]
        pads = np.array(begins + ends).astype(np.int64)

        pads_name = layer.name + 'pads'
        onnx_init.append(numpy_helper.from_array(pads, name=pads_name))

        # declare the output tensor and emit the Pad node
        onnx_value.append(helper.make_tensor_value_info(out_name, dtype, shape=out_shape))
        p_node, _ = make_node('Pad', inputs=[in_name, pads_name], outputs=[out_name], mode='constant')
        onnx_node.append(p_node)

        return onnx_node, onnx_value, onnx_init
93 |
94 |
def convert_padding(padding, data_format):
    """Expand TLX ZeroPad padding into flat per-axis (begin, end) pairs.

    Parameters
    ----------
    padding : tuple or list
        Either a single (before, after) pair, or a sequence of per-axis
        (before, after) pairs.
    data_format : str
        'channels_first' prepends zero pairs for the batch and channel
        axes; any other value puts the batch pair first and the channel
        pair last (channels_last layout).

    Returns
    -------
    tuple
        Flat pairs (b1, e1, b2, e2, ...) covering all axes including
        batch and channel.

    Fix: flattening previously used ``pads_temp += padding[i]``, which
    mutated ``padding[0]`` in place when the pairs were lists and then
    failed on tuple+list concatenation; it is now side-effect free and
    accepts lists as well as tuples.
    """
    if np.size(padding) == 2:
        # a single (before, after) pair for one spatial axis
        pads_temp = tuple(padding)
    else:
        # flatten ((b1, e1), (b2, e2), ...) -> (b1, e1, b2, e2, ...)
        pads_temp = tuple(v for pair in padding for v in pair)
    if data_format == 'channels_first':
        return (0, 0, 0, 0) + pads_temp
    return (0, 0) + pads_temp + (0, 0)
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/pool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper
5 | from collections import OrderedDict
6 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
7 | from tlx2onnx.op_mapper.op_mapper import OpMapper
8 | from tlx2onnx.common import make_node
9 | from tlx2onnx.common import make_shape_channels_first, get_channels_first_permutation,tlx_act_2_onnx,get_channels_last_permutation
10 | from tlx2onnx.common import convert_padding
11 |
12 |
@OpMapper(["MaxPool1d", "MaxPool2d", "MaxPool3d", "AvgPool1d", "AvgPool2d", "AvgPool3d"])
class Pool():
    # supports v1-v11

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map TLX Max/Avg pooling layers to ONNX MaxPool/AveragePool nodes.

        channels_last inputs are transposed to channels-first, pooled, and
        transposed back. Returns the (nodes, value_infos, initializers)
        triple used by the graph builder.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        attr_dict = OrderedDict()
        # get in_node_name out_node_name and shapes
        x_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_shape = node['out_tensors'][0]

        #### get data_type
        data_type = node['dtype']
        tensor_type = NP_TYPE_TO_TENSOR_TYPE[data_type]

        # get cur_node_layer node_index
        layer = node['node'].layer
        layer_name = layer.__class__.__name__
        # number of spatial dims parsed from the class name ('1'/'2'/'3')
        spatial = int(layer_name[-2])
        # first 7 chars select the op family: 'MaxPool' or 'AvgPool'
        layer_type = layer_name[:7]
        if layer_type == "MaxPool":
            Op_name = "MaxPool"
        elif layer_type == "AvgPool":
            Op_name = "AveragePool"

        # insert pool attr (scalars are wrapped into 1-element lists)
        kernel_size = node['attr']['kernel_size']
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size]
        attr_dict["kernel_shape"] = kernel_size
        strides = node['attr']['stride']
        if isinstance(strides, int):
            strides = [strides]
        attr_dict["strides"] = strides
        data_format = node['attr']['data_format']
        paddding = node['attr']['padding']

        # convert padding: either an 'auto_pad' string or explicit 'pads'
        pads = convert_padding(
            paddding, x_shape, out_shape, attr_dict["kernel_shape"], attr_dict["strides"],
            None, spatial, data_format
        )
        if isinstance(pads, str):
            attr_dict["auto_pad"] = pads
        else:
            attr_dict["pads"] = pads

        if data_format == 'channels_last':
            permutation = get_channels_first_permutation(spatial)
            x_shape_t = make_shape_channels_first(x_shape)
            # insert transpose op: NHWC -> NCHW
            transpose_value = helper.make_tensor_value_info(x_name+'_t', tensor_type, shape=x_shape_t)
            onnx_value.append(transpose_value)
            transpose_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name+'_t'], perm = permutation)
            onnx_node.append(transpose_node)

            # pool in channels-first layout; 'out' is x_name+'_t' here, so the
            # pool output tensor is named x_name+'_t_t' (unique by construction)
            attr_dict["inputs"] = [out]
            attr_dict["outputs"] = [out+'_t']
            maxpool_node, out = make_node(Op_name, **attr_dict)
            onnx_node.append(maxpool_node)
            out_shape_t = make_shape_channels_first(out_shape)
            maxpool_value = helper.make_tensor_value_info(out, tensor_type, shape=out_shape_t)
            onnx_value.append(maxpool_value)

            # insert transpose op: NCHW -> NHWC
            permutation = get_channels_last_permutation(spatial)
            transpose_node, out = make_node('Transpose', inputs=[out], outputs=[out_name], perm=permutation)
            onnx_node.append(transpose_node)
            transpose_value = helper.make_tensor_value_info(out_name, tensor_type, shape=out_shape)
            onnx_value.append(transpose_value)
            return onnx_node, onnx_value, onnx_init

        elif data_format == 'channels_first':
            # channels-first: pool directly into the final output name
            attr_dict["inputs"] = [x_name]
            attr_dict["outputs"] = [out_name]
            maxpool_node, out = make_node(Op_name, **attr_dict)
            onnx_node.append(maxpool_node)
            maxpool_value = helper.make_tensor_value_info(out, tensor_type, out_shape)
            onnx_value.append(maxpool_value)
            return onnx_node, onnx_value, onnx_init

        else:
            raise ValueError("Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))
103 |
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/resampling.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
6 | from tlx2onnx.op_mapper.op_mapper import OpMapper
7 | from tlx2onnx.common import make_node, get_channels_last_permutation, get_channels_first_permutation
8 | import numpy as np
9 |
10 |
@OpMapper(["UpSampling2d"])
class UpSampling2d():
    # supports v1-v13

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX UpSampling2d layer to an ONNX Resize node."""
        onnx_node, onnx_value, onnx_init = [], [], []
        # TLX method name -> ONNX Resize 'mode' attribute
        mode = {'nearest': 'nearest', 'bilinear': 'linear', 'bicubic': 'cubic'}
        x_name = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        layer = node['node'].layer
        scale = layer.scale
        method = layer.method
        data_format = layer.data_format
        spatial = int(node['node'].layer.__class__.__name__[-2])

        # only the three modes in the table are supported
        if method not in mode:
            raise Exception('Sampling methods nearest, bilinear, and bicubic are supported.')
        # per-axis scale factors in NCHW order; batch and channel stay at 1
        scales = np.array([1.0, 1.0, scale[0], scale[1]], dtype=np.float32)
        scales_name = layer.name + 'scales'
        onnx_init.append(numpy_helper.from_array(scales, name=scales_name))
        # Make resize node
        if data_format == 'channels_first':
            onnx_value.append(helper.make_tensor_value_info(out_name, dtype, out_shape))
            resize_node, _ = make_node('Resize', inputs=[x_name, '', scales_name],
                                       outputs=[out_name], mode=mode[method])
            onnx_node.append(resize_node)
        else:
            # channels_last: transpose to NCHW, resize, transpose back
            t_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name + 't'],
                                    perm=get_channels_first_permutation(spatial))
            onnx_node.append(t_node)
            resize_node, out = make_node('Resize', inputs=[out, '', scales_name],
                                         outputs=[x_name + 's'], mode=mode[method])
            onnx_node.append(resize_node)
            back_node, _ = make_node('Transpose', inputs=[out], outputs=[out_name],
                                     perm=get_channels_last_permutation(spatial))
            onnx_node.append(back_node)
        return onnx_node, onnx_value, onnx_init
53 |
54 |
@OpMapper(["DownSampling2d"])
class DownSampling2d():
    # supports v1-v13

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX DownSampling2d layer to an ONNX Resize node with inverted scales."""
        onnx_node, onnx_value, onnx_init = [], [], []
        # TLX method name -> ONNX Resize 'mode' attribute
        mode = {'nearest': 'nearest', 'bilinear': 'linear', 'bicubic': 'cubic'}
        x_name = node['in_nodes_name'][0]
        x_shape = node['in_tensors'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        layer = node['node'].layer
        scale = layer.scale
        method = layer.method
        data_format = layer.data_format
        spatial = int(node['node'].layer.__class__.__name__[-2])

        # only the three modes in the table are supported
        if method not in mode:
            raise Exception('Sampling methods nearest, bilinear, and bicubic are supported.')
        # downsampling: use reciprocal per-axis factors, NCHW order, N/C stay at 1
        scales = np.array([1.0, 1.0, 1.0 / scale[0], 1.0 / scale[1]], dtype=np.float32)
        scales_name = layer.name + 'scales'
        onnx_init.append(numpy_helper.from_array(scales, name=scales_name))
        # Make resize node
        if data_format == 'channels_first':
            onnx_value.append(helper.make_tensor_value_info(out_name, dtype, out_shape))
            resize_node, _ = make_node('Resize', inputs=[x_name, '', scales_name],
                                       outputs=[out_name], mode=mode[method])
            onnx_node.append(resize_node)
        else:
            # channels_last: transpose to NCHW, resize, transpose back
            t_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name + 't'],
                                    perm=get_channels_first_permutation(spatial))
            onnx_node.append(t_node)
            resize_node, out = make_node('Resize', inputs=[out, '', scales_name],
                                         outputs=[x_name + 's'], mode=mode[method])
            onnx_node.append(resize_node)
            back_node, _ = make_node('Transpose', inputs=[out], outputs=[out_name],
                                     perm=get_channels_last_permutation(spatial))
            onnx_node.append(back_node)
        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/scale.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, to_numpy
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 |
@OpMapper(['Scale'])
class Scale():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Map a TLX Scale layer onto an ONNX Mul with a weight initializer."""
        onnx_node, onnx_value, onnx_init = [], [], []

        # inputs / outputs
        in_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        layer = node['node'].layer

        # register the learned scale tensor as a graph initializer
        w_name = layer.name + '/weights'
        onnx_init.append(numpy_helper.from_array(arr=to_numpy(layer.scale), name=w_name))

        # element-wise multiply: out = in * scale
        onnx_value.append(helper.make_tensor_value_info(out_name, dtype, shape=out_shape))
        mul_node, _ = make_node('Mul', inputs=[in_name, w_name], outputs=[out_name])
        onnx_node.append(mul_node)
        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/shape.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, TensorProto, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, to_numpy
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | from ...common import tlx_act_2_onnx
9 | import numpy as np
10 |
11 |
@OpMapper('Transpose')
class Transpose():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Convert a TLX Transpose layer to an ONNX Transpose node.

        Raises
        ------
        NotImplementedError
            If the layer was built with ``conjugate=True``, which has no
            direct ONNX equivalent here.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input, output
        x = node['in_nodes_name'][0]
        y = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        out_v = helper.make_tensor_value_info(y, NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                              shape=out_shape)
        onnx_value.append(out_v)

        # attr
        perm = node['node'].layer.perm
        conjugate = node['node'].layer.conjugate

        if conjugate:
            raise NotImplementedError("parameter conjugate is not supported.")

        t_node, _ = make_node('Transpose', inputs=[x], outputs=[y], perm=perm)
        onnx_node.append(t_node)

        return onnx_node, onnx_value, onnx_init
42 |
43 |
@OpMapper('Reshape')
class Reshape():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Convert a TLX Reshape layer to an ONNX Reshape node.

        The target shape is stored as an int64 initializer. The initializer
        name is prefixed with the layer name so that several Reshape layers
        in one graph do not collide on a single shared 'shape' initializer.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        # input, output
        x = node['in_nodes_name'][0]
        y = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        out_v = helper.make_tensor_value_info(y, NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
                                              shape=out_shape)
        onnx_value.append(out_v)

        # attr: the requested shape, as a layer-unique int64 initializer
        shape_name = node['node'].layer.name + '/shape'
        shape = np.array(node['node'].layer.shape, dtype=np.int64)
        shape_value = numpy_helper.from_array(shape, name=shape_name)
        onnx_init.append(shape_value)

        t_node, _ = make_node('Reshape', inputs=[x, shape_name], outputs=[y])
        onnx_node.append(t_node)

        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/stack.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from ..op_mapper import OpMapper
6 | from ...common import make_node, to_numpy
7 | from ..datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 |
@OpMapper(['Stack'])
class Stack():
    # supports v11+ (SequenceConstruct/ConcatFromSequence were added in opset 11)

    @classmethod
    def version_11(cls, node, **kwargs):
        """Convert a TLX Stack layer to ONNX SequenceConstruct + ConcatFromSequence."""
        onnx_node = []
        onnx_value = []
        onnx_init = []
        # get inputs outputs
        in_names = node['in_nodes_name']
        out_name = node['out_nodes_name'][0]
        out_shape = node['out_tensors'][0]
        dtype = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]
        layer = node['node'].layer
        axis = layer.axis

        # declare the stacked output tensor
        out_v = helper.make_tensor_value_info(out_name, dtype, shape=out_shape)
        onnx_value.append(out_v)
        # gather all inputs into an ONNX sequence, then concatenate along a
        # newly inserted axis (new_axis=1), which implements stacking
        seq_construct_node = helper.make_node('SequenceConstruct', list(in_names), [layer.name + 'S'])
        onnx_node.append(seq_construct_node)
        out_node, _ = make_node('ConcatFromSequence', inputs=[layer.name + 'S'], outputs=[out_name], new_axis=1, axis=axis)
        onnx_node.append(out_node)
        return onnx_node, onnx_value, onnx_init
34 |
35 |
@OpMapper(['UnStack'])
class UnStack():
    # supports v1-v12

    @classmethod
    def version_1(cls, node, **kwargs):
        """Convert a TLX UnStack layer to an ONNX Split node.

        Produces one output tensor per entry in ``out_nodes_name``.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []
        # get inputs outputs
        in_name = node['in_nodes_name'][0]
        out_names = node['out_nodes_name']
        layer = node['node'].layer
        axis = layer.axis

        # make split node: one output per unstacked tensor.
        # NOTE(review): ONNX Split keeps the split axis (size 1 per piece)
        # while TLX unstack squeezes it — confirm downstream consumers
        # expect this shape.
        out_node, _ = make_node('Split', inputs=[in_name], outputs=list(out_names), axis=axis)
        onnx_node.append(out_node)
        return onnx_node, onnx_value, onnx_init
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/nn/subpixelconv.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | import numpy as np
4 | import onnx
5 | from onnx import helper, numpy_helper
6 | from collections import OrderedDict
7 | import tensorlayerx as tlx
8 | from tlx2onnx.op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
9 | from tlx2onnx.op_mapper.op_mapper import OpMapper
10 | from tlx2onnx.common import make_node, tlx_act_2_onnx
11 | from tlx2onnx.common import make_shape_channels_first, get_channels_first_permutation,get_channels_last_permutation
12 |
@OpMapper(["SubpixelConv2d"])
class SubpixelConv():
    """Maps a TLX SubpixelConv2d (pixel shuffle) layer to ONNX DepthToSpace."""

    @classmethod
    def _convert(cls, node, extra_attrs=None):
        """Shared conversion body for all supported opset versions.

        Parameters
        ----------
        node : dict
            Topology information for the layer being converted.
        extra_attrs : dict, optional
            Extra DepthToSpace attributes (e.g. ``mode`` for opset 11+).

        Channels-last inputs are transposed to channels-first, converted,
        and transposed back (DepthToSpace expects NCHW); an optional
        activation node is appended when the layer has one.
        """
        onnx_node = []
        onnx_value = []
        onnx_init = []

        op_type = "DepthToSpace"
        # graph input/output names and the input shape
        x_name = node['in_nodes_name'][0]
        out_name = node['out_nodes_name'][0]
        x_shape = node['in_tensors'][0]

        tensor_type = NP_TYPE_TO_TENSOR_TYPE[node['dtype']]

        layer = node['node'].layer
        # spatial rank parsed from the class name, e.g. "SubpixelConv2d" -> 2
        spatial = int(layer.__class__.__name__[-2])
        data_format = layer.data_format

        # DepthToSpace attributes: the upscale factor, plus any
        # version-specific extras supplied by the caller.
        attr_dict = OrderedDict()
        attr_dict["blocksize"] = layer.scale
        if extra_attrs:
            attr_dict.update(extra_attrs)

        if data_format == "channels_last":
            # transpose to channels_first, apply DepthToSpace, transpose back
            permutation = get_channels_first_permutation(spatial)
            x_shape_t = make_shape_channels_first(x_shape)
            transpose_value = helper.make_tensor_value_info(x_name + '_t', tensor_type, shape=x_shape_t)
            onnx_value.append(transpose_value)
            transpose_node, out = make_node('Transpose', inputs=[x_name], outputs=[x_name + '_t'], perm=permutation)
            onnx_node.append(transpose_node)
            depth_to_space, out = make_node(op_type, inputs=[out], outputs=[out + '_t'], **attr_dict)
            onnx_node.append(depth_to_space)
            if layer.act is not None:
                # optional fused activation, applied before the final transpose
                act_op = layer.act.__class__.__name__
                act_node, out = tlx_act_2_onnx[act_op]([out], [out + '_act'])
                onnx_node.append(act_node)
            permutation = get_channels_last_permutation(spatial)
            transpose_node, out = make_node('Transpose', inputs=[out], outputs=[out_name], perm=permutation)
            onnx_node.append(transpose_node)
            return onnx_node, onnx_value, onnx_init

        elif data_format == 'channels_first':
            if layer.act is None:
                depth_to_space, out = make_node(op_type, inputs=[x_name], outputs=[out_name], **attr_dict)
                onnx_node.append(depth_to_space)
                return onnx_node, onnx_value, onnx_init
            else:
                # route DepthToSpace through an intermediate name so the
                # activation node can produce the final graph output
                depth_to_space, out = make_node(op_type, inputs=[x_name], outputs=[out_name + '_act'], **attr_dict)
                onnx_node.append(depth_to_space)
                act_op = layer.act.__class__.__name__
                act_node, out = tlx_act_2_onnx[act_op]([out], [out_name])
                onnx_node.append(act_node)
                return onnx_node, onnx_value, onnx_init
        else:
            raise ValueError(
                "Only support 'channels_first' or 'channels_last' data_format mode, but got {}.".format(data_format))

    @classmethod
    def version_1(cls, node, **kwargs):
        """Opset 1: DepthToSpace without the ``mode`` attribute."""
        return cls._convert(node)

    @classmethod
    def version_11(cls, node, **kwargs):
        """Opset 11+: DepthToSpace gained the ``mode`` attribute.

        TensorFlow/MindSpore lay out pixel-shuffle channels in DCR order,
        while PyTorch/Paddle use CRD.
        """
        extra_attrs = {}
        if tlx.BACKEND in ["tensorflow", "mindspore"]:
            extra_attrs["mode"] = "DCR"
        elif tlx.BACKEND in ["torch", "paddle"]:
            extra_attrs["mode"] = "CRD"
        return cls._convert(node, extra_attrs)
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/op_mapper.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import absolute_import
5 |
6 | import inspect
7 | import logging
8 |
# Status codes used by ``OpMapper.check_support_version`` to classify each node.
OP_MAPPING_NO_REGISTER = 0  # op type has no registered converter at all
OP_MAPPING_NO_VERSION = 1  # op is registered, but not for the requested opset version
OP_MAPPING_SUCCESSED = 2  # conversion succeeded
OP_MAPPING_FAILED = 3  # conversion failed
13 |
14 |
def get_max_support_version(versions, opset_version):
    """Return the largest entry of ``versions`` not exceeding ``opset_version``,
    or -1 when no entry qualifies."""
    candidates = [v for v in versions if v <= opset_version]
    return max(candidates) if candidates else -1
21 |
class OpMapper(object):
    """Registry decorator mapping TLX layer type names to ONNX converters.

    ``OPSETS`` maps a TLX op name to ``{opset_version: (mapper_func, kwargs)}``.
    Converter classes register themselves by defining ``version_N``
    classmethods and decorating the class with ``@OpMapper([...])``.
    """
    OPSETS = {}
    # TODO: CUSTOM_OP = {}
    def __init__(self, tlx_op, **kwargs):
        # Accept a single op name or a list of names.
        if not isinstance(tlx_op, list):
            tlx_op = [tlx_op]
        self.tlx_op = tlx_op
        self.kwargs = kwargs

    def __call__(self, cls):
        """Register every ``version_N`` classmethod of ``cls`` under each op name."""
        for k, v in inspect.getmembers(cls, inspect.ismethod):
            if k.startswith("version_"):
                version = int(k.replace("version_", ""))
                for op in self.tlx_op:
                    if op not in OpMapper.OPSETS:
                        OpMapper.OPSETS[op] = {}
                    opset_dict = OpMapper.OPSETS[op]
                    opset_dict[version] = (v, self.kwargs)
        # Return the class so the decorated name stays bound to it
        # (previously the decorator returned None, replacing the class).
        return cls

    @staticmethod
    def mapping(node_info, opset_version):
        """Convert one TLX node to ONNX nodes.

        Dispatches to the highest registered converter version that does
        not exceed ``opset_version``.

        Parameters
        ----------
        node_info : dict
            tlx_node information
        opset_version : int
            the version of onnx_op to convert

        Raises
        ------
        Exception
            If the node type is unregistered or its converter fails.
        """
        node = node_info['node']
        try:
            # TODO : if node.layer.__class__.__name__ in CUSTOM_OP
            node_type = node.layer.__class__.__name__
            opsets = OpMapper.OPSETS[node_type]
            versions = list(opsets.keys())
            convert_verison = get_max_support_version(versions, opset_version)
            mapper_func, kw = opsets[convert_verison]
            return mapper_func(node_info, **kw)
        except Exception as e:
            # Chain the original exception so the root cause is preserved.
            raise Exception(
                "Unsupported mapping node [{}] to onnx node, which op_type is {}, specific error: {}".
                format(node.layer, node.layer.__class__.__name__, e)
            ) from e

    @staticmethod
    def update_opset_version(graph, opset_version):
        """Return an opset version able to convert ``graph``.

        Warns when the recommended version differs from the requested one.
        """
        recommend_opset_version = OpMapper.check_support_version(
            graph, opset_version, True
        )
        # TODO : CUSTOM OP CHECK
        # for tlx_node_list in graph:
        #     for tlx_node in tlx_node_list:
        #         pass
        if opset_version != recommend_opset_version:
            warning_info = "\n======================\n"
            warning_info += "\nFor a successful conversion, set the recommended opset version : {}\n".format(
                recommend_opset_version)
            warning_info += "\n======================\n"
            logging.warning(warning_info)
        return recommend_opset_version

    @staticmethod
    def check_support_version(graph, opset_version, for_check = False):
        """Verify every node (and its activation) has a converter for ``opset_version``.

        Raises NotImplementedError for unregistered ops; for registered ops
        that need a newer opset, either warns and returns the minimum
        workable version (``for_check=True``) or raises.
        """
        op_mapping_status = {
            OP_MAPPING_NO_REGISTER: [],
            OP_MAPPING_NO_VERSION: [],
        }
        for key in graph.keys():
            tlx_node = graph[key]["node"]
            # TODO : CUSTOM OP CHECK
            # Input layers have no converter and need none.
            if tlx_node.layer.__class__.__name__ in ['Input', '_InputLayer']:
                continue
            node_type = tlx_node.layer.__class__.__name__
            # check act_type: fused activations are converted as separate ops
            if hasattr(tlx_node.layer, "act") and tlx_node.layer.act != None:
                act_type = tlx_node.layer.act.__class__.__name__
                if act_type not in OpMapper.OPSETS:
                    op_mapping_status[OP_MAPPING_NO_REGISTER].append(node_type)
                else:
                    opsets = OpMapper.OPSETS[act_type]
                    versions = list(opsets.keys())
                    convert_version = get_max_support_version(versions, opset_version)
                    if convert_version == -1:
                        op_mapping_status[OP_MAPPING_NO_VERSION].append(act_type)

            # check node_type
            if node_type not in OpMapper.OPSETS:
                op_mapping_status[OP_MAPPING_NO_REGISTER].append(node_type)
            else:
                opsets = OpMapper.OPSETS[node_type]
                versions = list(opsets.keys())
                convert_version = get_max_support_version(versions, opset_version)
                if convert_version == -1:
                    op_mapping_status[OP_MAPPING_NO_VERSION].append(node_type)

        if len(op_mapping_status[OP_MAPPING_NO_REGISTER]) > 0:
            unsupported_op_types = set(op_mapping_status[OP_MAPPING_NO_REGISTER])
            error_info = "\nThere's {} ops are not supported yet\n".format(
                len(unsupported_op_types))
            for op_type in unsupported_op_types:
                error_info += "=========== {} ===========\n".format(op_type)
            raise NotImplementedError(error_info)

        if len(op_mapping_status[OP_MAPPING_NO_VERSION]) > 0:
            unsupported_op_types = set(op_mapping_status[OP_MAPPING_NO_VERSION])
            # The smallest opset that covers every unsupported op.
            recommend_opset_version = -1
            for op_type in unsupported_op_types:
                opsets = OpMapper.OPSETS[op_type]
                if min(opsets.keys()) > recommend_opset_version:
                    recommend_opset_version = min(opsets.keys())
            warning_info = "\nThere are {} ops that are not supported in opset version {}, please set opset version >= {}.\n".format(
                len(unsupported_op_types), opset_version,
                recommend_opset_version)
            for op_type in unsupported_op_types:
                warning_info += "=========== {} ===========\n".format(op_type)
            if for_check:
                logging.warning(warning_info)
                return recommend_opset_version
            raise NotImplementedError(warning_info)

        return opset_version
--------------------------------------------------------------------------------
/tlx2onnx/op_mapper/tensor.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from onnx import helper, numpy_helper
5 | from collections import OrderedDict
6 | import tensorlayerx as tlx
7 | from .datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
8 | import numpy as np
9 | from .op_mapper import OpMapper
10 | from ..common import make_node
11 |
12 | # TODO : CONCAT, SPLIT, STACK,.....CONVERTER
--------------------------------------------------------------------------------
/tlx2onnx/topology.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | # The computed graph of TLX returns the information needed to build ONNX.
4 |
5 |
6 | import tensorlayerx as tlx
7 |
def memory_node_info(node):
    """Collect the conversion-relevant information of one graph node.

    Returns a dict containing the node object, input/output tensor shapes,
    unique input/output tensor names (producer node name + tensor index),
    dtypes and the layer attributes. Input layers have no producers, so
    their 'in_*' entries are ``None`` and their dtypes are taken from the
    outputs instead.
    """
    node_info = {}
    is_input_layer = node.layer.__class__.__name__ in tlx.nn.inputs.__all__
    # Fields shared by input layers and ordinary layers.
    node_info['node'] = node
    node_info['out_tensors'] = [list(out_tensor.shape) for out_tensor in node.out_tensors]
    node_info['out_nodes_name'] = [node.node_name + str(idx) for idx in range(len(node.out_tensors))]
    node_info['out_dtype'] = tlx.convert_to_numpy(node.out_tensors[0]).dtype
    node_info['attr'] = node.attr
    if is_input_layer:
        node_info['in_tensors'] = None
        node_info['in_nodes_name'] = None
        # Input layers take their dtype from their own outputs.
        node_info['dtype'] = node_info['out_dtype']
        node_info['in_dtype'] = node_info['out_dtype']
    else:
        # zip with in_tensors_idxes keeps the shape list aligned (and
        # truncated) to the producer indices, matching prior behavior.
        node_info['in_tensors'] = [list(in_tensor.shape) for in_tensor, _ in zip(node.in_tensors, node.in_tensors_idxes)]
        node_info['in_nodes_name'] = [inode.node_name + str(idx) for inode, idx in zip(node.in_nodes, node.in_tensors_idxes)]
        in_dtype = tlx.convert_to_numpy(node.in_tensors[0]).dtype
        node_info['dtype'] = in_dtype
        node_info['in_dtype'] = in_dtype
    return node_info
31 |
32 |
def construct_topology(model, inputs):
    """Build the conversion memory for a TLX model.

    Parameters
    ----------
    model: TensorLayerX model,
    inputs

    Returns
    -------
    dict mapping node_name -> node info (see ``memory_node_info``).
    """
    node_by_depth, all_layers = model.build_graph(inputs)

    memory = dict()
    # Walk the graph depth by depth, recording info for every node.
    for depth, nodes in enumerate(node_by_depth):
        if depth == 0 and not isinstance(inputs, list):
            # A single input tensor feeds exactly one input node.
            memory[nodes[0].node_name] = memory_node_info(nodes[0])
            continue
        if depth == 0:
            # One input node is expected per tensor in the input list.
            assert len(inputs) == len(nodes)
        for node in nodes:
            memory[node.node_name] = memory_node_info(node)
    return memory
63 |
64 |
65 |
--------------------------------------------------------------------------------