├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── onnx2keras ├── __init__.py ├── activation_layers.py ├── constant_layers.py ├── converter.py ├── convolution_layers.py ├── elementwise_layers.py ├── layers.py ├── linear_layers.py ├── normalization_layers.py ├── operation_layers.py ├── padding_layers.py ├── pooling_layers.py ├── reshape_layers.py ├── upsampling_layers.py └── utils.py ├── requirements.txt ├── setup.py └── test ├── README.md ├── __init__.py ├── layers ├── __init__.py ├── activations │ ├── __init__.py │ ├── test_elu.py │ ├── test_hard_tanh.py │ ├── test_leaky_relu.py │ ├── test_log_sigmoid.py │ ├── test_log_softmax.py │ ├── test_prelu.py │ ├── test_relu.py │ ├── test_relu6.py │ ├── test_selu.py │ ├── test_sigmoid.py │ ├── test_softmax.py │ ├── test_tanh.py │ └── test_threshold.py ├── constants │ ├── __init__.py │ └── test_constant.py ├── convolutions │ ├── __init__.py │ ├── test_conv2d.py │ ├── test_conv3d.py │ └── test_convtranspose2d.py ├── elementwise │ ├── __init__.py │ ├── test_add.py │ ├── test_div.py │ ├── test_mul.py │ └── test_sub.py ├── linears │ ├── __init__.py │ └── test_linear.py ├── normalizations │ ├── __init__.py │ ├── test_bn2d.py │ └── test_in2d.py ├── operations │ ├── __init__.py │ ├── test_cast.py │ ├── test_clip.py │ ├── test_floor.py │ └── test_norm.py ├── poolings │ ├── __init__.py │ ├── test_avgpool2d.py │ ├── test_avgpool3d.py │ ├── test_global_avgpool2d.py │ ├── test_global_maxpool2d.py │ ├── test_maxpool2d.py │ └── test_maxpool3d.py └── reshapes │ ├── __init__.py │ ├── test_slice.py │ ├── test_split.py │ └── test_squeeze.py ├── models ├── __init__.py ├── test_alexnet.py ├── test_densenet.py ├── test_googlenet.py ├── test_mbnet2.py ├── test_mnasnet.py ├── test_resnet18.py ├── test_resnext.py ├── test_squeezenet.py └── test_vgg.py ├── requirements.txt └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | __pycache__/ 3 | *.pyc 4 | *.egg-info 5 | *.onnx 6 | *.h5 7 | dist/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Grigory Malivenko 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | include requirements.txt -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # onnx2keras 2 | 3 | ONNX to Keras deep neural network converter. 4 | 5 | [![GitHub License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) 6 | [![Python Version](https://img.shields.io/badge/python-2.7%2C3.6-lightgrey.svg)](https://github.com/gmalivenko/onnx2keras) 7 | [![Downloads](https://pepy.tech/badge/onnx2keras)](https://pepy.tech/project/onnx2keras) 8 | ![PyPI](https://img.shields.io/pypi/v/onnx2keras.svg) 9 | 10 | ## Requirements 11 | 12 | TensorFlow 2.0 13 | 14 | ## API 15 | 16 | `onnx_to_keras(onnx_model, input_names, input_shapes=None, name_policy=None, verbose=True, change_ordering=False) -> {Keras model}` 17 | 18 | `onnx_model`: ONNX model to convert 19 | 20 | `input_names`: list with graph input names 21 | 22 | `input_shapes`: override input shapes (experimental) 23 | 24 | `name_policy`: ['renumerate', 'short', 'default'] override layer names (experimental) 25 | 26 | `verbose`: detailed output 27 | 28 | `change_ordering:` change ordering to HWC (experimental) 29 | 30 | 31 | ## Getting started 32 | 33 | ### ONNX model 34 | ```python 35 | import onnx 36 | from onnx2keras import onnx_to_keras 37 | 38 | # Load ONNX model 39 | onnx_model = onnx.load('resnet18.onnx') 40 | 41 | # Call the converter (input - is the main model input name, can be different for your model) 42 | k_model = onnx_to_keras(onnx_model, ['input']) 43 | ``` 44 | 45 | Keras model will be stored to the `k_model` variable. So simple, isn't it? 46 | 47 | 48 | ### PyTorch model 49 | 50 | Using ONNX as intermediate format, you can convert PyTorch model as well. 51 | 52 | ```python 53 | import numpy as np 54 | import torch 55 | from torch.autograd import Variable 56 | from pytorch2keras.converter import pytorch_to_keras 57 | import torchvision.models as models 58 | 59 | if __name__ == '__main__': 60 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 61 | input_var = Variable(torch.FloatTensor(input_np)) 62 | model = models.resnet18() 63 | model.eval() 64 | k_model = \ 65 | pytorch_to_keras(model, input_var, [(3, 224, 224,)], verbose=True, change_ordering=True) 66 | 67 | for i in range(3): 68 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 69 | input_var = Variable(torch.FloatTensor(input_np)) 70 | output = model(input_var) 71 | pytorch_output = output.data.numpy() 72 | keras_output = k_model.predict(np.transpose(input_np, [0, 2, 3, 1])) 73 | error = np.max(pytorch_output - keras_output) 74 | print('error -- ', error) # Around zero :) 75 | ``` 76 | 77 | ### Deplying model as frozen graph 78 | 79 | You can try using the snippet below to convert your onnx / PyTorch model to frozen graph. It may be useful for deploy for Tensorflow.js / for Tensorflow for Android / for Tensorflow C-API. 
80 | 81 | ```python 82 | import numpy as np 83 | import torch 84 | from pytorch2keras.converter import pytorch_to_keras 85 | from torch.autograd import Variable 86 | import tensorflow as tf 87 | from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 88 | 89 | 90 | # Create and load model 91 | model = Model() 92 | model.load_state_dict(torch.load('model-checkpoint.pth')) 93 | model.eval() 94 | 95 | # Make dummy variables (and checking if the model works) 96 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 97 | input_var = Variable(torch.FloatTensor(input_np)) 98 | output = model(input_var) 99 | 100 | # Convert the model! 101 | k_model = \ 102 | pytorch_to_keras(model, input_var, (3, 224, 224), 103 | verbose=True, name_policy='short', 104 | change_ordering=True) 105 | 106 | # Save model to SavedModel format 107 | tf.saved_model.save(k_model, "./models") 108 | 109 | # Convert Keras model to ConcreteFunction 110 | full_model = tf.function(lambda x: k_model(x)) 111 | full_model = full_model.get_concrete_function( 112 | tf.TensorSpec(k_model.inputs[0].shape, k_model.inputs[0].dtype)) 113 | 114 | # Get frozen ConcreteFunction 115 | frozen_func = convert_variables_to_constants_v2(full_model) 116 | frozen_func.graph.as_graph_def() 117 | 118 | print("-" * 50) 119 | print("Frozen model layers: ") 120 | for layer in [op.name for op in frozen_func.graph.get_operations()]: 121 | print(layer) 122 | 123 | print("-" * 50) 124 | print("Frozen model inputs: ") 125 | print(frozen_func.inputs) 126 | print("Frozen model outputs: ") 127 | print(frozen_func.outputs) 128 | 129 | # Save frozen graph from frozen ConcreteFunction to hard drive 130 | tf.io.write_graph(graph_or_graph_def=frozen_func.graph, 131 | logdir="./frozen_models", 132 | name="frozen_graph.pb", 133 | as_text=False) 134 | ``` 135 | 136 | 137 | ## License 138 | This software is covered by MIT License. 
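### Loading the frozen graph

A minimal sketch of running inference from the frozen graph saved in the previous snippet (TF 2.x only). The tensor names `x:0` and `Identity:0` and the input shape are placeholders, not guaranteed names; use the values printed by `frozen_func.inputs` / `frozen_func.outputs` above.

```python
import numpy as np
import tensorflow as tf


def load_frozen_graph(pb_path, inputs, outputs):
    # Read the serialized GraphDef written by tf.io.write_graph above
    with tf.io.gfile.GFile(pb_path, "rb") as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())

    # Wrap the imported graph into a callable ConcreteFunction
    wrapped = tf.compat.v1.wrap_function(
        lambda: tf.compat.v1.import_graph_def(graph_def, name=""), [])
    return wrapped.prune(
        feeds=tf.nest.map_structure(wrapped.graph.as_graph_element, inputs),
        fetches=tf.nest.map_structure(wrapped.graph.as_graph_element, outputs))


# Placeholder tensor names; check frozen_func.inputs / frozen_func.outputs
frozen_func = load_frozen_graph("./frozen_models/frozen_graph.pb",
                                inputs=["x:0"], outputs=["Identity:0"])

# Input shape depends on your model; NHWC because change_ordering=True was used
dummy = tf.constant(np.random.rand(1, 224, 224, 3), dtype=tf.float32)
prediction = frozen_func(dummy)
```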
139 | -------------------------------------------------------------------------------- /onnx2keras/__init__.py: -------------------------------------------------------------------------------- 1 | from .converter import onnx_to_keras 2 | from .utils import check_torch_keras_error 3 | 4 | __all__ = ['onnx_to_keras', 'check_torch_keras_error'] 5 | -------------------------------------------------------------------------------- /onnx2keras/activation_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import logging 3 | from .utils import ensure_tf_type, ensure_numpy_type 4 | 5 | 6 | def convert_relu(node, params, layers, lambda_func, node_name, keras_name): 7 | """ 8 | Convert ReLU activation layer 9 | :param node: current operation node 10 | :param params: operation attributes 11 | :param layers: available keras layers 12 | :param lambda_func: function for keras Lambda layer 13 | :param node_name: internal converter name 14 | :param keras_name: resulting layer name 15 | :return: None 16 | """ 17 | if len(node.input) != 1: 18 | assert AttributeError('More than 1 input for an activation layer.') 19 | 20 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 21 | 22 | relu = keras.layers.Activation('relu', name=keras_name) 23 | layers[node_name] = relu(input_0) 24 | 25 | 26 | def convert_elu(node, params, layers, lambda_func, node_name, keras_name): 27 | """ 28 | Convert ELU activation layer 29 | :param node: current operation node 30 | :param params: operation attributes 31 | :param layers: available keras layers 32 | :param lambda_func: function for keras Lambda layer 33 | :param node_name: internal converter name 34 | :param keras_name: resulting layer name 35 | :return: None 36 | """ 37 | if len(node.input) != 1: 38 | assert AttributeError('More than 1 input for an activation layer.') 39 | 40 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 41 | 42 | elu = \ 43 | keras.layers.ELU(alpha=params['alpha'], name=keras_name) 44 | layers[node_name] = elu(input_0) 45 | 46 | 47 | def convert_lrelu(node, params, layers, lambda_func, node_name, keras_name): 48 | """ 49 | Convert LeakyReLU activation layer 50 | :param node: current operation node 51 | :param params: operation attributes 52 | :param layers: available keras layers 53 | :param lambda_func: function for keras Lambda layer 54 | :param node_name: internal converter name 55 | :param keras_name: resulting layer name 56 | :return: None 57 | """ 58 | if len(node.input) != 1: 59 | assert AttributeError('More than 1 input for an activation layer.') 60 | 61 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 62 | 63 | leakyrelu = \ 64 | keras.layers.LeakyReLU(alpha=params['alpha'], name=keras_name) 65 | layers[node_name] = leakyrelu(input_0) 66 | 67 | 68 | def convert_sigmoid(node, params, layers, lambda_func, node_name, keras_name): 69 | """ 70 | Convert Sigmoid activation layer 71 | :param node: current operation node 72 | :param params: operation attributes 73 | :param layers: available keras layers 74 | :param lambda_func: function for keras Lambda layer 75 | :param node_name: internal converter name 76 | :param keras_name: resulting layer name 77 | :return: None 78 | """ 79 | if len(node.input) != 1: 80 | assert AttributeError('More than 1 input for an activation layer.') 81 | 82 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 83 | 84 | sigmoid = 
keras.layers.Activation('sigmoid', name=keras_name) 85 | layers[node_name] = sigmoid(input_0) 86 | 87 | 88 | def convert_tanh(node, params, layers, lambda_func, node_name, keras_name): 89 | """ 90 | Convert Tanh activation layer 91 | :param node: current operation node 92 | :param params: operation attributes 93 | :param layers: available keras layers 94 | :param lambda_func: function for keras Lambda layer 95 | :param node_name: internal converter name 96 | :param keras_name: resulting layer name 97 | :return: None 98 | """ 99 | if len(node.input) != 1: 100 | assert AttributeError('More than 1 input for an activation layer.') 101 | 102 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 103 | 104 | tanh = keras.layers.Activation('tanh', name=keras_name) 105 | layers[node_name] = tanh(input_0) 106 | 107 | 108 | def convert_selu(node, params, layers, lambda_func, node_name, keras_name): 109 | """ 110 | Convert SELU activation layer 111 | :param node: current operation node 112 | :param params: operation attributes 113 | :param layers: available keras layers 114 | :param lambda_func: function for keras Lambda layer 115 | :param node_name: internal converter name 116 | :param keras_name: resulting layer name 117 | :return: None 118 | """ 119 | if len(node.input) != 1: 120 | assert AttributeError('More than 1 input for an activation layer.') 121 | 122 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 123 | 124 | selu = keras.layers.Activation('selu', name=keras_name) 125 | layers[node_name] = selu(input_0) 126 | 127 | 128 | def convert_softmax(node, params, layers, lambda_func, node_name, keras_name): 129 | """ 130 | Convert softmax activation layer 131 | :param node: current operation node 132 | :param params: operation attributes 133 | :param layers: available keras layers 134 | :param lambda_func: function for keras Lambda layer 135 | :param node_name: internal converter name 136 | :param keras_name: resulting layer name 137 | :return: None 138 | """ 139 | if len(node.input) != 1: 140 | assert AttributeError('More than 1 input for an activation layer.') 141 | 142 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 143 | 144 | def target_layer(x, axis=params['axis']): 145 | import tensorflow as tf 146 | return tf.nn.softmax(x, axis=axis) 147 | 148 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 149 | layers[node_name] = lambda_layer(input_0) 150 | layers[node_name].set_shape(layers[node_name].shape) 151 | lambda_func[keras_name] = target_layer 152 | 153 | 154 | def convert_prelu(node, params, layers, lambda_func, node_name, keras_name): 155 | """ 156 | Convert PReLU activation layer 157 | :param node: current operation node 158 | :param params: operation attributes 159 | :param layers: available keras layers 160 | :param lambda_func: function for keras Lambda layer 161 | :param node_name: internal converter name 162 | :param keras_name: resulting layer name 163 | :return: None 164 | """ 165 | logger = logging.getLogger('onnx2keras.prelu') 166 | 167 | if len(node.input) != 2: 168 | assert AttributeError('Activation layer PReLU should have 2 inputs.') 169 | 170 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 171 | W = ensure_numpy_type(layers[node.input[1]]) 172 | 173 | if params['change_ordering']: 174 | logger.warning('PRelu + change ordering needs to be fixed after TF graph is built.') 175 | logger.warning('It\'s experimental.') 176 | 177 | shared_axes = 
[2, 3] 178 | 179 | # for case when W.shape (n,). When activation is used for single dimension vector. 180 | shared_axes = shared_axes if len(W.shape) > 1 else None 181 | 182 | prelu = keras.layers.PReLU(weights=[W], shared_axes=shared_axes, name=keras_name) 183 | layers[node_name] = prelu(input_0) 184 | -------------------------------------------------------------------------------- /onnx2keras/constant_layers.py: -------------------------------------------------------------------------------- 1 | def convert_constant(node, params, layers, lambda_func, node_name, keras_name): 2 | """ 3 | Convert Constant layer 4 | :param node: current operation node 5 | :param params: operation attributes 6 | :param layers: available keras layers 7 | :param lambda_func: function for keras Lambda layer 8 | :param node_name: internal converter name 9 | :param keras_name: resulting layer name 10 | :return: None 11 | """ 12 | layers[node_name] = params['value'] 13 | -------------------------------------------------------------------------------- /onnx2keras/converter.py: -------------------------------------------------------------------------------- 1 | """ 2 | The ONNX to keras converter module 3 | """ 4 | 5 | from tensorflow import keras 6 | import logging 7 | import inspect 8 | import collections 9 | from onnx import numpy_helper 10 | 11 | from .layers import AVAILABLE_CONVERTERS 12 | 13 | 14 | def onnx_node_attributes_to_dict(args): 15 | """ 16 | Parse ONNX attributes to Python dictionary 17 | :param args: ONNX attributes object 18 | :return: Python dictionary 19 | """ 20 | def onnx_attribute_to_dict(onnx_attr): 21 | """ 22 | Parse ONNX attribute 23 | :param onnx_attr: ONNX attribute 24 | :return: Python data type 25 | """ 26 | if onnx_attr.HasField('t'): 27 | return numpy_helper.to_array(getattr(onnx_attr, 't')) 28 | 29 | for attr_type in ['f', 'i', 's']: 30 | if onnx_attr.HasField(attr_type): 31 | return getattr(onnx_attr, attr_type) 32 | 33 | for attr_type in ['floats', 'ints', 'strings']: 34 | if getattr(onnx_attr, attr_type): 35 | return list(getattr(onnx_attr, attr_type)) 36 | return {arg.name: onnx_attribute_to_dict(arg) for arg in args} 37 | 38 | 39 | def onnx_to_keras(onnx_model, input_names, 40 | input_shapes=None, name_policy=None, verbose=True, change_ordering=False): 41 | """ 42 | Convert ONNX graph to Keras model format 43 | :param onnx_model: loaded ONNX model 44 | :param input_names: list with input names 45 | :param input_shapes: override input shapes (experimental) 46 | :param name_policy: override layer names. None, "short" or "renumerate" (experimental) 47 | :param verbose: verbose output 48 | :param change_ordering: change ordering to HWC (experimental) 49 | :return: Keras model 50 | """ 51 | # Use channels first format by default. 
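    # The current image_data_format is remembered in `keras_fmt` below and restored
    # at the end of the conversion, so calling the converter does not permanently
    # change the global Keras data format.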
52 | keras_fmt = keras.backend.image_data_format() 53 | keras.backend.set_image_data_format('channels_first') 54 | 55 | if verbose: 56 | logging.basicConfig(level=logging.DEBUG) 57 | 58 | logger = logging.getLogger('onnx2keras') 59 | 60 | logger.info('Converter is called.') 61 | 62 | onnx_weights = onnx_model.graph.initializer 63 | onnx_inputs = onnx_model.graph.input 64 | onnx_outputs = [i.name for i in onnx_model.graph.output] 65 | onnx_nodes = onnx_model.graph.node 66 | 67 | logger.debug('List input shapes:') 68 | logger.debug(input_shapes) 69 | 70 | logger.debug('List inputs:') 71 | for i, input in enumerate(onnx_inputs): 72 | logger.debug('Input {0} -> {1}.'.format(i, input.name)) 73 | 74 | logger.debug('List outputs:') 75 | for i, output in enumerate(onnx_outputs): 76 | logger.debug('Output {0} -> {1}.'.format(i, output)) 77 | 78 | logger.debug('Gathering weights to dictionary.') 79 | weights = {} 80 | for onnx_w in onnx_weights: 81 | try: 82 | if len(onnx_w.ListFields()) < 4: 83 | onnx_extracted_weights_name = onnx_w.ListFields()[1][1] 84 | else: 85 | onnx_extracted_weights_name = onnx_w.ListFields()[2][1] 86 | weights[onnx_extracted_weights_name] = numpy_helper.to_array(onnx_w) 87 | except: 88 | onnx_extracted_weights_name = onnx_w.ListFields()[3][1] 89 | weights[onnx_extracted_weights_name] = numpy_helper.to_array(onnx_w) 90 | 91 | logger.debug('Found weight {0} with shape {1}.'.format( 92 | onnx_extracted_weights_name, 93 | weights[onnx_extracted_weights_name].shape)) 94 | 95 | layers = dict() 96 | lambda_funcs = dict() 97 | keras_outputs = [] 98 | keras_inputs = [] 99 | 100 | for i, input_name in enumerate(input_names): 101 | for onnx_i in onnx_inputs: 102 | if onnx_i.name == input_name: 103 | if input_shapes: 104 | input_shape = input_shapes[i] 105 | else: 106 | input_shape = [i.dim_value for i in onnx_i.type.tensor_type.shape.dim][1:] 107 | 108 | layers[input_name] = keras.layers.InputLayer( 109 | input_shape=input_shape, name=input_name 110 | ).output 111 | 112 | keras_inputs.append(layers[input_name]) 113 | 114 | logger.debug('Found input {0} with shape {1}'.format(input_name, input_shape)) 115 | 116 | # Convert every operation separable 117 | node_names = [] 118 | for node_index, node in enumerate(onnx_nodes): 119 | node_type = node.op_type 120 | node_params = onnx_node_attributes_to_dict(node.attribute) 121 | 122 | # Add global converter info: 123 | node_params['change_ordering'] = change_ordering 124 | node_params['name_policy'] = name_policy 125 | 126 | node_name = str(node.output[0]) 127 | keras_names = [] 128 | for output_index, output in enumerate(node.output): 129 | if name_policy == 'short': 130 | keras_name = keras_name_i = str(output)[:8] 131 | suffix = 1 132 | while keras_name_i in node_names: 133 | keras_name_i = keras_name + '_' + str(suffix) 134 | suffix += 1 135 | keras_names.append(keras_name_i) 136 | elif name_policy == 'renumerate': 137 | postfix = node_index if len(node.output) == 1 else "%s_%s" % (node_index, output_index) 138 | keras_names.append('LAYER_%s' % postfix) 139 | else: 140 | keras_names.append(output) 141 | 142 | if len(node.output) != 1: 143 | logger.warning('Trying to convert multi-output node') 144 | node_params['_outputs'] = list(node.output) 145 | node_names.extend(keras_names) 146 | else: 147 | keras_names = keras_names[0] 148 | node_names.append(keras_names) 149 | 150 | logger.debug('######') 151 | logger.debug('...') 152 | logger.debug('Converting ONNX operation') 153 | logger.debug('type: %s', node_type) 154 | 
logger.debug('node_name: %s', node_name) 155 | logger.debug('node_params: %s', node_params) 156 | logger.debug('...') 157 | 158 | logger.debug('Check if all inputs are available:') 159 | if len(node.input) == 0 and node_type != 'Constant': 160 | raise AttributeError('Operation doesn\'t have an input. Aborting.') 161 | 162 | for i, node_input in enumerate(node.input): 163 | logger.debug('Check input %i (name %s).', i, node_input) 164 | if node_input not in layers: 165 | logger.debug('The input not found in layers / model inputs.') 166 | 167 | if node_input in weights: 168 | logger.debug('Found in weights, add as a numpy constant.') 169 | layers[node_input] = weights[node_input] 170 | else: 171 | raise AttributeError('Current node is not in weights / model inputs / layers.') 172 | else: 173 | logger.debug('... found all, continue') 174 | 175 | keras.backend.set_image_data_format('channels_first') 176 | AVAILABLE_CONVERTERS[node_type]( 177 | node, 178 | node_params, 179 | layers, 180 | lambda_funcs, 181 | node_name, 182 | keras_names 183 | ) 184 | if isinstance(keras_names, list): 185 | keras_names = keras_names[0] 186 | 187 | try: 188 | logger.debug('Output TF Layer -> ' + str(layers[keras_names])) 189 | except KeyError: 190 | pass 191 | 192 | # Check for terminal nodes 193 | for layer in onnx_outputs: 194 | if layer in layers: 195 | keras_outputs.append(layers[layer]) 196 | 197 | # Create model 198 | model = keras.models.Model(inputs=keras_inputs, outputs=keras_outputs) 199 | 200 | if change_ordering: 201 | change_ord_axes_map = { 202 | 3: 2, 203 | 1: 3, 204 | -1: 1 205 | } 206 | 207 | import numpy as np 208 | conf = model.get_config() 209 | 210 | for layer in conf['layers']: 211 | if layer['config'] and 'shared_axes' in layer['config']: 212 | # TODO: check axes first (if it's not 4D tensor) 213 | layer['config']['shared_axes'] = [1, 2] 214 | 215 | if layer['config'] and 'batch_input_shape' in layer['config']: 216 | layer['config']['batch_input_shape'] = \ 217 | tuple(np.reshape(np.array( 218 | [ 219 | [None] + 220 | list(layer['config']['batch_input_shape'][2:][:]) + 221 | [layer['config']['batch_input_shape'][1]] 222 | ]), -1 223 | )) 224 | if layer['config'] and 'target_shape' in layer['config']: 225 | if len(list(layer['config']['target_shape'][1:][:])) > 0: 226 | layer['config']['target_shape'] = \ 227 | tuple(np.reshape(np.array( 228 | list(layer['config']['target_shape'][1:]) + 229 | [layer['config']['target_shape'][0]] 230 | ), -1),) 231 | 232 | if layer['config'] and 'data_format' in layer['config']: 233 | layer['config']['data_format'] = 'channels_last' 234 | if layer['config'] and 'axis' in layer['config']: 235 | axis = layer['config']['axis'] 236 | # BatchNorm wrap axis with ListWrapper instead single INT value 237 | if isinstance(axis, (tuple, list)): 238 | axis = axis[0] 239 | layer['config']['axis'] = change_ord_axes_map.get(axis, layer['config']['axis']) 240 | 241 | for layer in conf['layers']: 242 | if 'function' in layer['config'] and layer['config']['function'][1] is not None: 243 | kerasf = list(layer['config']['function']) 244 | dargs = list(kerasf[1]) 245 | func = lambda_funcs.get(layer['name']) 246 | 247 | if func: 248 | # ReduceSum operation has 'axis' param as array of ints. 
When onnx uses ReduceSum 249 | # to reproduce SoftMax - dargs become something like [[1]] (list of lists) 250 | # that why we handle collections.Iterable 251 | if len(dargs) > 1 or isinstance(dargs[0], (tuple, list)): 252 | params = inspect.signature(func).parameters 253 | i = list(params.keys()).index('axes') if ('axes' in params) else -1 254 | 255 | if i > 0: 256 | i -= 1 257 | axes = list(range(len(dargs[i].shape))) 258 | axes = axes[0:1] + axes[2:] + axes[1:2] 259 | dargs[i] = np.transpose(dargs[i], axes) 260 | 261 | i = list(params.keys()).index('axis') if ('axis' in params) else -1 262 | 263 | if i > 0: 264 | i -= 1 265 | axis = np.array(dargs[i]) 266 | axes_map = np.array([0, 3, 1, 2]) 267 | # to list because some tf operations check only for core python types (e.g tf.norm) 268 | dargs[i] = axes_map[axis].tolist() 269 | else: 270 | # if map exits will change else will remain the same 271 | dargs[0] = change_ord_axes_map.get(dargs[0], dargs[0]) 272 | 273 | kerasf[1] = tuple(dargs) 274 | layer['config']['function'] = tuple(kerasf) 275 | 276 | keras.backend.set_image_data_format('channels_last') 277 | model_tf_ordering = keras.models.Model.from_config(conf) 278 | 279 | for dst_layer, src_layer, conf in zip(model_tf_ordering.layers, model.layers, conf['layers']): 280 | W = src_layer.get_weights() 281 | # TODO: check axes first (if it's not 4D tensor) 282 | if conf['config'] and 'shared_axes' in conf['config']: 283 | W[0] = W[0].transpose(1, 2, 0) 284 | dst_layer.set_weights(W) 285 | 286 | model = model_tf_ordering 287 | 288 | keras.backend.set_image_data_format(keras_fmt) 289 | 290 | return model 291 | -------------------------------------------------------------------------------- /onnx2keras/convolution_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import logging 3 | from .utils import ensure_tf_type, ensure_numpy_type 4 | 5 | 6 | def convert_conv(node, params, layers, lambda_func, node_name, keras_name): 7 | """ 8 | Convert convolution layer 9 | :param node: current operation node 10 | :param params: operation attributes 11 | :param layers: available keras layers 12 | :param lambda_func: function for keras Lambda layer 13 | :param node_name: internal converter name 14 | :param keras_name: resulting layer name 15 | :return: None 16 | """ 17 | logger = logging.getLogger('onnx2keras.conv') 18 | 19 | if len(node.input) == 3: 20 | logger.debug('Conv with bias') 21 | # Has bias 22 | has_bias = True 23 | W = ensure_numpy_type(layers[node.input[1]]) 24 | bias = ensure_numpy_type(layers[node.input[2]]) 25 | 26 | elif len(node.input) == 2: 27 | logger.debug('Conv without bias') 28 | has_bias = False 29 | W = ensure_numpy_type(layers[node.input[1]]) 30 | bias = None 31 | 32 | else: 33 | raise NotImplementedError('Not implemented') 34 | 35 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 36 | n_groups = params['group'] if 'group' in params else 1 37 | dilation = params['dilations'][0] if 'dilations' in params else 1 38 | pads = params['pads'] if 'pads' in params else [0, 0, 0] 39 | strides = params['strides'] if 'strides' in params else [1, 1, 1] 40 | 41 | if len(W.shape) == 5: # 3D conv 42 | logger.debug('3D convolution') 43 | if pads[0] > 0 or pads[1] > 0 or pads[2] > 0: 44 | logger.debug('Paddings exist, add ZeroPadding layer') 45 | padding_name = keras_name + '_pad' 46 | padding_layer = keras.layers.ZeroPadding3D( 47 | padding=(pads[0], pads[1], pads[2]), 48 | name=padding_name 
49 | ) 50 | layers[padding_name] = input_0 = padding_layer(input_0) 51 | out_channels, channels_per_group, dimension, height, width = W.shape 52 | W = W.transpose(2, 3, 4, 1, 0) 53 | 54 | if has_bias: 55 | weights = [W, bias] 56 | else: 57 | weights = [W] 58 | 59 | conv = keras.layers.Conv3D( 60 | filters=out_channels, 61 | kernel_size=(dimension, height, width), 62 | strides=(strides[0], strides[1], strides[2]), 63 | padding='valid', 64 | weights=weights, 65 | use_bias=has_bias, 66 | activation=None, 67 | dilation_rate=dilation, 68 | bias_initializer='zeros', kernel_initializer='zeros', 69 | name=keras_name, 70 | groups=n_groups 71 | ) 72 | layers[node_name] = conv(input_0) 73 | 74 | elif len(W.shape) == 4: # 2D conv 75 | logger.debug('2D convolution') 76 | 77 | padding = None 78 | if len(pads) == 2 and (pads[0] > 0 or pads[1] > 0): 79 | padding = (pads[0], pads[1]) 80 | elif len(pads) == 4 and (pads[0] > 0 or pads[1] > 0 or pads[2] > 0 or pads[3] > 0): 81 | padding = ((pads[0], pads[2]), (pads[1], pads[3])) 82 | 83 | if padding: 84 | logger.debug('Paddings exist, add ZeroPadding layer') 85 | padding_name = keras_name + '_pad' 86 | padding_layer = keras.layers.ZeroPadding2D( 87 | padding=padding, 88 | name=padding_name, 89 | data_format='channels_first' 90 | ) 91 | layers[padding_name] = input_0 = padding_layer(input_0) 92 | 93 | W = W.transpose(2, 3, 1, 0) 94 | height, width, channels_per_group, out_channels = W.shape 95 | in_channels = channels_per_group * n_groups 96 | 97 | if n_groups == in_channels and n_groups != 1: 98 | logger.debug('Number of groups is equal to input channels, use DepthWise convolution') 99 | W = W.transpose(0, 1, 3, 2) 100 | if has_bias: 101 | weights = [W, bias] 102 | else: 103 | weights = [W] 104 | 105 | conv = keras.layers.DepthwiseConv2D( 106 | kernel_size=(height, width), 107 | strides=(strides[0], strides[1]), 108 | padding='valid', 109 | use_bias=has_bias, 110 | activation=None, 111 | depth_multiplier=1, 112 | weights=weights, 113 | dilation_rate=dilation, 114 | bias_initializer='zeros', kernel_initializer='zeros', 115 | name=keras_name 116 | ) 117 | layers[node_name] = conv(input_0) 118 | 119 | elif n_groups != 1: 120 | logger.debug('Number of groups more than 1, but less than number of in_channel, use group convolution') 121 | 122 | # Example from https://kratzert.github.io/2017/02/24/finetuning-alexnet-with-tensorflow.html 123 | def target_layer(x, groups=n_groups, stride_y=strides[0], stride_x=strides[1]): 124 | import tensorflow as tf 125 | from tensorflow.keras import backend as K 126 | data_format = 'NCHW' if K.image_data_format() == 'channels_first' else 'NHWC' 127 | 128 | if data_format == 'NCHW': 129 | x = tf.transpose(x, [0, 2, 3, 1]) 130 | 131 | def convolve_lambda_biased(i, k, b): 132 | import tensorflow as tf 133 | conv = tf.nn.conv2d(i, k, strides=[1, stride_y, stride_x, 1], dilations=[1, dilation, dilation, 1], padding='VALID', data_format='NHWC') 134 | return tf.nn.bias_add(conv, b, data_format='NHWC') 135 | 136 | def convolve_lambda(i, k): 137 | import tensorflow as tf 138 | return tf.nn.conv2d(i, k, strides=[1, stride_y, stride_x, 1], dilations=[1, dilation, dilation, 1], padding='VALID', data_format='NHWC') 139 | 140 | input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x) 141 | weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=W) 142 | if has_bias: 143 | bias_groups = tf.split(axis=0, num_or_size_splits=groups, value=bias) 144 | output_groups = [convolve_lambda_biased(i, k, b) for i, k, b in 145 | 
zip(input_groups, weight_groups, bias_groups)] 146 | else: 147 | output_groups = [convolve_lambda(i, k) for i, k in zip(input_groups, weight_groups)] 148 | 149 | layer = tf.concat(axis=3, values=output_groups) 150 | if data_format == 'NCHW': 151 | layer = tf.transpose(layer, [0, 3, 1, 2]) 152 | 153 | return layer 154 | 155 | lambda_layer = keras.layers.Lambda(target_layer) 156 | layers[node_name] = lambda_layer(input_0) 157 | 158 | else: 159 | if has_bias: 160 | weights = [W, bias] 161 | else: 162 | weights = [W] 163 | 164 | conv = keras.layers.Conv2D( 165 | filters=out_channels, 166 | kernel_size=(height, width), 167 | strides=(strides[0], strides[1]), 168 | padding='valid', 169 | weights=weights, 170 | use_bias=has_bias, 171 | activation=None, 172 | dilation_rate=dilation, 173 | bias_initializer='zeros', kernel_initializer='zeros', 174 | name=keras_name 175 | ) 176 | 177 | layers[node_name] = conv(input_0) 178 | else: 179 | # 1D conv 180 | W = W.transpose(2, 1, 0) 181 | width, channels, n_filters = W.shape 182 | print(width, channels, n_filters, has_bias) 183 | 184 | if has_bias: 185 | weights = [W, bias] 186 | else: 187 | weights = [W] 188 | 189 | def target_layer(x, w=weights, stride=strides[0]): 190 | import tensorflow as tf 191 | w = tf.convert_to_tensor(w[0]) 192 | x = tf.transpose(x, [0, 2, 1]) 193 | x = tf.nn.conv1d(x, w, stride=stride, padding='SAME', data_format='NWC') 194 | return tf.transpose(x, [0, 2, 1]) 195 | 196 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 197 | lambda_layer[keras_name] = target_layer 198 | layers[node_name] = lambda_layer(input_0) 199 | 200 | # padding_name = keras_name + '_pad' 201 | # padding_layer = keras.layers.ZeroPadding1D( 202 | # padding=(pads[0]), 203 | # name=padding_name 204 | # ) 205 | # print(input_0) 206 | # layers[node_name] = padding_layer(input_0) 207 | # input_0.set_shape(input_0._keras_shape) 208 | # print(input_0._keras_shape) 209 | # print(input_0, n_filters, width) 210 | # conv = keras.layers.Conv1D( 211 | # filters=n_filters, 212 | # kernel_size=width, 213 | # strides=strides[0], 214 | # padding='valid', 215 | # weights=weights, 216 | # use_bias=has_bias, 217 | # activation=None, 218 | # dilation_rate=dilation, 219 | # name=keras_name 220 | # ) 221 | # layers[node_name] = conv(input_0) 222 | 223 | 224 | def convert_convtranspose(node, params, layers, 225 | lambda_func, node_name, keras_name): 226 | """ 227 | Convert transposed convolution layer 228 | :param node: current operation node 229 | :param params: operation attributes 230 | :param layers: available keras layers 231 | :param lambda_func: function for keras Lambda layer 232 | :param node_name: internal converter name 233 | :param keras_name: resulting layer name 234 | :return: None 235 | """ 236 | logger = logging.getLogger('onnx2keras.convtranpose') 237 | 238 | if len(node.input) == 3: 239 | logger.debug('ConvTranspose with bias') 240 | # Has bias 241 | has_bias = True 242 | W = ensure_numpy_type(layers[node.input[1]]) 243 | bias = ensure_numpy_type(layers[node.input[2]]) 244 | 245 | elif len(node.input) == 2: 246 | logger.debug('ConvTranspose without bias') 247 | has_bias = False 248 | W = ensure_numpy_type(layers[node.input[1]]) 249 | bias = None 250 | 251 | else: 252 | raise NotImplementedError('Not implemented') 253 | 254 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 255 | n_groups = params['group'] if 'group' in params else 1 256 | dilation = params['dilations'][0] if 'dilations' in params else 1 257 | pads = 
params['pads'] if 'pads' in params else [0, 0] 258 | strides = params['strides'] if 'strides' in params else [1, 1] 259 | 260 | if len(W.shape) == 5: # 3D conv 261 | raise NotImplementedError('Not implemented') 262 | 263 | elif len(W.shape) == 4: # 2D conv 264 | W = W.transpose(2, 3, 1, 0) 265 | height, width, n_filters, channels = W.shape 266 | 267 | if has_bias: 268 | weights = [W, bias] 269 | else: 270 | weights = [W] 271 | 272 | if n_groups > 1: 273 | raise AttributeError('Cannot convert ConvTranspose2d with groups != 1') 274 | 275 | if dilation > 1: 276 | raise AttributeError('Cannot convert ConvTranspose2d with dilation_rate != 1') 277 | 278 | conv = keras.layers.Conv2DTranspose( 279 | filters=n_filters, 280 | kernel_size=(height, width), 281 | strides=strides, 282 | padding='valid', 283 | output_padding=0, 284 | weights=weights, 285 | use_bias=has_bias, 286 | activation=None, 287 | dilation_rate=dilation, 288 | bias_initializer='zeros', kernel_initializer='zeros', 289 | name=keras_name 290 | ) 291 | 292 | if 'output_shape' in params and 'pads' not in params: 293 | logger.debug('!!!!! Paddings will be calculated automatically !!!!!') 294 | pads = [strides[0] * (int(input_0.shape[2]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][0], 295 | strides[1] * (int(input_0.shape[3]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][1]] 296 | 297 | layers[node_name] = input_0 = conv(input_0) 298 | 299 | # Magic ad-hoc. 300 | # See the Keras issue: https://github.com/keras-team/keras/issues/6777 301 | # input_0.set_shape(input_0.shape) 302 | 303 | if 'output_padding' in params and (params['output_padding'][0] > 0 or params['output_padding'][1] > 0): 304 | raise AttributeError('Cannot convert ConvTranspose2d with output_padding != 0') 305 | 306 | if pads[0] > 0: 307 | logger.debug('Add cropping layer for output padding') 308 | assert(len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1])) 309 | 310 | crop = keras.layers.Cropping2D( 311 | pads[:2], 312 | name=keras_name + '_crop' 313 | ) 314 | layers[node_name] = crop(input_0) 315 | else: 316 | raise AttributeError('Layer is not supported for now') 317 | -------------------------------------------------------------------------------- /onnx2keras/elementwise_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import logging 3 | from .utils import is_numpy, ensure_tf_type 4 | 5 | 6 | def convert_elementwise_div(node, params, layers, lambda_func, node_name, keras_name): 7 | """ 8 | Convert element-wise division 9 | :param node: current operation node 10 | :param params: operation attributes 11 | :param layers: available keras layers 12 | :param lambda_func: function for keras Lambda layer 13 | :param node_name: internal converter name 14 | :param keras_name: resulting layer name 15 | :return: None 16 | """ 17 | logger = logging.getLogger('onnx2keras.div') 18 | 19 | if len(node.input) != 2: 20 | raise AttributeError('Number of inputs is not equal 2 for element-wise layer') 21 | 22 | if is_numpy(layers[node.input[0]]) and is_numpy(layers[node.input[1]]): 23 | logger.debug('Divide numpy arrays.') 24 | layers[node_name] = layers[node.input[0]] / layers[node.input[1]] 25 | else: 26 | logger.debug('Convert inputs to Keras/TF layers if needed.') 27 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) 28 | input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % 
keras_name) 29 | 30 | def target_layer(x): 31 | import tensorflow as tf 32 | layer = tf.divide( 33 | x[0], 34 | x[1] 35 | ) 36 | return layer 37 | 38 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 39 | layers[node_name] = lambda_layer([input_0, input_1]) 40 | lambda_func[keras_name] = target_layer 41 | 42 | 43 | def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_name): 44 | """ 45 | Convert element-wise add. 46 | :param node: current operation node 47 | :param params: operation attributes 48 | :param layers: available keras layers 49 | :param lambda_func: function for keras Lambda layer 50 | :param node_name: internal converter name 51 | :param keras_name: resulting layer name 52 | :return: None 53 | """ 54 | logger = logging.getLogger('onnx2keras.add') 55 | 56 | if len(node.input) != 2: 57 | raise AttributeError('Number of inputs is not equal 2 for element-wise layer') 58 | 59 | logger.debug('Convert inputs to Keras/TF layers if needed.') 60 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) 61 | input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % keras_name) 62 | 63 | try: 64 | if not is_numpy(layers[node.input[0]]) and not is_numpy(layers[node.input[1]]): 65 | add = keras.layers.Add(name=keras_name) 66 | layers[node_name] = add([input_0, input_1]) 67 | else: 68 | raise ValueError('Operands are different.') 69 | 70 | except (IndexError, ValueError): 71 | logger.warning('Failed to use keras.layers.Add. Fallback to TF lambda.') 72 | 73 | def target_layer(x): 74 | import tensorflow as tf 75 | print(x[0], x[1]) 76 | layer = tf.add( 77 | x[0], 78 | x[1] 79 | ) 80 | return layer 81 | 82 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 83 | layers[node_name] = lambda_layer([input_0, input_1]) 84 | lambda_func[keras_name] = target_layer 85 | 86 | 87 | def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name): 88 | """ 89 | Convert element-wise mul. 90 | :param node: current operation node 91 | :param params: operation attributes 92 | :param layers: available keras layers 93 | :param lambda_func: function for keras Lambda layer 94 | :param node_name: internal converter name 95 | :param keras_name: resulting layer name 96 | :return: None 97 | """ 98 | logger = logging.getLogger('onnx2keras.mul') 99 | 100 | if len(node.input) != 2: 101 | raise AttributeError('Number of inputs is not equal 2 for element-wise layer') 102 | 103 | logger.debug('Convert inputs to Keras/TF layers if needed.') 104 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) 105 | input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % keras_name) 106 | 107 | try: 108 | mul = keras.layers.Multiply(name=keras_name) 109 | layers[node_name] = mul([input_0, input_1]) 110 | except (IndexError, ValueError): 111 | logger.warning('Failed to use keras.layers.Multiply. 
Fallback to TF lambda.') 112 | 113 | # Doesn't work with constants 114 | # IndexError: tuple index out of range 115 | 116 | def target_layer(x): 117 | import tensorflow as tf 118 | layer = tf.multiply( 119 | x[0], 120 | x[1] 121 | ) 122 | return layer 123 | 124 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 125 | layers[node_name] = lambda_layer([input_0, input_1]) 126 | lambda_func[keras_name] = target_layer 127 | 128 | 129 | def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name): 130 | """ 131 | Convert element-wise sub. 132 | :param node: current operation node 133 | :param params: operation attributes 134 | :param layers: available keras layers 135 | :param lambda_func: function for keras Lambda layer 136 | :param node_name: internal converter name 137 | :param keras_name: resulting layer name 138 | :return: None 139 | """ 140 | logger = logging.getLogger('onnx2keras.sub') 141 | 142 | if len(node.input) != 2: 143 | raise AttributeError('Number of inputs is not equal 2 for element-wise layer') 144 | 145 | logger.debug('Convert inputs to Keras/TF layers if needed.') 146 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) 147 | input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % keras_name) 148 | 149 | try: 150 | sub = keras.layers.Subtract(name=keras_name) 151 | layers[node_name] = sub([input_0, input_1]) 152 | except (IndexError, ValueError): 153 | logger.warning('Failed to use keras.layers.Subtract. Fallback to TF lambda.') 154 | 155 | # Doesn't work with constants 156 | # IndexError: tuple index out of range 157 | 158 | def target_layer(x): 159 | import tensorflow as tf 160 | layer = tf.subtract( 161 | x[0], 162 | x[1] 163 | ) 164 | return layer 165 | 166 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 167 | layers[node_name] = lambda_layer([input_0, input_1]) 168 | lambda_func[keras_name] = target_layer 169 | 170 | 171 | def convert_min(node, params, layers, lambda_func, node_name, keras_name): 172 | """ 173 | Convert Min layer 174 | :param node: current operation node 175 | :param params: operation attributes 176 | :param layers: available keras layers 177 | :param lambda_func: function for keras Lambda layer 178 | :param node_name: internal converter name 179 | :param keras_name: resulting layer name 180 | :return: None 181 | """ 182 | if len(node.input) < 2: 183 | assert AttributeError('Less than 2 inputs for min layer.') 184 | 185 | inputs = list() 186 | for i, inp in enumerate(node.input): 187 | input_ = ensure_tf_type(layers[inp], layers[list(layers)[0]], name="%s_const%i" % (keras_name, i+1)) 188 | inputs.append(input_) 189 | layers[node_name] = keras.layers.Minimum(name=keras_name)(inputs) 190 | 191 | 192 | def convert_max(node, params, layers, lambda_func, node_name, keras_name): 193 | """ 194 | Convert Max layer 195 | :param node: current operation node 196 | :param params: operation attributes 197 | :param layers: available keras layers 198 | :param lambda_func: function for keras Lambda layer 199 | :param node_name: internal converter name 200 | :param keras_name: resulting layer name 201 | :return: None 202 | """ 203 | if len(node.input) < 2: 204 | assert AttributeError('Less than 2 inputs for max layer.') 205 | 206 | inputs = list() 207 | for i, inp in enumerate(node.input): 208 | input_ = ensure_tf_type(layers[inp], layers[list(layers)[0]], name="%s_const%i" % (keras_name, i+1)) 209 | inputs.append(input_) 
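    # The gathered input tensors are then merged with an element-wise maximum below.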
210 | layers[node_name] = keras.layers.Maximum(name=keras_name)(inputs) 211 | 212 | 213 | def convert_mean(node, params, layers, lambda_func, node_name, keras_name): 214 | """ 215 | Convert Mean layer 216 | :param node: current operation node 217 | :param params: operation attributes 218 | :param layers: available keras layers 219 | :param lambda_func: function for keras Lambda layer 220 | :param node_name: internal converter name 221 | :param keras_name: resulting layer name 222 | :return: None 223 | :TODO: Test if this supports multidirectional (i.e., Numpy-style) broadcasting as required 224 | """ 225 | if len(node.input) < 2: 226 | assert AttributeError('Less than 2 inputs for mean layer.') 227 | 228 | inputs = list() 229 | for i, inp in enumerate(node.input): 230 | input_ = ensure_tf_type(layers[inp], layers[list(layers)[0]], name="%s_const%i" % (keras_name, i+1)) 231 | inputs.append(input_) 232 | layers[node_name] = keras.layers.Average(name=keras_name)(inputs) 233 | -------------------------------------------------------------------------------- /onnx2keras/layers.py: -------------------------------------------------------------------------------- 1 | from .convolution_layers import convert_conv, convert_convtranspose 2 | from .activation_layers import convert_relu, convert_elu, convert_lrelu, convert_selu, \ 3 | convert_sigmoid, convert_tanh, convert_softmax, convert_prelu 4 | from .operation_layers import convert_clip, convert_exp, convert_reduce_sum, convert_reduce_mean, \ 5 | convert_log, convert_pow, convert_sqrt, convert_split, convert_cast, convert_floor, convert_identity, \ 6 | convert_argmax, convert_reduce_l2, convert_reduce_max 7 | from .elementwise_layers import convert_elementwise_div, convert_elementwise_add, convert_elementwise_mul, convert_elementwise_sub, convert_max, convert_min, convert_mean 8 | from .linear_layers import convert_gemm 9 | from .reshape_layers import convert_transpose, convert_shape, convert_gather, convert_unsqueeze, \ 10 | convert_concat, convert_reshape, convert_flatten, convert_slice, convert_squeeze, convert_expand 11 | from .constant_layers import convert_constant 12 | from .normalization_layers import convert_batchnorm, convert_instancenorm, convert_dropout, convert_lrn 13 | from .pooling_layers import convert_avgpool, convert_maxpool, convert_global_avg_pool 14 | from .padding_layers import convert_padding 15 | from .upsampling_layers import convert_upsample 16 | 17 | 18 | AVAILABLE_CONVERTERS = { 19 | 'Conv': convert_conv, 20 | 'ConvTranspose': convert_convtranspose, 21 | 'Relu': convert_relu, 22 | 'Elu': convert_elu, 23 | 'LeakyRelu': convert_lrelu, 24 | 'Sigmoid': convert_sigmoid, 25 | 'Tanh': convert_tanh, 26 | 'Selu': convert_selu, 27 | 'Clip': convert_clip, 28 | 'Exp': convert_exp, 29 | 'Log': convert_log, 30 | 'Softmax': convert_softmax, 31 | 'PRelu': convert_prelu, 32 | 'ReduceMax': convert_reduce_max, 33 | 'ReduceSum': convert_reduce_sum, 34 | 'ReduceMean': convert_reduce_mean, 35 | 'Pow': convert_pow, 36 | 'Slice': convert_slice, 37 | 'Squeeze': convert_squeeze, 38 | 'Expand': convert_expand, 39 | 'Sqrt': convert_sqrt, 40 | 'Split': convert_split, 41 | 'Cast': convert_cast, 42 | 'Floor': convert_floor, 43 | 'Identity': convert_identity, 44 | 'ArgMax': convert_argmax, 45 | 'ReduceL2': convert_reduce_l2, 46 | 'Max': convert_max, 47 | 'Min': convert_min, 48 | 'Mean': convert_mean, 49 | 'Div': convert_elementwise_div, 50 | 'Add': convert_elementwise_add, 51 | 'Sum': convert_elementwise_add, 52 | 'Mul': convert_elementwise_mul, 53 | 
'Sub': convert_elementwise_sub, 54 | 'Gemm': convert_gemm, 55 | 'MatMul': convert_gemm, 56 | 'Transpose': convert_transpose, 57 | 'Constant': convert_constant, 58 | 'BatchNormalization': convert_batchnorm, 59 | 'InstanceNormalization': convert_instancenorm, 60 | 'Dropout': convert_dropout, 61 | 'LRN': convert_lrn, 62 | 'MaxPool': convert_maxpool, 63 | 'AveragePool': convert_avgpool, 64 | 'GlobalAveragePool': convert_global_avg_pool, 65 | 'Shape': convert_shape, 66 | 'Gather': convert_gather, 67 | 'Unsqueeze': convert_unsqueeze, 68 | 'Concat': convert_concat, 69 | 'Reshape': convert_reshape, 70 | 'Pad': convert_padding, 71 | 'Flatten': convert_flatten, 72 | 'Upsample': convert_upsample, 73 | } 74 | -------------------------------------------------------------------------------- /onnx2keras/linear_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import logging 3 | from .utils import is_numpy 4 | 5 | def convert_gemm(node, params, layers, lambda_func, node_name, keras_name): 6 | """ 7 | Convert Linear / GEMM layer 8 | :param node: current operation node 9 | :param params: operation attributes 10 | :param layers: available keras layers 11 | :param lambda_func: function for keras Lambda layer 12 | :param node_name: internal converter name 13 | :param keras_name: resulting layer name 14 | :return: None 15 | """ 16 | logger = logging.getLogger('onnx2keras.gemm') 17 | 18 | # Check if Bias available 19 | if len(node.input) == 3: 20 | has_bias = True 21 | keras_weights = [layers[node.input[1]], layers[node.input[2]]] 22 | logger.debug('Convert GEMM with bias.') 23 | elif len(node.input) == 2: 24 | has_bias = False 25 | keras_weights = [layers[node.input[1]]] 26 | logger.debug('Convert GEMM without bias.') 27 | else: 28 | raise AttributeError('More than 3 or less than 2 inputs') 29 | 30 | # Linear can have additional flag to transpose weights 31 | if 'transB' in params and params['transB'] == 1: 32 | logger.debug('Transposing W matrix.') 33 | keras_weights[0] = keras_weights[0].transpose() 34 | 35 | # Estimate input/output neurons 36 | input_channels, output_channels = keras_weights[0].shape 37 | logger.debug('Input units %s, output units %s.', input_channels, output_channels) 38 | 39 | if is_numpy(keras_weights[0]): 40 | dense = keras.layers.Dense( 41 | output_channels, 42 | weights=keras_weights, name=keras_name, bias_initializer='zeros', kernel_initializer='zeros', use_bias=has_bias 43 | ) 44 | 45 | # The first input - always X 46 | try: 47 | layers[node_name] = dense(layers[node.input[0]]) 48 | except ValueError: 49 | reshape = keras.layers.Reshape([input_channels], name=keras_name + '_reshape') 50 | reshaped_x = reshape(layers[node.input[0]]) 51 | layers[node_name] = dense(reshaped_x) 52 | 53 | else: 54 | layers[node_name] = keras.layers.Multiply()([layers[node.input[0]], layers[node.input[1]]]) 55 | -------------------------------------------------------------------------------- /onnx2keras/normalization_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import tensorflow as tf 3 | import tensorflow_addons as tfa 4 | import logging 5 | from .utils import ensure_tf_type, ensure_numpy_type 6 | 7 | 8 | def convert_batchnorm(node, params, layers, lambda_func, node_name, keras_name): 9 | """ 10 | Convert BatchNorm2d layer 11 | :param node: current operation node 12 | :param params: operation attributes 13 | :param layers: available keras layers 
14 | :param lambda_func: function for keras Lambda layer 15 | :param node_name: internal converter name 16 | :param keras_name: resulting layer name 17 | :return: None 18 | """ 19 | logger = logging.getLogger('onnx2keras.batchnorm2d') 20 | 21 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 22 | 23 | if len(node.input) == 5: 24 | weights = [ 25 | ensure_numpy_type(layers[node.input[1]]), 26 | ensure_numpy_type(layers[node.input[2]]), 27 | ensure_numpy_type(layers[node.input[3]]), 28 | ensure_numpy_type(layers[node.input[4]]) 29 | ] 30 | elif len(node.input) == 3: 31 | weights = [ 32 | ensure_numpy_type(layers[node.input[1]]), 33 | ensure_numpy_type(layers[node.input[2]]) 34 | ] 35 | else: 36 | raise AttributeError('Unknown arguments for batch norm') 37 | 38 | eps = params['epsilon'] if 'epsilon' in params else 1e-05 # default epsilon 39 | momentum = params['momentum'] if 'momentum' in params else 0.9 # default momentum 40 | 41 | if len(weights) == 2: 42 | logger.debug('Batch normalization without running averages') 43 | bn = keras.layers.BatchNormalization( 44 | axis=1, momentum=momentum, epsilon=eps, 45 | center=False, scale=False, 46 | weights=weights, 47 | name=keras_name 48 | ) 49 | else: 50 | bn = keras.layers.BatchNormalization( 51 | axis=1, momentum=momentum, epsilon=eps, 52 | weights=weights, 53 | name=keras_name 54 | ) 55 | 56 | layers[node_name] = bn(input_0) 57 | 58 | 59 | def convert_instancenorm(node, params, layers, lambda_func, node_name, keras_name): 60 | """ 61 | Convert InstanceNorm2d layer 62 | :param node: current operation node 63 | :param params: operation attributes 64 | :param layers: available keras layers 65 | :param lambda_func: function for keras Lambda layer 66 | :param node_name: internal converter name 67 | :param keras_name: resulting layer name 68 | :return: None 69 | """ 70 | logger = logging.getLogger('onnx2keras.instancenorm2d') 71 | 72 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 73 | 74 | if len(node.input) == 3: 75 | gamma = ensure_numpy_type(layers[node.input[1]]) 76 | beta = ensure_numpy_type(layers[node.input[2]]) 77 | else: 78 | raise AttributeError('Unknown arguments for instance norm') 79 | 80 | epsilon = params['epsilon'] 81 | 82 | instance_norm = tfa.layers.InstanceNormalization( 83 | axis=1, 84 | epsilon=epsilon, 85 | beta_initializer=tf.constant_initializer(beta), 86 | gamma_initializer=tf.constant_initializer(gamma), 87 | trainable=False 88 | ) 89 | layers[node_name] = instance_norm(input_0) 90 | 91 | 92 | def convert_dropout(node, params, layers, lambda_func, node_name, keras_name): 93 | """ 94 | Convert Dropout layer 95 | :param node: current operation node 96 | :param params: operation attributes 97 | :param layers: available keras layers 98 | :param lambda_func: function for keras Lambda layer 99 | :param node_name: internal converter name 100 | :param keras_name: resulting layer name 101 | :return: None 102 | """ 103 | logger = logging.getLogger('onnx2keras.dropout') 104 | 105 | # In ONNX Dropout returns dropout mask as well. 
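    # Only the first output name (the data tensor) is used for the Keras layer;
    # the mask output is not converted.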
106 | if isinstance(keras_name, list) and len(keras_name) > 1: 107 | keras_name = keras_name[0] 108 | 109 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 110 | 111 | ratio = params['ratio'] if 'ratio' in params else 0.0 112 | lambda_layer = keras.layers.Dropout(ratio, name=keras_name) 113 | layers[node_name] = lambda_layer(input_0) 114 | 115 | 116 | def convert_lrn(node, params, layers, lambda_func, node_name, keras_name): 117 | """ 118 | Convert LRN layer 119 | :param node: current operation node 120 | :param params: operation attributes 121 | :param layers: available keras layers 122 | :param lambda_func: function for keras Lambda layer 123 | :param node_name: internal converter name 124 | :param keras_name: resulting layer name 125 | :return: None 126 | """ 127 | logger = logging.getLogger('onnx2keras.LRN') 128 | logger.debug('LRN can\'t be tested with PyTorch exporter, so the support is experimental.') 129 | 130 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 131 | 132 | def target_layer(x, depth_radius=params['size'], bias=params['bias'], alpha=params['alpha'], beta=params['beta']): 133 | import tensorflow as tf 134 | from keras import backend as K 135 | data_format = 'NCHW' if K.image_data_format() == 'channels_first' else 'NHWC' 136 | 137 | if data_format == 'NCHW': 138 | x = tf.transpose(x, [0, 2, 3, 1]) 139 | 140 | layer = tf.nn.local_response_normalization( 141 | x, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta 142 | ) 143 | 144 | if data_format == 'NCHW': 145 | layer = tf.transpose(x, [0, 3, 1, 2]) 146 | 147 | return layer 148 | 149 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 150 | layers[node_name] = lambda_layer(input_0) 151 | lambda_func[keras_name] = target_layer 152 | -------------------------------------------------------------------------------- /onnx2keras/operation_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | from tensorflow.keras import backend as K 3 | import logging 4 | from .utils import is_numpy, ensure_tf_type, ensure_numpy_type 5 | import numpy as np 6 | 7 | # Handle python 2.7 import error 8 | try: 9 | from collections.abc import Iterable 10 | except ImportError: 11 | from collections import Iterable 12 | 13 | 14 | def convert_clip(node, params, layers, lambda_func, node_name, keras_name): 15 | """ 16 | Convert clip layer 17 | :param node: current operation node 18 | :param params: operation attributes 19 | :param layers: available keras layers 20 | :param lambda_func: function for keras Lambda layer 21 | :param node_name: internal converter name 22 | :param keras_name: resulting layer name 23 | :return: None 24 | """ 25 | logger = logging.getLogger('onnx2keras.clip') 26 | if len(node.input) != 1: 27 | assert AttributeError('More than 1 input for clip layer.') 28 | 29 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 30 | 31 | if params['min'] == 0: 32 | logger.debug("Using ReLU({0}) instead of clip".format(params['max'])) 33 | layer = keras.layers.ReLU(max_value=params['max'], name=keras_name) 34 | else: 35 | def target_layer(x, vmin=params['min'], vmax=params['max']): 36 | import tensorflow as tf 37 | return tf.clip_by_value(x, vmin, vmax) 38 | layer = keras.layers.Lambda(target_layer, name=keras_name) 39 | lambda_func[keras_name] = target_layer 40 | 41 | layers[node_name] = layer(input_0) 42 | 43 | 44 | def convert_log(node, params, layers, 
lambda_func, node_name, keras_name): 45 | """ 46 | Convert Log layer 47 | :param node: current operation node 48 | :param params: operation attributes 49 | :param layers: available keras layers 50 | :param lambda_func: function for keras Lambda layer 51 | :param node_name: internal converter name 52 | :param keras_name: resulting layer name 53 | :return: None 54 | """ 55 | if len(node.input) != 1: 56 | assert AttributeError('More than 1 input for log layer.') 57 | 58 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 59 | 60 | def target_layer(x): 61 | import tensorflow.keras.backend as K 62 | return K.log(x) 63 | 64 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 65 | layers[node_name] = lambda_layer(input_0) 66 | lambda_func[keras_name] = target_layer 67 | 68 | 69 | def convert_exp(node, params, layers, lambda_func, node_name, keras_name): 70 | """ 71 | Convert Exp layer 72 | :param node: current operation node 73 | :param params: operation attributes 74 | :param layers: available keras layers 75 | :param lambda_func: function for keras Lambda layer 76 | :param node_name: resulting layer name 77 | :return: None 78 | """ 79 | if len(node.input) != 1: 80 | assert AttributeError('More than 1 input for log layer.') 81 | 82 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 83 | 84 | def target_layer(x): 85 | import tensorflow.keras.backend as K 86 | return K.exp(x) 87 | 88 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 89 | layers[node_name] = lambda_layer(input_0) 90 | lambda_func[keras_name] = target_layer 91 | 92 | 93 | def convert_reduce_sum(node, params, layers, lambda_func, node_name, keras_name): 94 | """ 95 | Convert reduce sum. 96 | :param node: current operation node 97 | :param params: operation attributes 98 | :param layers: available keras layers 99 | :param lambda_func: function for keras Lambda layer 100 | :param node_name: internal converter name 101 | :param keras_name: resulting layer name 102 | :return: None 103 | """ 104 | if len(node.input) != 1: 105 | assert AttributeError('More than 1 input for reduce sum layer.') 106 | 107 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 108 | 109 | axis = params['axes'] 110 | 111 | def target_layer(x, axis=axis): 112 | import tensorflow.keras.backend as K 113 | return K.sum(x, keepdims=True, axis=axis) 114 | 115 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 116 | layers[node_name] = lambda_layer(input_0) 117 | layers[node_name].set_shape(layers[node_name].shape) 118 | lambda_func[keras_name] = target_layer 119 | 120 | 121 | def convert_reduce_mean(node, params, layers, lambda_func, node_name, keras_name): 122 | """ 123 | Convert reduce mean. 
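# Illustrative sketch of the keepdims convention used by the Reduce* converters
# (the shape and axes below are made-up example values): ONNX keepdims=1 keeps the
# reduced axes with length 1, matching K.mean(..., keepdims=True), while
# keepdims=0 drops them, matching keepdims=False.
import numpy as np
from tensorflow.keras import backend as K
x_demo = K.constant(np.random.rand(1, 3, 4, 4).astype(np.float32))
kept = K.mean(x_demo, axis=[2, 3], keepdims=True)      # shape (1, 3, 1, 1)
dropped = K.mean(x_demo, axis=[2, 3], keepdims=False)  # shape (1, 3)
assert tuple(kept.shape) == (1, 3, 1, 1) and tuple(dropped.shape) == (1, 3)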
124 | :param node: current operation node 125 | :param params: operation attributes 126 | :param layers: available keras layers 127 | :param lambda_func: function for keras Lambda layer 128 | :param node_name: internal converter name 129 | :param keras_name: resulting layer name 130 | :return: None 131 | """ 132 | if len(node.input) != 1: 133 | assert AttributeError('More than 1 input for reduce mean layer.') 134 | 135 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 136 | 137 | def target_layer(x, axis=params['axes'], keepdims=params['keepdims']): 138 | import tensorflow.keras.backend as K 139 | return K.mean(x, keepdims=(keepdims == 1), axis=axis) 140 | 141 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 142 | layers[node_name] = lambda_layer(input_0) 143 | layers[node_name].set_shape(layers[node_name].shape) 144 | lambda_func[keras_name] = target_layer 145 | 146 | 147 | def convert_reduce_max(node, params, layers, lambda_func, node_name, keras_name): 148 | """ 149 | Convert reduce max. 150 | :param node: current operation node 151 | :param params: operation attributes 152 | :param layers: available keras layers 153 | :param lambda_func: function for keras Lambda layer 154 | :param node_name: internal converter name 155 | :param keras_name: resulting layer name 156 | :return: None 157 | """ 158 | if len(node.input) != 1: 159 | assert AttributeError('More than 1 input for reduce max layer.') 160 | 161 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 162 | 163 | def target_layer(x, axis=params['axes'], keepdims=params['keepdims']): 164 | import tensorflow.keras.backend as K 165 | return K.max(x, keepdims=(keepdims == 1), axis=axis) 166 | 167 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 168 | layers[node_name] = lambda_layer(input_0) 169 | layers[node_name].set_shape(layers[node_name].shape) 170 | lambda_func[keras_name] = target_layer 171 | 172 | 173 | def convert_pow(node, params, layers, lambda_func, node_name, keras_name): 174 | """ 175 | Convert Pow layer 176 | :param node: current operation node 177 | :param params: operation attributes 178 | :param layers: available keras layers 179 | :param lambda_func: function for keras Lambda layer 180 | :param node_name: internal converter name 181 | :param keras_name: resulting layer name 182 | :return: None 183 | """ 184 | if len(node.input) != 2: 185 | assert AttributeError('More than 2 inputs for pow layer.') 186 | 187 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 188 | power = ensure_numpy_type(layers[node.input[1]]) 189 | 190 | def target_layer(x, a=power): 191 | import tensorflow.keras.backend as K 192 | return K.pow(x, a) 193 | 194 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 195 | layers[node_name] = lambda_layer(input_0) 196 | lambda_func[keras_name] = target_layer 197 | 198 | 199 | def convert_sqrt(node, params, layers, lambda_func, node_name, keras_name): 200 | """ 201 | Convert Sqrt layer 202 | :param node: current operation node 203 | :param params: operation attributes 204 | :param layers: available keras layers 205 | :param lambda_func: function for keras Lambda layer 206 | :param node_name: internal converter name 207 | :param keras_name: resulting layer name 208 | :return: None 209 | """ 210 | if len(node.input) != 1: 211 | assert AttributeError('More than 1 input for sqrt layer.') 212 | 213 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % 
keras_name) 214 | 215 | def target_layer(x): 216 | import tensorflow.keras.backend as K 217 | return K.sqrt(x) 218 | 219 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 220 | layers[node_name] = lambda_layer(input_0) 221 | lambda_func[keras_name] = target_layer 222 | 223 | 224 | def convert_split(node, params, layers, lambda_func, node_name, keras_names): 225 | """ 226 | Convert Split layer 227 | :param node: current operation node 228 | :param params: operation attributes 229 | :param layers: available keras layers 230 | :param lambda_func: function for keras Lambda layer 231 | :param node_name: internal converter name 232 | :param keras_name: resulting layer name 233 | :return: None 234 | """ 235 | if len(node.input) != 1: 236 | assert AttributeError('More than 1 input for split layer.') 237 | 238 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_names[0]) 239 | splits = params["split"] 240 | axis = params.get("axis", 0) 241 | if not isinstance(splits, Iterable): 242 | # This might not work if `split` is a tensor. 243 | chunk_size = K.int_size(input_0)[axis] // splits 244 | splits = (chunk_size,) * splits 245 | 246 | cur = 0 247 | for i, split in enumerate(splits): 248 | node_name = params['_outputs'][i] 249 | 250 | def target_layer(x, axis=axis, start_i=cur, end_i=cur+split): 251 | slices = [slice(None, None)] * len(K.int_shape(x)) 252 | slices[axis] = slice(start_i, end_i) 253 | return x[tuple(slices)] 254 | 255 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_names[i]) 256 | layers[node_name] = lambda_layer(input_0) 257 | lambda_func[keras_names[i]] = target_layer 258 | cur += split 259 | 260 | 261 | def convert_cast(node, params, layers, lambda_func, node_name, keras_name): 262 | """ 263 | Convert Cast layer 264 | :param node: current operation node 265 | :param params: operation attributes 266 | :param layers: available keras layers 267 | :param lambda_func: function for keras Lambda layer 268 | :param node_name: internal converter name 269 | :param keras_name: resulting layer name 270 | :return: None 271 | """ 272 | logger = logging.getLogger('onnx2keras.cast') 273 | 274 | if len(node.input) != 1: 275 | assert AttributeError('More than 1 input for cast layer.') 276 | 277 | if is_numpy(layers[node.input[0]]): 278 | logger.debug('Cast numpy array') 279 | 280 | cast_map = { 281 | 1: np.float32, 282 | 2: np.uint8, 283 | 3: np.int8, 284 | 5: np.int16, 285 | 6: np.int32, 286 | 7: np.int64, 287 | 9: np.bool, 288 | 10: np.float16, 289 | 11: np.double, 290 | } 291 | 292 | layers[node_name] = cast_map[params['to']](layers[node.input[0]]) 293 | else: 294 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 295 | 296 | def target_layer(x, dtype=params['to']): 297 | import tensorflow as tf 298 | cast_map = { 299 | 1: tf.float32, 300 | 2: tf.uint8, 301 | 3: tf.int8, 302 | 5: tf.int16, 303 | 6: tf.int32, 304 | 7: tf.int64, 305 | 9: tf.bool, 306 | 10: tf.float16, 307 | 11: tf.double, 308 | } 309 | return tf.cast(x, cast_map[dtype]) 310 | 311 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 312 | layers[node_name] = lambda_layer(input_0) 313 | lambda_func[keras_name] = target_layer 314 | 315 | 316 | def convert_floor(node, params, layers, lambda_func, node_name, keras_name): 317 | """ 318 | Convert Floor layer 319 | :param node: current operation node 320 | :param params: operation attributes 321 | :param layers: available keras layers 322 | :param lambda_func: function for keras Lambda layer 323 
| :param node_name: internal converter name 324 | :param keras_name: resulting layer name 325 | :return: None 326 | """ 327 | if len(node.input) != 1: 328 | assert AttributeError('More than 1 input for floor layer.') 329 | 330 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 331 | 332 | def target_layer(x): 333 | # Floor is absent in keras.backend 334 | import tensorflow as tf 335 | return tf.floor(x) 336 | 337 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 338 | layers[node_name] = lambda_layer(input_0) 339 | lambda_func[keras_name] = target_layer 340 | 341 | 342 | def convert_identity(node, params, layers, lambda_func, node_name, keras_name): 343 | """ 344 | Convert Identity layer 345 | :param node: current operation node 346 | :param params: operation attributes 347 | :param layers: available keras layers 348 | :param lambda_func: function for keras Lambda layer 349 | :param node_name: internal converter name 350 | :param keras_name: resulting layer name 351 | :return: None 352 | """ 353 | if len(node.input) != 1: 354 | assert AttributeError('More than 1 input for itentity layer.') 355 | 356 | layers[node_name] = layers[node.input[0]] 357 | 358 | 359 | def convert_argmax(node, params, layers, lambda_func, node_name, keras_name): 360 | """ 361 | Convert ArgMax layer 362 | :param node: current operation node 363 | :param params: operation attributes 364 | :param layers: available keras layers 365 | :param lambda_func: function for keras Lambda layer 366 | :param node_name: internal converter name 367 | :param keras_name: resulting layer name 368 | :return: None 369 | """ 370 | if len(node.input) != 1: 371 | assert AttributeError('More than 1 input for argmax layer.') 372 | 373 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 374 | axis = params.get("axis", -1) 375 | 376 | def target_layer(x, axis=axis): 377 | import tensorflow as tf 378 | return tf.argmax(x, axis=axis) 379 | 380 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 381 | layers[node_name] = lambda_layer(input_0) 382 | lambda_func[keras_name] = target_layer 383 | 384 | 385 | def convert_reduce_l2(node, params, layers, lambda_func, node_name, keras_name): 386 | """ 387 | Convert ReduceL2 layer 388 | :param node: current operation node 389 | :param params: operation attributes 390 | :param layers: available keras layers 391 | :param lambda_func: function for keras Lambda layer 392 | :param node_name: internal converter name 393 | :param keras_name: resulting layer name 394 | :return: None 395 | """ 396 | if len(node.input) != 1: 397 | assert AttributeError('More than 1 input for reduce_l2 layer.') 398 | 399 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 400 | axis = params.get("axes", [-1]) 401 | keepdims = params.get("keepdims", 0) 402 | 403 | def target_layer(x, axis=axis, keepdims=keepdims): 404 | import tensorflow as tf 405 | return tf.norm(x, axis=axis, keepdims=keepdims == 1) 406 | 407 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 408 | layers[node_name] = lambda_layer(input_0) 409 | lambda_func[keras_name] = target_layer 410 | -------------------------------------------------------------------------------- /onnx2keras/padding_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import logging 3 | from .utils import ensure_tf_type 4 | 5 | 6 | def convert_padding(node, params, layers, 
lambda_func, node_name, keras_name): 7 | """ 8 | Convert Constant layer 9 | :param node: current operation node 10 | :param params: operation attributes 11 | :param layers: available keras layers 12 | :param lambda_func: function for keras Lambda layer 13 | :param node_name: internal converter name 14 | :param keras_name: resulting layer name 15 | :return: None 16 | """ 17 | # It's binary by-default 18 | logger = logging.getLogger("onnx2keras.padding") 19 | params['mode'] = params['mode'].decode('ascii') 20 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 21 | 22 | if 'pads' in params: 23 | pads = params['pads'] 24 | else: 25 | pads = layers[node.input[1]] 26 | 27 | print(pads) 28 | 29 | if params['mode'] == 'constant': 30 | 31 | if 'value' in params and params['value'] != 0.0: 32 | raise AssertionError('Cannot convert non-zero padding') 33 | 34 | # Magic ordering 35 | if len(pads) == 8: 36 | padding_layer = keras.layers.ZeroPadding2D( 37 | padding=((pads[2], pads[6]), (pads[3], pads[7])), 38 | name=keras_name 39 | ) 40 | else: 41 | logger.warning("Caution - no test yet") 42 | padding_layer = keras.layers.ZeroPadding3D( 43 | padding=((pads[2], pads[7]), (pads[3], pads[8]), (pads[4], pads[9])), 44 | name=keras_name 45 | ) 46 | layers[node_name] = padding_layer(input_0) 47 | elif params['mode'] == 'reflect': 48 | 49 | def target_layer(x, pads=pads): 50 | import tensorflow as tf 51 | if len(pads) == 8: 52 | layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT') 53 | else: 54 | logger.warning("Caution - no test yet") 55 | layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT') 56 | return layer 57 | 58 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 59 | layers[node_name] = lambda_layer(input_0) 60 | lambda_func[keras_name] = target_layer 61 | elif params['mode'] == 'edge': 62 | 63 | def target_layer(x, pads=pads): 64 | import tensorflow as tf 65 | if len(pads) == 8: # TODO not tested yet 66 | layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'SYMMETRIC') 67 | else: 68 | logger.warning("Caution - no test yet") 69 | layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'SYMMETRIC') 70 | return layer 71 | 72 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 73 | layers[node_name] = lambda_layer(input_0) 74 | lambda_func[keras_name] = target_layer 75 | 76 | else: 77 | raise AttributeError('Unknown padding') 78 | -------------------------------------------------------------------------------- /onnx2keras/pooling_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import logging 3 | from .utils import ensure_tf_type 4 | 5 | 6 | def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name): 7 | """ 8 | Convert MaxPooling layer 9 | :param node: current operation node 10 | :param params: operation attributes 11 | :param layers: available keras layers 12 | :param lambda_func: function for keras Lambda layer 13 | :param node_name: internal converter name 14 | :param keras_name: resulting layer name 15 | :return: None 16 | """ 17 | logger = logging.getLogger('onnx2keras.maxpool') 18 | 19 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 20 | 21 | kernel_shape = params['kernel_shape'] 22 | stride_shape = params['strides'] 23 | 24 | pads = 
params['pads'] if 'pads' in params else [0, 0, 0, 0, 0, 0] 25 | pad = 'valid' 26 | 27 | if all([shape % 2 == 1 for shape in kernel_shape]) and \ 28 | all([kernel_shape[i] // 2 == pads[i] for i in range(len(kernel_shape))]) and \ 29 | all([shape == 1 for shape in stride_shape]): 30 | pad = 'same' 31 | logger.debug('Use `same` padding parameters.') 32 | else: 33 | logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.') 34 | padding_name = keras_name + '_pad' 35 | if len(kernel_shape) == 2: 36 | padding = None 37 | 38 | if len(pads) == 2 and (pads[0] > 0 or pads[1] > 0): 39 | padding = (pads[0], pads[1]) 40 | elif len(pads) == 4 and (pads[0] > 0 or pads[1] > 0 or pads[2] > 0 or pads[3] > 0): 41 | padding = ((pads[0], pads[2]), (pads[1], pads[3])) 42 | 43 | if padding is not None: 44 | padding_layer = keras.layers.ZeroPadding2D( 45 | padding=padding, 46 | name=padding_name 47 | ) 48 | layers[padding_name] = input_0 = padding_layer(input_0) 49 | else: # 3D padding 50 | padding_layer = keras.layers.ZeroPadding3D( 51 | padding=pads[:len(stride_shape)], 52 | name=padding_name 53 | ) 54 | layers[padding_name] = input_0 = padding_layer(input_0) 55 | if len(kernel_shape) == 2: 56 | pooling = keras.layers.MaxPooling2D( 57 | pool_size=kernel_shape, 58 | strides=stride_shape, 59 | padding=pad, 60 | name=keras_name, 61 | data_format='channels_first' 62 | ) 63 | else: 64 | pooling = keras.layers.MaxPooling3D( 65 | pool_size=kernel_shape, 66 | strides=stride_shape, 67 | padding=pad, 68 | name=keras_name, 69 | data_format='channels_first' 70 | ) 71 | 72 | layers[node_name] = pooling(input_0) 73 | 74 | 75 | def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name): 76 | """ 77 | Convert AvgPooling layer 78 | :param node: current operation node 79 | :param params: operation attributes 80 | :param layers: available keras layers 81 | :param lambda_func: function for keras Lambda layer 82 | :param node_name: internal converter name 83 | :param keras_name: resulting layer name 84 | :return: None 85 | """ 86 | logger = logging.getLogger('onnx2keras.avgpool') 87 | 88 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 89 | 90 | kernel_shape = params['kernel_shape'] 91 | stride_shape = params['strides'] 92 | 93 | pads = params['pads'] if 'pads' in params else [0, 0, 0, 0, 0, 0] 94 | pad = 'valid' 95 | 96 | if all([shape % 2 == 1 for shape in kernel_shape]) and \ 97 | all([kernel_shape[i] // 2 == pads[i] for i in range(len(kernel_shape))]) and \ 98 | all([shape == 1 for shape in stride_shape]): 99 | pad = 'same' 100 | logger.debug('Use `same` padding parameters.') 101 | else: 102 | logger.warning('Unable to use `same` padding. 
Add ZeroPadding2D layer to fix shapes.') 103 | padding_name = keras_name + '_pad' 104 | if len(kernel_shape) == 2: 105 | padding_layer = keras.layers.ZeroPadding2D( 106 | padding=pads[:len(stride_shape)], 107 | name=padding_name 108 | ) 109 | else: # 3D padding 110 | padding_layer = keras.layers.ZeroPadding3D( 111 | padding=pads[:len(stride_shape)], 112 | name=padding_name 113 | ) 114 | layers[padding_name] = input_0 = padding_layer(input_0) 115 | if len(kernel_shape) == 2: 116 | pooling = keras.layers.AveragePooling2D( 117 | pool_size=kernel_shape, 118 | strides=stride_shape, 119 | padding=pad, 120 | name=keras_name, 121 | data_format='channels_first' 122 | ) 123 | else: 124 | pooling = keras.layers.AveragePooling3D( 125 | pool_size=kernel_shape, 126 | strides=stride_shape, 127 | padding=pad, 128 | name=keras_name, 129 | data_format='channels_first' 130 | ) 131 | layers[node_name] = pooling(input_0) 132 | 133 | 134 | def convert_global_avg_pool(node, params, layers, lambda_func, node_name, keras_name): 135 | """ 136 | Convert GlobalAvgPool layer 137 | :param node: current operation node 138 | :param params: operation attributes 139 | :param layers: available keras layers 140 | :param lambda_func: function for keras Lambda layer 141 | :param node_name: internal converter name 142 | :param keras_name: resulting layer name 143 | :return: None 144 | """ 145 | logger = logging.getLogger('onnx2keras.global_avg_pool') 146 | 147 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 148 | 149 | global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first', name=keras_name) 150 | input_0 = global_pool(input_0) 151 | 152 | def target_layer(x): 153 | from tensorflow import keras 154 | return keras.backend.expand_dims(x) 155 | 156 | logger.debug('Now expand dimensions twice.') 157 | lambda_layer1 = keras.layers.Lambda(target_layer, name=keras_name + '_EXPAND1') 158 | lambda_layer2 = keras.layers.Lambda(target_layer, name=keras_name + '_EXPAND2') 159 | input_0 = lambda_layer1(input_0) # double expand dims 160 | layers[node_name] = lambda_layer2(input_0) 161 | lambda_func[keras_name + '_EXPAND1'] = target_layer 162 | lambda_func[keras_name + '_EXPAND2'] = target_layer 163 | -------------------------------------------------------------------------------- /onnx2keras/reshape_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import numpy as np 3 | import logging 4 | from .utils import is_numpy, ensure_tf_type, ensure_numpy_type 5 | 6 | 7 | def convert_transpose(node, params, layers, lambda_func, node_name, keras_name): 8 | """ 9 | Convert transpose. 10 | :param node: current operation node 11 | :param params: operation attributes 12 | :param layers: available keras layers 13 | :param lambda_func: function for keras Lambda layer 14 | :param node_name: internal converter name 15 | :param keras_name: resulting layer name 16 | :return: None 17 | """ 18 | logger = logging.getLogger('onnx2keras.transpose') 19 | input_name = node.input[0] 20 | 21 | if params['perm'][0] != 0: 22 | logger.warning('Can\'t permute batch dimension. 
Result may be wrong.') 23 | if is_numpy(layers[input_name]): 24 | logger.warning('Transposing numpy array.') 25 | layers[node_name] = np.transpose(layers[input_name], axes=params['perm']) 26 | else: 27 | raise NotImplementedError('Can\'t modify this type of data') 28 | else: 29 | permute = keras.layers.Permute(params['perm'][1:], name=keras_name) 30 | layers[node_name] = permute(layers[input_name]) 31 | 32 | 33 | def convert_shape(node, params, layers, lambda_func, node_name, keras_name): 34 | """ 35 | Convert shape. 36 | :param node: current operation node 37 | :param params: operation attributes 38 | :param layers: available keras layers 39 | :param lambda_func: function for keras Lambda layer 40 | :param node_name: internal converter name 41 | :param keras_name: resulting layer name 42 | :return: None 43 | """ 44 | logger = logging.getLogger('onnx2keras.shape') 45 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 46 | 47 | logger.debug('Actual shape:') 48 | logger.debug(np.array(input_0.shape)) 49 | 50 | shapes = [] 51 | for i in input_0.shape: 52 | if i is not None: 53 | shapes.append(i) 54 | else: 55 | shapes.append(None) 56 | 57 | layers[node_name] = np.array(shapes) 58 | 59 | 60 | def convert_gather(node, params, layers, lambda_func, node_name, keras_name): 61 | """ 62 | Convert gather. 63 | :param node: current operation node 64 | :param params: operation attributes 65 | :param layers: available keras layers 66 | :param lambda_func: function for keras Lambda layer 67 | :param node_name: internal converter name 68 | :param keras_name: resulting layer name 69 | :return: None 70 | """ 71 | logger = logging.getLogger('onnx2keras.gather') 72 | 73 | if is_numpy(layers[node.input[0]]) and is_numpy(layers[node.input[1]]): 74 | logger.debug('Gather from numpy array') 75 | 76 | if params['axis'] == 0: 77 | layers[node_name] = np.array(layers[node.input[0]][layers[node.input[1]]]) 78 | elif params['axis'] == 1: 79 | layers[node_name] = np.array(layers[:, node.input[0]][layers[node.input[1]]]) 80 | elif params['axis'] == 2: 81 | layers[node_name] = np.array(layers[:, :, node.input[0]][layers[node.input[1]]]) 82 | elif params['axis'] == 3: 83 | layers[node_name] = np.array(layers[:, :, :, node.input[0]][layers[node.input[1]]]) 84 | else: 85 | raise AttributeError('Can\'t gather by axis more than 3.') 86 | else: 87 | raise AttributeError('Can\'t gather from tf tensor.') 88 | 89 | 90 | def convert_concat(node, params, layers, lambda_func, node_name, keras_name): 91 | """ 92 | Convert concat. 93 | :param node: current operation node 94 | :param params: operation attributes 95 | :param layers: available keras layers 96 | :param lambda_func: function for keras Lambda layer 97 | :param node_name: internal converter name 98 | :param keras_name: resulting layer name 99 | :return: None 100 | """ 101 | logger = logging.getLogger('onnx2keras.concat') 102 | 103 | layer_input = [layers[node.input[i]] for i in range(len(node.input))] 104 | 105 | if all([is_numpy(layers[node.input[i]]) for i in range(len(node.input))]): 106 | logger.debug('Concat numpy arrays.') 107 | layers[node_name] = np.concatenate(layer_input, axis=params['axis']) 108 | else: 109 | logger.debug('Concat Keras layers.') 110 | if len(layer_input) > 1: 111 | try: 112 | layers[node_name] = keras.layers.concatenate(inputs=layer_input, 113 | axis=params['axis'], 114 | name=keras_name) 115 | except: 116 | logger.warning('!!! 
IMPORTANT INFORMATION !!!') 117 | logger.warning('Something goes wrong with concat layers. Will use TF fallback.') 118 | logger.warning('---') 119 | 120 | def target_layer(x, axis=params['axis']): 121 | import tensorflow as tf 122 | x = tf.concat(x, axis=axis) 123 | return x 124 | 125 | lambda_layer = keras.layers.Lambda(target_layer, name="%s_CHW" % keras_name) 126 | layers[node_name] = lambda_layer(layer_input) 127 | lambda_func["%s_CHW" % keras_name] = target_layer 128 | else: 129 | layers[node_name] = layer_input[0] 130 | 131 | 132 | def convert_reshape(node, params, layers, lambda_func, node_name, keras_name): 133 | """ 134 | Convert reshape. 135 | :param node: current operation node 136 | :param params: operation attributes 137 | :param layers: available keras layers 138 | :param lambda_func: function for keras Lambda layer 139 | :param node_name: internal converter name 140 | :param keras_name: resulting layer name 141 | :return: None 142 | """ 143 | logger = logging.getLogger('onnx2keras.reshape') 144 | 145 | input_0 = layers[node.input[0]] 146 | input_1 = layers[node.input[1]] 147 | 148 | if is_numpy(input_1): 149 | logger.debug('The second argument is numpy array.') 150 | if is_numpy(input_0): 151 | logger.debug('The first argument is numpy array. Apply np.reshape.') 152 | layers[node_name] = np.reshape(input_0, np.int32(input_1)) 153 | else: 154 | if params['change_ordering']: 155 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 156 | 157 | # Fix critical issue with NHWC 158 | if input_1[0] is None and input_1[1] == -1: 159 | logger.warning('!!! IMPORTANT INFORMATION !!!') 160 | logger.warning('The target shape if [None, -1] that means flatten.') 161 | logger.warning('But the target ordering is NHWC, so we cant simply perform flatten') 162 | logger.warning('The layer will be converted as lambda with tf.transpose') 163 | logger.warning('---') 164 | 165 | def target_layer(x): 166 | import tensorflow as tf 167 | x = tf.transpose(x, [0, 3, 1, 2]) 168 | return x 169 | 170 | lambda_layer = keras.layers.Lambda(target_layer, name="%s_CHW" % keras_name) 171 | layers[node_name] = lambda_layer(input_0) 172 | lambda_func[keras_name] = target_layer 173 | else: 174 | layers[node_name] = input_0 175 | 176 | reshape = keras.layers.Reshape(np.int32(input_1[1:]), name=keras_name) 177 | layers[node_name] = reshape(layers[node_name]) 178 | 179 | else: 180 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 181 | logger.debug('The first argument is Keras/tf layer. Apply keras.Reshape.') 182 | logger.debug('Target shape :') 183 | logger.debug(np.int32(input_1[1:])) 184 | 185 | if len(np.int32(input_1[1:])) == 1 and np.int32(input_1[1:])[0] == -1: 186 | logger.debug('The first argument is Keras/tf layer. Apply keras.Flatten.') 187 | flatten = keras.layers.Flatten(name=keras_name) 188 | layers[node_name] = flatten(input_0) 189 | else: 190 | reshape = keras.layers.Reshape(np.int32(input_1[1:]), name=keras_name) 191 | layers[node_name] = reshape(input_0) 192 | else: 193 | raise AttributeError('Can\'t reshape dynamic size.') 194 | 195 | 196 | def convert_unsqueeze(node, params, layers, lambda_func, node_name, keras_name): 197 | """ 198 | Convert unsqueeze. 
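# Illustrative sketch of the Reshape mapping used by convert_reshape above
# (made-up shapes): keras.layers.Reshape takes the target shape *without* the
# batch axis, so an ONNX reshape to (1, 2, 8, 8) becomes Reshape((2, 8, 8)),
# while a target of [batch, -1] degenerates to a plain Flatten.
from tensorflow import keras
inp_demo = keras.layers.Input(shape=(4, 4, 8))            # batch axis is implicit
reshaped = keras.layers.Reshape((2, 8, 8))(inp_demo)      # ONNX target shape[1:]
flattened = keras.layers.Flatten()(inp_demo)              # ONNX target [batch, -1]
assert reshaped.shape.as_list() == [None, 2, 8, 8]
assert flattened.shape.as_list() == [None, 128]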
199 | :param node: current operation node 200 | :param params: operation attributes 201 | :param layers: available keras layers 202 | :param lambda_func: function for keras Lambda layer 203 | :param node_name: internal converter name 204 | :param keras_name: resulting layer name 205 | :return: None 206 | """ 207 | logger = logging.getLogger('onnx2keras.unsqueeze') 208 | 209 | if len(node.input) != 1: 210 | raise AttributeError('Number of inputs is not equal 1 for unsqueeze layer') 211 | 212 | if is_numpy(layers[node.input[0]]): 213 | logger.debug('Work with numpy types.') 214 | layers[node_name] = layers[node.input[0]] 215 | for axis in params['axes']: 216 | layers[node_name] = np.expand_dims(layers[node_name], axis) 217 | else: 218 | 219 | if len(params['axes']) != 1: 220 | raise AttributeError('Number of axes is not equal 1. Cannot unsqueeze') 221 | 222 | # if params['axes'][0] != 0: 223 | # raise AttributeError('Axes is not 0. Cannot unsqueeze') 224 | 225 | def target_layer(x, axis=params['axes'][0]): 226 | from tensorflow import keras 227 | return keras.backend.expand_dims(x, axis) 228 | 229 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 230 | layers[node_name] = lambda_layer(layers[node.input[0]]) 231 | lambda_func[keras_name] = target_layer 232 | 233 | 234 | def convert_flatten(node, params, layers, lambda_func, node_name, keras_name): 235 | """ 236 | Convert flatten. 237 | :param node: current operation node 238 | :param params: operation attributes 239 | :param layers: available keras layers 240 | :param lambda_func: function for keras Lambda layer 241 | :param node_name: internal converter name 242 | :param keras_name: resulting layer name 243 | :return: None 244 | """ 245 | logger = logging.getLogger('onnx2keras.flatten') 246 | 247 | if len(node.input) != 1: 248 | raise AttributeError('Number of inputs is not equal 1 for flatten layer') 249 | 250 | logger.debug('Convert inputs to Keras/TF layers if needed.') 251 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 252 | 253 | if params['change_ordering']: 254 | # Fix critical issue with flatten 255 | def target_layer(x): 256 | import tensorflow as tf 257 | x = tf.transpose(x, [0, 3, 1, 2]) 258 | return x 259 | 260 | lambda_layer = keras.layers.Lambda(target_layer, name="%s_CHW" % keras_name) 261 | tensor_chw = lambda_layer(input_0) 262 | flatten = keras.layers.Flatten(name=keras_name) 263 | layers[node_name] = flatten(tensor_chw) 264 | lambda_func["%s_CHW" % keras_name] = target_layer 265 | else: 266 | reshape = keras.layers.Reshape([-1], name=keras_name) 267 | layers[node_name] = reshape(input_0) 268 | 269 | 270 | def convert_slice(node, params, layers, lambda_func, node_name, keras_name): 271 | """ 272 | Convert slice. 
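# Illustrative sketch (made-up 1x3x2x2 example) of why convert_flatten above
# transposes NHWC back to NCHW before flattening when change_ordering is set:
# flattening an NHWC tensor directly enumerates elements in a different order
# than PyTorch's NCHW flatten would.
import numpy as np
x_nchw = np.arange(12).reshape(1, 3, 2, 2)                  # PyTorch-style layout
x_nhwc = np.transpose(x_nchw, [0, 2, 3, 1])                 # same data, NHWC layout
naive = x_nhwc.reshape(1, -1)                               # wrong element order
fixed = np.transpose(x_nhwc, [0, 3, 1, 2]).reshape(1, -1)   # transpose back, then flatten
assert not np.array_equal(naive, x_nchw.reshape(1, -1))
assert np.array_equal(fixed, x_nchw.reshape(1, -1))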
273 | :param node: current operation node 274 | :param params: operation attributes 275 | :param layers: available keras layers 276 | :param lambda_func: function for keras Lambda layer 277 | :param node_name: internal converter name 278 | :param keras_name: resulting layer name 279 | :return: None 280 | """ 281 | logger = logging.getLogger('onnx2keras.slice') 282 | 283 | if is_numpy(layers[node.input[0]]): 284 | if params['change_ordering']: 285 | raise NotImplementedError("change_ordering for Slice is not implemented") 286 | logger.debug('Slice numpy constants') 287 | if 'axes' in params: 288 | if len(params["axes"]) != 1: 289 | raise NotImplementedError("Multiple axes in Slice is not implemented") 290 | axes = params["axes"][0] 291 | ends = params["ends"][0] 292 | starts = params["starts"][0] 293 | else: 294 | raise AttributeError('Not implemented') 295 | 296 | if axes == 0: 297 | layers[node_name] = layers[node.input[0]][starts:ends] 298 | elif axes == 1: 299 | layers[node_name] = layers[node.input[0]][:, starts:ends] 300 | elif axes == 2: 301 | layers[node_name] = layers[node.input[0]][:, :, starts:ends] 302 | elif axes == 3: 303 | layers[node_name] = layers[node.input[0]][:, :, :, starts:ends] 304 | else: 305 | raise AttributeError('Not implemented') 306 | else: 307 | logger.debug('Convert inputs to Keras/TF layers if needed.') 308 | input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) 309 | layers[node_name] = input_0 310 | 311 | if 'axes' in params: 312 | if len(params["axes"]) != 1: 313 | raise NotImplementedError("Multiple axes in Slice is not implemented") 314 | axes = params["axes"][0] 315 | ends = params["ends"][0] 316 | starts = params["starts"][0] 317 | else: 318 | starts = ensure_numpy_type(layers[node.input[1]]) 319 | ends = ensure_numpy_type(layers[node.input[2]]) 320 | axes = ensure_numpy_type(layers[node.input[3]]) 321 | 322 | for i in range(len(starts)): 323 | if axes[i] != i: 324 | assert AttributeError('Cant slice permuted axes') 325 | 326 | if isinstance(axes, list) or isinstance(axes, np.ndarray): 327 | if params['change_ordering']: 328 | raise NotImplementedError("change_ordering for Slice is not implemented") 329 | 330 | def target_layer(x, axes=np.array(axes), starts=starts, ends=ends): 331 | import tensorflow as tf 332 | rank = max(axes) 333 | s = [0 for _ in range(rank+1)] 334 | e = [0 for _ in range(rank+1)] 335 | mask = 0xff 336 | for _s, _e, axis in zip(starts, ends, axes): 337 | s[axis] = _s 338 | e[axis] = _e 339 | mask = mask ^ (0x1 << axis) 340 | return tf.strided_slice(x, s, e, begin_mask=mask, end_mask=mask) 341 | 342 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 343 | layers[node_name] = lambda_layer(input_0) 344 | lambda_func[keras_name] = target_layer 345 | else: 346 | def target_layer(x, axis=axes, starts=starts, ends=ends): 347 | import tensorflow as tf 348 | rank = axis 349 | s = [0 for _ in range(rank+1)] 350 | e = [0 for _ in range(rank+1)] 351 | mask = 0xff 352 | s[axis] = starts 353 | e[axis] = ends 354 | mask = mask ^ (0x1 << axis) 355 | return tf.strided_slice(x, s, e, begin_mask=mask, end_mask=mask) 356 | 357 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 358 | layers[node_name] = lambda_layer(input_0) 359 | lambda_func[keras_name] = target_layer 360 | 361 | 362 | def convert_squeeze(node, params, layers, lambda_func, node_name, keras_name): 363 | """ 364 | Convert Squeeze layer 365 | :param node: current operation node 366 | :param params: 
operation attributes 367 | :param layers: available keras layers 368 | :param lambda_func: function for keras Lambda layer 369 | :param node_name: internal converter name 370 | :param keras_name: resulting layer name 371 | :return: None 372 | """ 373 | if len(node.input) != 1: 374 | assert AttributeError('More than 1 input for squeeze layer.') 375 | 376 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 377 | 378 | def target_layer(x, axis=params['axes'][0]): 379 | from tensorflow import keras 380 | return keras.backend.squeeze(x, axis) 381 | 382 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 383 | layers[node_name] = lambda_layer(input_0) 384 | lambda_func[keras_name] = target_layer 385 | 386 | 387 | def convert_expand(node, params, layers, lambda_func, node_name, keras_name): 388 | """ 389 | Convert Expand layer 390 | :param node: current operation node 391 | :param params: operation attributes 392 | :param layers: available keras layers 393 | :param lambda_func: function for keras Lambda layer 394 | :param node_name: internal converter name 395 | :param keras_name: resulting layer name 396 | :return: None 397 | """ 398 | if len(node.input) != 2: 399 | assert AttributeError('More than 2 input for expand layer.') 400 | 401 | input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) 402 | input_1 = ensure_numpy_type(layers[node.input[1]]) 403 | 404 | def target_layer(x, shape=input_1): 405 | from tensorflow import keras 406 | 407 | # if (len(x.shape) == len(shape)): 408 | # for axis, new_shape in enumerate(shape): 409 | # if axis == 0: 410 | # continue 411 | # x = keras.backend.repeat_elements(x, int(new_shape // x.shape[axis]), axis) 412 | # pass 413 | 414 | x = keras.backend.repeat_elements(x, int(shape[1] // x.shape[1]), 1) 415 | x = keras.backend.repeat_elements(x, int(shape[2] // x.shape[2]), 2) 416 | return x 417 | 418 | # Proper version 419 | # return tf.broadcast_to(x, (1, *shape[1:])) 420 | 421 | lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) 422 | layers[node_name] = lambda_layer(input_0) 423 | lambda_func[keras_name] = target_layer 424 | -------------------------------------------------------------------------------- /onnx2keras/upsampling_layers.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import numpy as np 3 | import logging 4 | 5 | 6 | def convert_upsample(node, params, layers, lambda_func, node_name, keras_name): 7 | """ 8 | Convert upsample. 9 | :param node: current operation node 10 | :param params: operation attributes 11 | :param layers: available keras layers 12 | :param lambda_func: function for keras Lambda layer 13 | :param node_name: internal converter name 14 | :param keras_name: resulting layer name 15 | :return: None 16 | """ 17 | logger = logging.getLogger('onnx2keras.upsample') 18 | logger.warning('!!! EXPERIMENTAL SUPPORT (upsample) !!!') 19 | 20 | if "scales" in params: 21 | # for opset version - 7 22 | if len(node.input) != 1: 23 | raise AttributeError('Unsupported number of inputs') 24 | scale = np.uint8(params['scales'][-2:]) 25 | else: 26 | # for opset version - 9+ 27 | # Upsample since opset version 9 uses input[1] as 'scales' instead of attributes. 
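# Illustrative sketch (made-up values): a full ONNX 'scales' tensor is ordered
# [batch, channel, height, width], so only its last two entries are the spatial
# factors that keras.layers.UpSampling2D expects.
import numpy as np
from tensorflow import keras
scales_demo = np.array([1.0, 1.0, 2.0, 2.0])           # N, C, H, W scale factors
size_demo = tuple(int(s) for s in scales_demo[-2:])    # -> (2, 2)
up_demo = keras.layers.UpSampling2D(size=size_demo, data_format='channels_first')
x_demo = np.random.rand(1, 3, 4, 4).astype(np.float32)
assert tuple(up_demo(x_demo).shape) == (1, 3, 8, 8)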
28 | scale = np.uint8(layers[node.input[1]][-2:]) 29 | 30 | if params['mode'].decode('utf-8') != 'nearest': 31 | logger.error('Cannot convert non-nearest upsampling.') 32 | raise AssertionError('Cannot convert non-nearest upsampling') 33 | 34 | upsampling = keras.layers.UpSampling2D( 35 | size=scale, name=keras_name 36 | ) 37 | 38 | layers[node_name] = upsampling(layers[node.input[0]]) 39 | -------------------------------------------------------------------------------- /onnx2keras/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tensorflow import keras 3 | 4 | 5 | def is_numpy(obj): 6 | """ 7 | Check of the type is instance of numpy array 8 | :param obj: object to check 9 | :return: True if the object is numpy-type array. 10 | """ 11 | return isinstance(obj, (np.ndarray, np.generic)) 12 | 13 | 14 | def ensure_numpy_type(obj): 15 | """ 16 | Raise exception if it's not a numpy 17 | :param obj: object to check 18 | :return: numpy object 19 | """ 20 | if is_numpy(obj): 21 | return obj 22 | else: 23 | raise AttributeError('Not a numpy type.') 24 | 25 | 26 | def ensure_tf_type(obj, fake_input_layer=None, name=None): 27 | """ 28 | Convert to Keras Constant if needed 29 | :param obj: numpy / tf type 30 | :param fake_input_layer: fake input layer to add constant 31 | :return: tf type 32 | """ 33 | if is_numpy(obj): 34 | if obj.dtype == np.int64: 35 | obj = np.int32(obj) 36 | 37 | def target_layer(_, inp=obj, dtype=obj.dtype.name): 38 | import numpy as np 39 | import tensorflow as tf 40 | if not isinstance(inp, (np.ndarray, np.generic)): 41 | inp = np.array(inp, dtype=dtype) 42 | return tf.constant(inp, dtype=inp.dtype) 43 | 44 | lambda_layer = keras.layers.Lambda(target_layer, name=name) 45 | return lambda_layer(fake_input_layer) 46 | else: 47 | return obj 48 | 49 | 50 | def check_torch_keras_error(model, k_model, input_np, epsilon=1e-5, change_ordering=False): 51 | """ 52 | Check difference between Torch and Keras models 53 | :param model: torch model 54 | :param k_model: keras model 55 | :param input_np: input data as numpy array or list of numpy array 56 | :param epsilon: allowed difference 57 | :param change_ordering: change ordering for keras input 58 | :return: actual difference 59 | """ 60 | from torch.autograd import Variable 61 | import torch 62 | 63 | initial_keras_image_format = keras.backend.image_data_format() 64 | 65 | if isinstance(input_np, np.ndarray): 66 | input_np = [input_np.astype(np.float32)] 67 | 68 | 69 | input_var = [Variable(torch.FloatTensor(i)) for i in input_np] 70 | pytorch_output = model(*input_var) 71 | if not isinstance(pytorch_output, tuple): 72 | pytorch_output = [pytorch_output.data.numpy()] 73 | else: 74 | pytorch_output = [p.data.numpy() for p in pytorch_output] 75 | 76 | if change_ordering: 77 | # change image data format 78 | 79 | # to proper work with Lambda layers that transpose weights based on image_data_format 80 | keras.backend.set_image_data_format("channels_last") 81 | 82 | _input_np = [] 83 | for i in input_np: 84 | axes = list(range(len(i.shape))) 85 | axes = axes[0:1] + axes[2:] + axes[1:2] 86 | _input_np.append(np.transpose(i, axes)) 87 | input_np = _input_np 88 | 89 | # run keras model 90 | keras_output = k_model.predict(input_np) 91 | if not isinstance(keras_output, list): 92 | keras_output = [keras_output] 93 | 94 | # change image data format if output shapes are different (e.g. 
the same for global_avgpool2d) 95 | _koutput = [] 96 | for i, k in enumerate(keras_output): 97 | if k.shape != pytorch_output[i].shape: 98 | axes = list(range(len(k.shape))) 99 | axes = axes[0:1] + axes[-1:] + axes[1:-1] 100 | k = np.transpose(k, axes) 101 | _koutput.append(k) 102 | keras_output = _koutput 103 | else: 104 | keras.backend.set_image_data_format("channels_first") 105 | keras_output = k_model.predict(input_np) 106 | if not isinstance(keras_output, list): 107 | keras_output = [keras_output] 108 | 109 | # reset to previous image_data_format 110 | keras.backend.set_image_data_format(initial_keras_image_format) 111 | 112 | max_error = 0 113 | for p, k in zip(pytorch_output, keras_output): 114 | error = np.max(np.abs(p - k)) 115 | np.testing.assert_allclose(p, k, atol=epsilon, rtol=0.0) 116 | if error > max_error: 117 | max_error = error 118 | 119 | return max_error 120 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow 2 | tensorflow-addons 3 | numpy 4 | onnx -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | 4 | def parse_requirements(filename): 5 | """ load requirements from a pip requirements file """ 6 | lineiter = (line.strip() for line in open(filename)) 7 | return [line for line in lineiter if line and not line.startswith("#")] 8 | 9 | 10 | reqs = parse_requirements('requirements.txt') 11 | 12 | 13 | with open('README.md') as f: 14 | long_description = f.read() 15 | 16 | 17 | setup(name='onnx2keras', 18 | version='0.0.24', 19 | description='The deep learning models converter', 20 | long_description=long_description, 21 | long_description_content_type='text/markdown', 22 | url='https://github.com/gmalivenko/onnx2keras', 23 | author='Grigory Malivenko', 24 | author_email='', 25 | classifiers=[ 26 | 'Development Status :: 3 - Alpha', 27 | 'Intended Audience :: Science/Research', 28 | 'License :: OSI Approved :: MIT License', 29 | 'Operating System :: OS Independent', 30 | 'Programming Language :: Python', 31 | 'Topic :: Scientific/Engineering :: Image Recognition', 32 | ], 33 | keywords='machine-learning deep-learning pytorch keras neuralnetwork vgg resnet ' 34 | 'densenet drn dpn darknet squeezenet mobilenet', 35 | license='MIT', 36 | packages=find_packages(), 37 | install_requires=reqs, 38 | zip_safe=False) 39 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | Testing 2 | 3 | 1. Install requirements: 4 | ``` 5 | pip install -r test/requirements.txt 6 | ``` 7 | 8 | 2. Run tests 9 | 10 | Some tests are too slow. You can run fast tests and then slow tests. 
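For reference, a test opts into the slow group by carrying a `slow` pytest marker, roughly like this (illustrative sketch, not a file from this repository):

```python
import pytest

@pytest.mark.slow
def test_full_model_conversion():
    # a heavy end-to-end conversion check would go here
    ...
```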
11 | 12 | To run only fast tests: 13 | ``` 14 | pytest test/ -p no:warnings -m "not slow" 15 | ``` 16 | To run only slow tests: 17 | ``` 18 | pytest test/ -p no:warnings -m "slow" 19 | ``` 20 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/__init__.py -------------------------------------------------------------------------------- /test/layers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/__init__.py -------------------------------------------------------------------------------- /test/layers/activations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/activations/__init__.py -------------------------------------------------------------------------------- /test/layers/activations/test_elu.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import random 3 | import numpy as np 4 | import pytest 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerELU(nn.Module): 10 | """ 11 | Test for nn.layers based types 12 | """ 13 | def __init__(self): 14 | super(LayerELU, self).__init__() 15 | self.alpha = random.random() 16 | self.elu = nn.ELU(alpha=self.alpha) 17 | 18 | def forward(self, x): 19 | x = self.elu(x) 20 | return x 21 | 22 | 23 | class FPELU(nn.Module): 24 | """ 25 | Test for nn.functional types 26 | """ 27 | def __init__(self): 28 | super(FPELU, self).__init__() 29 | self.alpha = random.random() 30 | 31 | def forward(self, x): 32 | from torch.nn import functional as F 33 | return F.elu(x, alpha=self.alpha) 34 | 35 | 36 | @pytest.mark.repeat(10) 37 | @pytest.mark.parametrize('change_ordering', [True, False]) 38 | def test_layer_elu(change_ordering): 39 | model = LayerELU() 40 | model.eval() 41 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 42 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 43 | 44 | 45 | @pytest.mark.repeat(10) 46 | @pytest.mark.parametrize('change_ordering', [True, False]) 47 | def test_fp_elu(change_ordering): 48 | model = FPELU() 49 | model.eval() 50 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 51 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 52 | -------------------------------------------------------------------------------- /test/layers/activations/test_hard_tanh.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import random 3 | import pytest 4 | import numpy as np 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerHardtanh(nn.Module): 10 | """ 11 | Test for nn.layers based types 12 | """ 13 | def __init__(self): 14 | super(LayerHardtanh, self).__init__() 15 | self.min_val = random.random() 16 | self.max_val = self.min_val + random.random() 17 | self.htanh = nn.Hardtanh(min_val=self.min_val, max_val=self.max_val) 18 | 19 | def forward(self, x): 20 | x = self.htanh(x) 21 | return x 22 | 23 | 24 | class FHardtanh(nn.Module): 25 | """ 26 | Test for nn.functional 
types 27 | """ 28 | def __init__(self): 29 | super(FHardtanh, self).__init__() 30 | self.min_val = random.random() 31 | self.max_val = self.min_val + random.random() 32 | 33 | def forward(self, x): 34 | from torch.nn import functional as F 35 | return F.hardtanh(x, min_val=self.min_val, max_val=self.max_val) 36 | 37 | 38 | @pytest.mark.repeat(10) 39 | @pytest.mark.parametrize('change_ordering', [True, False]) 40 | def test_layer_hardtanh(change_ordering): 41 | model = LayerHardtanh() 42 | model.eval() 43 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 44 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 45 | 46 | 47 | @pytest.mark.repeat(10) 48 | @pytest.mark.parametrize('change_ordering', [True, False]) 49 | def test_f_hardtanh(change_ordering): 50 | model = LayerHardtanh() 51 | model.eval() 52 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 53 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 54 | -------------------------------------------------------------------------------- /test/layers/activations/test_leaky_relu.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import random 3 | import pytest 4 | import numpy as np 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerLeakyReLU(nn.Module): 10 | """ 11 | Test for nn.layers based types 12 | """ 13 | def __init__(self): 14 | super(LayerLeakyReLU, self).__init__() 15 | self.negative_slope = random.random() 16 | self.leaky_relu = nn.LeakyReLU(negative_slope=self.negative_slope) 17 | 18 | def forward(self, x): 19 | x = self.leaky_relu(x) 20 | return x 21 | 22 | 23 | class FLeakyReLU(nn.Module): 24 | """ 25 | Test for nn.functional types 26 | """ 27 | def __init__(self): 28 | super(FLeakyReLU, self).__init__() 29 | self.negative_slope = random.random() 30 | 31 | def forward(self, x): 32 | from torch.nn import functional as F 33 | return F.leaky_relu(x, self.negative_slope) 34 | 35 | 36 | @pytest.mark.repeat(10) 37 | @pytest.mark.parametrize('change_ordering', [True, False]) 38 | def test_layer_leaky_relu(change_ordering): 39 | model = LayerLeakyReLU() 40 | model.eval() 41 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 42 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 43 | 44 | 45 | @pytest.mark.repeat(10) 46 | @pytest.mark.parametrize('change_ordering', [True, False]) 47 | def test_f_leaky_relu(change_ordering): 48 | model = FLeakyReLU() 49 | model.eval() 50 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 51 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 52 | -------------------------------------------------------------------------------- /test/layers/activations/test_log_sigmoid.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import pytest 3 | import numpy as np 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerLogSigmoid(nn.Module): 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self): 13 | super(LayerLogSigmoid, self).__init__() 14 | self.sig = nn.LogSigmoid() 15 | 16 | def forward(self, x): 17 | x = self.sig(x) 18 | return x 19 | 20 | 21 | class FLogSigmoid(nn.Module): 22 | """ 23 | Test for nn.functional types 24 | """ 25 | def __init__(self): 26 | super(FLogSigmoid, self).__init__() 27 | 28 | def forward(self, x): 29 | from torch.nn import 
functional as F 30 | return F.logsigmoid(x) 31 | 32 | 33 | @pytest.mark.parametrize('change_ordering', [True, False]) 34 | def test_layer_logsigmoid(change_ordering): 35 | model = LayerLogSigmoid() 36 | model.eval() 37 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 38 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 39 | 40 | 41 | @pytest.mark.parametrize('change_ordering', [True, False]) 42 | def test_f_logsigmoid(change_ordering): 43 | model = FLogSigmoid() 44 | model.eval() 45 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 46 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 47 | -------------------------------------------------------------------------------- /test/layers/activations/test_log_softmax.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerLogSoftmax(nn.Module): # not supported by onnx 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self, dim): 13 | super(LayerLogSoftmax, self).__init__() 14 | self.dim = dim 15 | self.softmax = nn.LogSoftmax(dim=self.dim) 16 | 17 | def forward(self, x): 18 | x = self.softmax(x) 19 | return x 20 | 21 | 22 | class FLogSoftmax(nn.Module): 23 | """ 24 | Test for nn.functional types 25 | """ 26 | def __init__(self, dim): 27 | super(FLogSoftmax, self).__init__() 28 | self.dim = dim 29 | 30 | def forward(self, x): 31 | from torch.nn import functional as F 32 | return F.softmax(x, self.dim) 33 | 34 | 35 | @pytest.mark.parametrize('change_ordering', [False]) 36 | @pytest.mark.parametrize('dim', [0, 1, 2, 3]) 37 | def test_f_logsoftmax(change_ordering, dim): 38 | model = FLogSoftmax(dim) 39 | model.eval() 40 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 41 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 42 | -------------------------------------------------------------------------------- /test/layers/activations/test_prelu.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | import numpy as np 4 | import pytest 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerPReLU(nn.Module): 10 | """ 11 | Test for nn.layers based types 12 | """ 13 | def __init__(self, num_params=3): 14 | super(LayerPReLU, self).__init__() 15 | self.num_params = num_params 16 | self.prelu = nn.PReLU(num_params) 17 | 18 | def forward(self, x): 19 | x = self.prelu(x) 20 | return x 21 | 22 | 23 | class FPReLU(nn.Module): 24 | """ 25 | Test for nn.functional types 26 | """ 27 | def __init__(self, num_params=3): 28 | super(FPReLU, self).__init__() 29 | self.num_params = num_params 30 | 31 | def forward(self, x): 32 | from torch.nn import functional as F 33 | weights = torch.FloatTensor(torch.rand(self.num_params).numpy()) 34 | return F.prelu(x, weight=weights) 35 | 36 | 37 | @pytest.mark.parametrize('change_ordering', [True, False]) 38 | def test_layer_prelu(change_ordering): 39 | model = LayerPReLU() 40 | model.eval() 41 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 42 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 43 | 44 | 45 | @pytest.mark.parametrize('change_ordering', [True, False]) 46 | def test_f_prelu(change_ordering): 47 | model = FPReLU() 48 | model.eval() 49 | input_np = 
np.random.uniform(0, 1, (1, 3, 224, 224)) 50 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 51 | -------------------------------------------------------------------------------- /test/layers/activations/test_relu.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerReLU(nn.Module): 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self): 13 | super(LayerReLU, self).__init__() 14 | self.relu = nn.ReLU() 15 | 16 | def forward(self, x): 17 | x = self.relu(x) 18 | return x 19 | 20 | 21 | class FReLU(nn.Module): 22 | """ 23 | Test for nn.functional types 24 | """ 25 | def __init__(self): 26 | super(FReLU, self).__init__() 27 | 28 | def forward(self, x): 29 | from torch.nn import functional as F 30 | return F.relu(x) 31 | 32 | 33 | @pytest.mark.parametrize('change_ordering', [True, False]) 34 | def test_layer_relu(change_ordering): 35 | model = LayerReLU() 36 | model.eval() 37 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 38 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 39 | 40 | 41 | @pytest.mark.parametrize('change_ordering', [True, False]) 42 | def test_f_relu(change_ordering): 43 | model = FReLU() 44 | model.eval() 45 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 46 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 47 | -------------------------------------------------------------------------------- /test/layers/activations/test_relu6.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerReLU6(nn.Module): 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self): 13 | super(LayerReLU6, self).__init__() 14 | self.relu = nn.ReLU6() 15 | 16 | def forward(self, x): 17 | x = self.relu(x) 18 | return x 19 | 20 | 21 | class FReLU6(nn.Module): 22 | """ 23 | Test for nn.functional types 24 | """ 25 | def __init__(self): 26 | super(FReLU6, self).__init__() 27 | 28 | def forward(self, x): 29 | from torch.nn import functional as F 30 | return F.relu6(x) 31 | 32 | 33 | @pytest.mark.parametrize('change_ordering', [True, False]) 34 | def test_layer_relu6(change_ordering): 35 | model = LayerReLU6() 36 | model.eval() 37 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 38 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 39 | 40 | 41 | @pytest.mark.parametrize('change_ordering', [True, False]) 42 | def test_f_relu6(change_ordering): 43 | model = FReLU6() 44 | model.eval() 45 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 46 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 47 | -------------------------------------------------------------------------------- /test/layers/activations/test_selu.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerSELU(nn.Module): 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self): 13 | super(LayerSELU, self).__init__() 14 | self.selu = nn.SELU() 15 | 16 | def 
forward(self, x): 17 | x = self.selu(x) 18 | return x 19 | 20 | 21 | class FSELU(nn.Module): 22 | """ 23 | Test for nn.functional types 24 | """ 25 | def __init__(self): 26 | super(FSELU, self).__init__() 27 | 28 | def forward(self, x): 29 | from torch.nn import functional as F 30 | return F.selu(x) 31 | 32 | 33 | @pytest.mark.parametrize('change_ordering', [True, False]) 34 | def test_layer_selu(change_ordering): 35 | model = LayerSELU() 36 | model.eval() 37 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 38 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 39 | 40 | 41 | @pytest.mark.parametrize('change_ordering', [True, False]) 42 | def test_f_selu(change_ordering): 43 | model = FSELU() 44 | model.eval() 45 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 46 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 47 | -------------------------------------------------------------------------------- /test/layers/activations/test_sigmoid.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerSigmoid(nn.Module): 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self): 13 | super(LayerSigmoid, self).__init__() 14 | self.sig = nn.Sigmoid() 15 | 16 | def forward(self, x): 17 | x = self.sig(x) 18 | return x 19 | 20 | 21 | class FSigmoid(nn.Module): 22 | """ 23 | Test for nn.functional types 24 | """ 25 | def __init__(self): 26 | super(FSigmoid, self).__init__() 27 | 28 | def forward(self, x): 29 | from torch.nn import functional as F 30 | return F.sigmoid(x) 31 | 32 | 33 | @pytest.mark.parametrize('change_ordering', [True, False]) 34 | def test_layer_sigmoid(change_ordering): 35 | model = LayerSigmoid() 36 | model.eval() 37 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 38 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 39 | 40 | 41 | @pytest.mark.parametrize('change_ordering', [True, False]) 42 | def test_f_sigmoid(change_ordering): 43 | model = FSigmoid() 44 | model.eval() 45 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 46 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 47 | -------------------------------------------------------------------------------- /test/layers/activations/test_softmax.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerSoftmax(nn.Module): 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self, dim): 13 | super(LayerSoftmax, self).__init__() 14 | self.dim = dim 15 | self.softmax = nn.Softmax(dim=dim) 16 | 17 | def forward(self, x): 18 | x = self.softmax(x) 19 | return x 20 | 21 | 22 | class FSoftmax(nn.Module): 23 | """ 24 | Test for nn.functional types 25 | """ 26 | def __init__(self, dim): 27 | super(FSoftmax, self).__init__() 28 | self.dim = dim 29 | 30 | def forward(self, x): 31 | from torch.nn import functional as F 32 | return F.softmax(x, self.dim) 33 | 34 | 35 | @pytest.mark.parametrize('change_ordering', [True, False]) 36 | @pytest.mark.parametrize('dim', [0, 1, 2, 3]) 37 | def test_layer_softmax(change_ordering, dim): 38 | model = LayerSoftmax(dim) 39 | model.eval() 40 | input_np = 
np.random.uniform(0, 1, (1, 3, 224, 224)) 41 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 42 | 43 | 44 | @pytest.mark.parametrize('change_ordering', [True, False]) 45 | @pytest.mark.parametrize('dim', [0, 1, 2, 3]) 46 | def test_f_softmax(change_ordering, dim): 47 | model = FSoftmax(dim) 48 | model.eval() 49 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 50 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 51 | -------------------------------------------------------------------------------- /test/layers/activations/test_tanh.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerTanh(nn.Module): 9 | """ 10 | Test for nn.layers based types 11 | """ 12 | def __init__(self): 13 | super(LayerTanh, self).__init__() 14 | self.tanh = nn.Tanh() 15 | 16 | def forward(self, x): 17 | x = self.tanh(x) 18 | return x 19 | 20 | 21 | class FTanh(nn.Module): 22 | """ 23 | Test for nn.functional types 24 | """ 25 | def __init__(self): 26 | super(FTanh, self).__init__() 27 | 28 | def forward(self, x): 29 | from torch.nn import functional as F 30 | return F.tanh(x) 31 | 32 | 33 | @pytest.mark.parametrize('change_ordering', [True, False]) 34 | def test_layer_tanh(change_ordering): 35 | model = LayerTanh() 36 | model.eval() 37 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 38 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 39 | 40 | 41 | @pytest.mark.parametrize('change_ordering', [True, False]) 42 | def test_f_tanh(change_ordering): 43 | model = FTanh() 44 | model.eval() 45 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 46 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 47 | -------------------------------------------------------------------------------- /test/layers/activations/test_threshold.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import random 3 | 4 | 5 | class LayerThreshold(nn.Module): # not supported by onnx 6 | """ 7 | Test for nn.layers based types 8 | """ 9 | def __init__(self): 10 | super(LayerThreshold, self).__init__() 11 | self.threshold = random.random() 12 | self.value = self.threshold + random.random() 13 | self.thresh = nn.Threshold(self.threshold, self.value) 14 | 15 | def forward(self, x): 16 | x = self.thresh(x) 17 | return x 18 | 19 | 20 | class FThreshold(nn.Module): # not supported by onnx 21 | """ 22 | Test for nn.functional types 23 | """ 24 | def __init__(self): 25 | super(FThreshold, self).__init__() 26 | self.threshold = random.random() 27 | self.value = self.threshold + random.random() 28 | 29 | def forward(self, x): 30 | from torch.nn import functional as F 31 | return F.threshold(x, threshold=self.threshold, value=self.value) 32 | -------------------------------------------------------------------------------- /test/layers/constants/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/constants/__init__.py -------------------------------------------------------------------------------- /test/layers/constants/test_constant.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import pytest 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class FConstant(nn.Module): 10 | def __init__(self, constant): 11 | super(FConstant, self).__init__() 12 | self.constant = constant 13 | 14 | def forward(self, x): 15 | return x + torch.FloatTensor([self.constant]) 16 | 17 | 18 | @pytest.mark.parametrize('change_ordering', [True, False]) 19 | @pytest.mark.parametrize('constant', [-1.0, 0.0, 1.0]) 20 | def test_constant(change_ordering, constant): 21 | model = FConstant(constant) 22 | model.eval() 23 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 24 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 25 | -------------------------------------------------------------------------------- /test/layers/convolutions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/convolutions/__init__.py -------------------------------------------------------------------------------- /test/layers/convolutions/test_conv2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, inp, out, kernel_size=3, padding=1, stride=1, bias=False, dilation=1, groups=1): 11 | super(LayerTest, self).__init__() 12 | self.conv = nn.Conv2d( 13 | inp, out, kernel_size=kernel_size, padding=padding, 14 | stride=stride, bias=bias, dilation=dilation, groups=groups 15 | ) 16 | 17 | def forward(self, x): 18 | x = self.conv(x) 19 | return x 20 | 21 | 22 | def func(change_ordering, kernel_size, padding, stride, bias, dilation, groups): 23 | if not tf.test.gpu_device_name() and not change_ordering: 24 | pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 25 | if stride > 1 and dilation > 1: 26 | pytest.skip("strides > 1 not supported in conjunction with dilation_rate > 1") 27 | model = LayerTest( 28 | groups * 3, groups, 29 | kernel_size=kernel_size, padding=padding, 30 | stride=stride, bias=bias, dilation=dilation, groups=groups) 31 | model.eval() 32 | input_np = np.random.uniform(0, 1, (1, groups * 3, 224, 224)) 33 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 34 | 35 | 36 | @pytest.mark.parametrize('change_ordering', [True, False]) 37 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 38 | @pytest.mark.parametrize('padding', [0, 1, 3, 5]) 39 | @pytest.mark.parametrize('stride', [1]) 40 | @pytest.mark.parametrize('bias', [True, False]) 41 | @pytest.mark.parametrize('dilation', [1, 2, 3]) 42 | @pytest.mark.parametrize('groups', [1, 2, 3]) 43 | def test_conv2d_case1(change_ordering, kernel_size, padding, stride, bias, dilation, groups): 44 | func(change_ordering, kernel_size, padding, stride, bias, dilation, groups) 45 | 46 | 47 | @pytest.mark.parametrize('change_ordering', [True, False]) 48 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 49 | @pytest.mark.parametrize('padding', [0, 1, 3, 5]) 50 | @pytest.mark.parametrize('stride', [1, 2, 3]) 51 | @pytest.mark.parametrize('bias', [True, False]) 52 | @pytest.mark.parametrize('dilation', [1]) 53 | @pytest.mark.parametrize('groups', [1, 2, 3]) 54 | def test_conv2d_case2(change_ordering, kernel_size, padding, stride, bias, dilation, groups): 55 | func(change_ordering, kernel_size, padding, stride, bias, dilation, groups) 56 | -------------------------------------------------------------------------------- /test/layers/convolutions/test_conv3d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, inp, out, kernel_size=3, padding=1, stride=1, bias=False, dilation=1, groups=1): 11 | super(LayerTest, self).__init__() 12 | self.conv = nn.Conv3d( 13 | inp, out, kernel_size=kernel_size, padding=padding, 14 | stride=stride, bias=bias, dilation=dilation, groups=groups 15 | ) 16 | 17 | def forward(self, x): 18 | x = self.conv(x) 19 | return x 20 | 21 | 22 | def func(change_ordering, kernel_size, padding, stride, bias, dilation, groups, dimension): 23 | if not tf.test.gpu_device_name() and not change_ordering: 24 | pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 25 | if dimension < kernel_size: 26 | pytest.skip() 27 | # if stride > 1 and dilation > 1: 28 | # pytest.skip("strides > 1 not supported in conjunction with dilation_rate > 1") 29 | model = LayerTest(groups * 3, groups, kernel_size=kernel_size, padding=padding, 30 | stride=stride, bias=bias, dilation=dilation, groups=groups) 31 | model.eval() 32 | input_np = np.random.uniform(0, 1, (1, 3 * groups, dimension, 224, 224)) 33 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 34 | 35 | 36 | @pytest.mark.slow 37 | @pytest.mark.parametrize('change_ordering', [True, False]) 38 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 39 | @pytest.mark.parametrize('padding', [0, 1, 3, 5]) 40 | @pytest.mark.parametrize('stride', [1]) 41 | @pytest.mark.parametrize('bias', [True, False]) 42 | @pytest.mark.parametrize('dilation', [1, 2, 3]) 43 | @pytest.mark.parametrize('groups', [1, 2, 3]) 44 | @pytest.mark.parametrize('dimension', [20, 40]) 45 | # @pytest.mark.parametrize('change_ordering', [True]) 46 | # @pytest.mark.parametrize('kernel_size', [1]) 47 | # @pytest.mark.parametrize('padding', [1]) 48 | # @pytest.mark.parametrize('stride', [1]) 49 | # @pytest.mark.parametrize('bias', [True]) 50 | # @pytest.mark.parametrize('dilation', [3]) 51 | # @pytest.mark.parametrize('groups', [2]) 52 | # @pytest.mark.parametrize('dimension', [20]) 53 | def test_conv3d_case1(change_ordering, kernel_size, padding, stride, bias, dilation, groups, dimension): 54 | func(change_ordering, kernel_size, padding, stride, bias, dilation, groups, dimension) 55 | 56 | 57 | @pytest.mark.slow 58 | @pytest.mark.parametrize('change_ordering', [True, False]) 59 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 60 | @pytest.mark.parametrize('padding', [0, 1, 3, 5]) 61 | @pytest.mark.parametrize('stride', [1, 2, 3]) 62 | @pytest.mark.parametrize('bias', [True, False]) 63 | @pytest.mark.parametrize('dilation', [1]) 64 | @pytest.mark.parametrize('groups', [1, 2, 3]) 65 | @pytest.mark.parametrize('dimension', [20, 40]) 66 | def test_conv3d_case2(change_ordering, kernel_size, padding, stride, bias, dilation, groups, dimension): 67 | func(change_ordering, kernel_size, padding, stride, bias, dilation, groups, dimension) 68 | -------------------------------------------------------------------------------- /test/layers/convolutions/test_convtranspose2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerTest(nn.Module): 9 | def __init__(self, inp, out, kernel_size=3, padding=1, stride=1, bias=False): 10 | super(LayerTest, self).__init__() 11 | self.conv = nn.ConvTranspose2d(inp, out, kernel_size=kernel_size, padding=padding, 12 | stride=stride, bias=bias) 13 | 14 | def forward(self, x): 15 | x = self.conv(x) 16 | return x 17 | 18 | 19 | @pytest.mark.parametrize('change_ordering', [True, False]) 20 | @pytest.mark.parametrize('kernel_size', [1, 3, 5]) 21 | @pytest.mark.parametrize('padding', [0, 1, 3]) 22 | @pytest.mark.parametrize('stride', [1, 2]) 23 | @pytest.mark.parametrize('bias', [True, False]) 24 | def test_convtranspose2d(change_ordering, kernel_size, padding, stride, bias): 25 | outs = np.random.choice([1, 3, 7]) 26 | model = LayerTest(3, outs, kernel_size=kernel_size, padding=padding, stride=stride, bias=bias) 27 | model.eval() 28 | 
input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 29 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 30 | -------------------------------------------------------------------------------- /test/layers/elementwise/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/elementwise/__init__.py -------------------------------------------------------------------------------- /test/layers/elementwise/test_add.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class FAdd(nn.Module): 9 | def __init__(self): 10 | super(FAdd, self).__init__() 11 | 12 | def forward(self, x, y): 13 | x = x + y + np.float32(0.1) 14 | # x = x 15 | return x 16 | 17 | 18 | @pytest.mark.repeat(10) 19 | @pytest.mark.parametrize('change_ordering', [True, False]) 20 | def test_add(change_ordering): 21 | model = FAdd() 22 | model.eval() 23 | 24 | input_np1 = np.random.uniform(0, 1, (1, 3, 224, 224)) 25 | input_np2 = np.random.uniform(0, 1, (1, 3, 224, 224)) 26 | error = convert_and_test(model, (input_np1, input_np2), verbose=False, change_ordering=change_ordering) 27 | -------------------------------------------------------------------------------- /test/layers/elementwise/test_div.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | from torch.autograd import Variable 5 | from onnx2keras import onnx_to_keras, check_torch_keras_error 6 | import onnx 7 | import pytest 8 | 9 | from test.utils import convert_and_test 10 | 11 | 12 | class FDiv(nn.Module): 13 | def __init__(self): 14 | super(FDiv, self).__init__() 15 | 16 | def forward(self, x, y): 17 | x = x / 2 18 | y = y / 2 19 | 20 | x = x / y 21 | return x 22 | 23 | 24 | @pytest.mark.repeat(10) 25 | @pytest.mark.parametrize('change_ordering', [True, False]) 26 | def test_div(change_ordering): 27 | model = FDiv() 28 | model.eval() 29 | 30 | input_np1 = np.random.uniform(0, 1, (1, 3, 224, 224)) 31 | input_np2 = np.random.uniform(0, 1, (1, 3, 224, 224)) 32 | error = convert_and_test(model, (input_np1, input_np2), verbose=False, change_ordering=change_ordering) 33 | 34 | -------------------------------------------------------------------------------- /test/layers/elementwise/test_mul.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class FMul(nn.Module): 9 | def __init__(self): 10 | super(FMul, self).__init__() 11 | 12 | def forward(self, x, y): 13 | x = x * y 14 | x = x * 10.0 15 | return x 16 | 17 | 18 | @pytest.mark.repeat(10) 19 | @pytest.mark.parametrize('change_ordering', [True, False]) 20 | def test_mul(change_ordering): 21 | model = FMul() 22 | model.eval() 23 | 24 | input_np1 = np.random.uniform(0, 1, (1, 3, 224, 224)) 25 | input_np2 = np.random.uniform(0, 1, (1, 3, 224, 224)) 26 | error = convert_and_test(model, (input_np1, input_np2), verbose=False, change_ordering=change_ordering) 27 | -------------------------------------------------------------------------------- /test/layers/elementwise/test_sub.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class FSub(nn.Module): 9 | def __init__(self): 10 | super(FSub, self).__init__() 11 | 12 | def forward(self, x, y): 13 | x = x - y - 8.3 14 | return x 15 | 16 | 17 | @pytest.mark.repeat(10) 18 | @pytest.mark.parametrize('change_ordering', [True, False]) 19 | def test_sub(change_ordering): 20 | model = FSub() 21 | model.eval() 22 | 23 | input_np1 = np.random.uniform(0, 1, (1, 3, 224, 224)) 24 | input_np2 = np.random.uniform(0, 1, (1, 3, 224, 224)) 25 | error = convert_and_test(model, (input_np1, input_np2), verbose=False, change_ordering=change_ordering) 26 | -------------------------------------------------------------------------------- /test/layers/linears/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/linears/__init__.py -------------------------------------------------------------------------------- /test/layers/linears/test_linear.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | from torch.autograd import Variable 5 | import pytest 6 | 7 | from test.utils import convert_and_test 8 | 9 | 10 | class LayerTest(nn.Module): 11 | def __init__(self, inp, out, bias=False): 12 | super(LayerTest, self).__init__() 13 | self.fc = nn.Linear(inp, out, bias=bias) 14 | 15 | def forward(self, x): 16 | x = self.fc(x) 17 | return x 18 | 19 | 20 | @pytest.mark.repeat(10) 21 | @pytest.mark.parametrize('change_ordering', [True, False]) 22 | @pytest.mark.parametrize('bias', [True, False]) 23 | def test_linear(change_ordering, bias): 24 | ins = np.random.choice([1, 3, 7, 128]) 25 | 26 | model = LayerTest(ins, np.random.choice([1, 3, 7, 128]), bias=bias) 27 | model.eval() 28 | 29 | input_np = np.random.uniform(0, 1, (1, ins)) 30 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 31 | -------------------------------------------------------------------------------- /test/layers/normalizations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/normalizations/__init__.py -------------------------------------------------------------------------------- /test/layers/normalizations/test_bn2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import random 4 | import pytest 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, out, eps, momentum): 11 | super(LayerTest, self).__init__() 12 | self.bn = nn.BatchNorm2d(out, eps=eps, momentum=momentum) 13 | 14 | def forward(self, x): 15 | x = self.bn(x) 16 | return x 17 | 18 | 19 | @pytest.mark.repeat(10) 20 | @pytest.mark.parametrize('change_ordering', [True, False]) 21 | def test_bn2d(change_ordering): 22 | inp_size = np.random.randint(10, 100) 23 | 24 | model = LayerTest(inp_size, random.random(), random.random()) 25 | model.eval() 26 | 27 | input_np = np.random.uniform(0, 1, (1, inp_size, 224, 224)) 28 | error = convert_and_test(model, input_np, verbose=False, 
change_ordering=change_ordering) 29 | -------------------------------------------------------------------------------- /test/layers/normalizations/test_in2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import random 4 | import pytest 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, out, eps, momentum): 11 | super(LayerTest, self).__init__() 12 | self.in2d = nn.InstanceNorm2d(out, eps=eps, momentum=momentum) 13 | 14 | def forward(self, x): 15 | x = self.in2d(x) 16 | return x 17 | 18 | 19 | @pytest.mark.repeat(10) 20 | # sometimes error is a little bit greater than 1e-5 21 | # maybe it can be problem described here 22 | # https://discuss.pytorch.org/t/instance-norm-implement-by-basic-operations-has-different-result-comparing-to-torch-nn-instancenorm2d/87470/2 23 | @pytest.mark.parametrize('epsilon', [1e-4]) 24 | @pytest.mark.parametrize('change_ordering', [True, False]) 25 | def test_instancenorm(change_ordering, epsilon): 26 | inp_size = np.random.randint(10, 100) 27 | 28 | model = LayerTest(inp_size, random.random(), random.random()) 29 | model.eval() 30 | 31 | input_np = np.random.uniform(0, 1, (1, inp_size, 224, 224)) 32 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering, epsilon=1e-4) 33 | -------------------------------------------------------------------------------- /test/layers/operations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/operations/__init__.py -------------------------------------------------------------------------------- /test/layers/operations/test_cast.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import torch 4 | import pytest 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class FCastTest(nn.Module): 10 | """ 11 | Test for nn.functional types 12 | """ 13 | def __init__(self): 14 | super(FCastTest, self).__init__() 15 | 16 | def forward(self, x): 17 | return x.type(torch.DoubleTensor).type(torch.BoolTensor).type(torch.uint8) 18 | 19 | 20 | @pytest.mark.repeat(10) 21 | @pytest.mark.parametrize('change_ordering', [True, False]) 22 | def test_cast(change_ordering): 23 | model = FCastTest() 24 | model.eval() 25 | 26 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 27 | 28 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 29 | -------------------------------------------------------------------------------- /test/layers/operations/test_clip.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class FClipTest(nn.Module): 9 | """ 10 | Test for nn.functional types 11 | """ 12 | def __init__(self): 13 | self.low = np.random.uniform(-1, 1) 14 | self.high = np.random.uniform(1, 2) 15 | super(FClipTest, self).__init__() 16 | 17 | def forward(self, x): 18 | return x.clamp(self.low, self.high) 19 | 20 | 21 | @pytest.mark.repeat(10) 22 | @pytest.mark.parametrize('change_ordering', [True, False]) 23 | def test_clip(change_ordering): 24 | model = FClipTest() 25 | model.eval() 26 | 27 | input_np = np.random.uniform(0, 1, (1, 3, 224, 
224)) 28 | 29 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 30 | -------------------------------------------------------------------------------- /test/layers/operations/test_floor.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import numpy as np 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class FFloorTest(nn.Module): 9 | """ 10 | Test for nn.functional types 11 | """ 12 | def __init__(self): 13 | super(FFloorTest, self).__init__() 14 | 15 | def forward(self, x): 16 | return x.floor() 17 | 18 | 19 | @pytest.mark.repeat(10) 20 | @pytest.mark.parametrize('change_ordering', [True, False]) 21 | def test_floor(change_ordering): 22 | model = FFloorTest() 23 | model.eval() 24 | 25 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 26 | 27 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 28 | -------------------------------------------------------------------------------- /test/layers/operations/test_norm.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | import numpy as np 4 | import pytest 5 | 6 | 7 | from test.utils import convert_and_test 8 | 9 | 10 | class FNormTest(nn.Module): 11 | """ 12 | Test for nn.functional types 13 | """ 14 | def __init__(self, dim, keepdim): 15 | super(FNormTest, self).__init__() 16 | self.dim = dim 17 | self.keepdim = keepdim 18 | 19 | def forward(self, x): 20 | x = torch.norm(x, p=2, dim=self.dim, keepdim=self.keepdim) 21 | return x 22 | 23 | 24 | # TODO: Not working with dim=[2,3] and change_ordering=False ???? error about 0.0001-0.001 25 | @pytest.mark.repeat(10) 26 | @pytest.mark.parametrize('change_ordering', [True, False]) 27 | @pytest.mark.parametrize('dim', [[1, 2], [1, 3]]) 28 | @pytest.mark.parametrize('epsilon', [5e-5]) 29 | @pytest.mark.parametrize('keepdim', [True, False]) 30 | def test_norm(change_ordering, dim, epsilon, keepdim): 31 | model = FNormTest(dim, keepdim) 32 | model.eval() 33 | 34 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 35 | 36 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering, epsilon=epsilon) 37 | -------------------------------------------------------------------------------- /test/layers/poolings/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/poolings/__init__.py -------------------------------------------------------------------------------- /test/layers/poolings/test_avgpool2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, kernel_size=3, padding=1, stride=1): 11 | super(LayerTest, self).__init__() 12 | self.pool = nn.AvgPool2d(kernel_size=kernel_size, padding=padding, stride=stride) 13 | 14 | def forward(self, x): 15 | x = self.pool(x) 16 | return x 17 | 18 | 19 | @pytest.mark.parametrize('change_ordering', [True, False]) 20 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 21 | @pytest.mark.parametrize('padding', [0, 1, 3]) 22 | @pytest.mark.parametrize('stride', [1, 2, 3, 4]) 23 | def 
test_avgpool2d(change_ordering, kernel_size, padding, stride): 24 | if not tf.test.gpu_device_name() and not change_ordering: 25 | pytest.skip("Skip! Since tensorflow AvgPoolingOp op currently only supports the NHWC tensor format on the CPU") 26 | if padding > kernel_size / 2: 27 | # RuntimeError: invalid argument 2: pad should be smaller than half of kernel size, 28 | # but got padW = 1, padH = 1, kW = 1, 29 | pytest.skip("pad should be smaller than half of kernel size") 30 | model = LayerTest(kernel_size=kernel_size, padding=padding, stride=stride) 31 | model.eval() 32 | 33 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 34 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 35 | -------------------------------------------------------------------------------- /test/layers/poolings/test_avgpool3d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, kernel_size=3, padding=1, stride=1): 11 | super(LayerTest, self).__init__() 12 | self.pool = nn.AvgPool3d(kernel_size=kernel_size, padding=padding, stride=stride) 13 | 14 | def forward(self, x): 15 | x = self.pool(x) 16 | return x 17 | 18 | 19 | @pytest.mark.slow 20 | @pytest.mark.parametrize('change_ordering', [True, False]) 21 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 22 | @pytest.mark.parametrize('padding', [0, 1, 3]) 23 | @pytest.mark.parametrize('stride', [1, 2, 3, 4]) 24 | def test_avgpool3d(change_ordering, kernel_size, padding, stride): 25 | if not tf.test.gpu_device_name() and not change_ordering: 26 | pytest.skip("Skip! Since tensorflow AvgPoolingOp op currently only supports the NHWC tensor format on the CPU") 27 | if padding > kernel_size / 2: 28 | # RuntimeError: invalid argument 2: pad should be smaller than half of kernel size, 29 | # but got padW = 1, padH = 1, kW = 1, 30 | pytest.skip("pad should be smaller than half of kernel size") 31 | model = LayerTest(kernel_size=kernel_size, padding=padding, stride=stride) 32 | model.eval() 33 | 34 | input_np = np.random.uniform(0, 1, (1, 3, 20, 224, 224)) 35 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 36 | -------------------------------------------------------------------------------- /test/layers/poolings/test_global_avgpool2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self): 11 | super(LayerTest, self).__init__() 12 | self.pool = nn.AdaptiveAvgPool2d((1, 1)) 13 | 14 | def forward(self, x): 15 | x = self.pool(x) 16 | return x 17 | 18 | 19 | @pytest.mark.repeat(10) 20 | @pytest.mark.parametrize('change_ordering', [True, False]) 21 | def test_global_avgpool2d(change_ordering): 22 | if not tf.test.gpu_device_name() and not change_ordering: 23 | pytest.skip("Skip! 
Since tensorflow AvgPoolingOp op currently only supports the NHWC tensor format on the CPU") 24 | model = LayerTest() 25 | model.eval() 26 | 27 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 28 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 29 | -------------------------------------------------------------------------------- /test/layers/poolings/test_global_maxpool2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self): 11 | super(LayerTest, self).__init__() 12 | self.pool = nn.AdaptiveMaxPool2d((1, 1)) 13 | 14 | def forward(self, x): 15 | x = self.pool(x) 16 | return x 17 | 18 | 19 | @pytest.mark.repeat(10) 20 | @pytest.mark.parametrize('change_ordering', [True, False]) 21 | def test_global_maxpool2d(change_ordering): 22 | if not tf.test.gpu_device_name() and not change_ordering: 23 | pytest.skip("Skip! Since tensorflow MaxPoolingOp op currently only supports the NHWC tensor format on the CPU") 24 | model = LayerTest() 25 | model.eval() 26 | 27 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 28 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 29 | -------------------------------------------------------------------------------- /test/layers/poolings/test_maxpool2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, kernel_size=3, padding=1, stride=1): 11 | super(LayerTest, self).__init__() 12 | self.pool = nn.MaxPool2d(kernel_size=kernel_size, padding=padding, stride=stride) 13 | 14 | def forward(self, x): 15 | x = self.pool(x) 16 | return x 17 | 18 | 19 | @pytest.mark.parametrize('change_ordering', [True, False]) 20 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 21 | @pytest.mark.parametrize('padding', [0, 1, 3]) 22 | @pytest.mark.parametrize('stride', [1, 2, 3, 4]) 23 | def test_maxpool2d(change_ordering, kernel_size, padding, stride): 24 | if not tf.test.gpu_device_name() and not change_ordering: 25 | pytest.skip("Skip! 
Since tensorflow MaxPoolingOp op currently only supports the NHWC tensor format on the CPU") 26 | if padding > kernel_size / 2: 27 | # RuntimeError: invalid argument 2: pad should be smaller than half of kernel size, 28 | # but got padW = 1, padH = 1, kW = 1, 29 | pytest.skip("pad should be smaller than half of kernel size") 30 | model = LayerTest(kernel_size=kernel_size, padding=padding, stride=stride) 31 | model.eval() 32 | 33 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 34 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 35 | -------------------------------------------------------------------------------- /test/layers/poolings/test_maxpool3d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | import tensorflow as tf 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self, kernel_size=3, padding=1, stride=1): 11 | super(LayerTest, self).__init__() 12 | self.pool = nn.MaxPool3d(kernel_size=kernel_size, padding=padding, stride=stride) 13 | 14 | def forward(self, x): 15 | x = self.pool(x) 16 | return x 17 | 18 | 19 | @pytest.mark.slow 20 | @pytest.mark.parametrize('change_ordering', [True, False]) 21 | @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) 22 | @pytest.mark.parametrize('padding', [0, 1, 3]) 23 | @pytest.mark.parametrize('stride', [1, 2, 3, 4]) 24 | def test_maxpool3d(change_ordering, kernel_size, padding, stride): 25 | if not tf.test.gpu_device_name() and not change_ordering: 26 | pytest.skip("Skip! Since tensorflow MaxPoolingOp op currently only supports the NHWC tensor format on the CPU") 27 | if padding > kernel_size / 2: 28 | # RuntimeError: invalid argument 2: pad should be smaller than half of kernel size, 29 | # but got padW = 1, padH = 1, kW = 1, 30 | pytest.skip("pad should be smaller than half of kernel size") 31 | model = LayerTest(kernel_size=kernel_size, padding=padding, stride=stride) 32 | model.eval() 33 | 34 | input_np = np.random.uniform(0, 1, (1, 3, 20, 224, 224)) 35 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 36 | -------------------------------------------------------------------------------- /test/layers/reshapes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/layers/reshapes/__init__.py -------------------------------------------------------------------------------- /test/layers/reshapes/test_slice.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerTest(nn.Module): 9 | def __init__(self): 10 | super(LayerTest, self).__init__() 11 | 12 | def forward(self, x): 13 | x = x[:1, :2, :3, :4] 14 | return x 15 | 16 | 17 | @pytest.mark.parametrize('change_ordering', [True, False]) 18 | def test_slice(change_ordering): 19 | model = LayerTest() 20 | model.eval() 21 | 22 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 23 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 24 | -------------------------------------------------------------------------------- /test/layers/reshapes/test_split.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import pytest 5 | 6 | from test.utils import convert_and_test 7 | 8 | 9 | class LayerTest(nn.Module): 10 | def __init__(self): 11 | super(LayerTest, self).__init__() 12 | 13 | def forward(self, x): 14 | return torch.split(x, 224//4, 3) 15 | 16 | 17 | @pytest.mark.parametrize('change_ordering', [True, False]) 18 | def test_split(change_ordering): 19 | model = LayerTest() 20 | model.eval() 21 | 22 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 23 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 24 | -------------------------------------------------------------------------------- /test/layers/reshapes/test_squeeze.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import pytest 4 | 5 | from test.utils import convert_and_test 6 | 7 | 8 | class LayerTest(nn.Module): 9 | def __init__(self): 10 | super(LayerTest, self).__init__() 11 | 12 | def forward(self, x): 13 | x = x.squeeze(1) 14 | return x 15 | 16 | 17 | @pytest.mark.parametrize('change_ordering', [True, False]) 18 | def test_squeeze(change_ordering): 19 | model = LayerTest() 20 | model.eval() 21 | 22 | input_np = np.random.uniform(0, 1, (1, 1, 224, 224)) 23 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 24 | -------------------------------------------------------------------------------- /test/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmalivenko/onnx2keras/45c81f221bb4228751abb061cb24d473bb74a8e8/test/models/__init__.py -------------------------------------------------------------------------------- /test/models/test_alexnet.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models import alexnet 7 | 8 | 9 | @pytest.mark.parametrize('change_ordering', [True, False]) 10 | def test_alexnet(change_ordering): 11 | if not tf.test.gpu_device_name() and not change_ordering: 12 | pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 13 | model = alexnet() 14 | model.eval() 15 | 16 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 17 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 18 | -------------------------------------------------------------------------------- /test/models/test_densenet.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models.densenet import densenet121 7 | 8 | 9 | @pytest.mark.slow 10 | @pytest.mark.parametrize('change_ordering', [True, False]) 11 | def test_densenet(change_ordering): 12 | if not tf.test.gpu_device_name() and not change_ordering: 13 | pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 14 | model = densenet121() 15 | model.eval() 16 | 17 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 18 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 19 | -------------------------------------------------------------------------------- /test/models/test_googlenet.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models import googlenet 7 | 8 | 9 | @pytest.mark.slow 10 | @pytest.mark.parametrize('change_ordering', [True, False]) 11 | def test_googlenet(change_ordering): 12 | if not tf.test.gpu_device_name() and not change_ordering: 13 | pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 14 | model = googlenet() 15 | model.eval() 16 | 17 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 18 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 19 | -------------------------------------------------------------------------------- /test/models/test_mbnet2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models import mobilenet_v2 7 | 8 | 9 | @pytest.mark.parametrize('change_ordering', [True, False]) 10 | def test_mobilenetv2(change_ordering): 11 | if not tf.test.gpu_device_name() and not change_ordering: 12 | pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 13 | model = mobilenet_v2() 14 | model.eval() 15 | 16 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 17 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 18 | -------------------------------------------------------------------------------- /test/models/test_mnasnet.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models import mnasnet0_5, mnasnet1_0, mnasnet0_75, mnasnet1_3 7 | 8 | 9 | @pytest.mark.slow 10 | @pytest.mark.parametrize('change_ordering', [True, False]) 11 | @pytest.mark.parametrize('model_class', [mnasnet0_5, mnasnet0_75, mnasnet1_0, mnasnet1_3]) 12 | def test_mnasnet(change_ordering, model_class): 13 | if not tf.test.gpu_device_name() and not change_ordering: 14 | pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 15 | model = model_class() 16 | model.eval() 17 | 18 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 19 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 20 | -------------------------------------------------------------------------------- /test/models/test_resnet18.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from torchvision.models import resnet18 6 | 7 | from test.utils import convert_and_test 8 | 9 | 10 | @pytest.mark.parametrize('change_ordering', [True, False]) 11 | def test_resnet18(change_ordering): 12 | if not tf.test.gpu_device_name() and not change_ordering: 13 | pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 14 | model = resnet18() 15 | model.eval() 16 | 17 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 18 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 19 | -------------------------------------------------------------------------------- /test/models/test_resnext.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models import resnext50_32x4d, resnext101_32x8d 7 | 8 | 9 | @pytest.mark.slow 10 | @pytest.mark.parametrize('change_ordering', [True, False]) 11 | @pytest.mark.parametrize('model_class', [resnext50_32x4d, resnext101_32x8d]) 12 | def test_resnext(change_ordering, model_class): 13 | if not tf.test.gpu_device_name() and not change_ordering: 14 | pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 15 | model = model_class() 16 | model.eval() 17 | 18 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 19 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 20 | -------------------------------------------------------------------------------- /test/models/test_squeezenet.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models import squeezenet1_0, squeezenet1_1 7 | 8 | 9 | @pytest.mark.slow 10 | @pytest.mark.parametrize('change_ordering', [True, False]) 11 | @pytest.mark.parametrize('model_class', [squeezenet1_1, squeezenet1_0]) 12 | def test_squeezenet(change_ordering, model_class): 13 | if not tf.test.gpu_device_name() and not change_ordering: 14 | pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 15 | model = model_class() 16 | model.eval() 17 | 18 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 19 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 20 | -------------------------------------------------------------------------------- /test/models/test_vgg.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | from test.utils import convert_and_test 6 | from torchvision.models import vgg11, vgg11_bn 7 | 8 | 9 | @pytest.mark.slow 10 | @pytest.mark.parametrize('change_ordering', [True, False]) 11 | @pytest.mark.parametrize('model_class', [vgg11, vgg11_bn]) 12 | def test_vgg(change_ordering, model_class): 13 | if not tf.test.gpu_device_name() and not change_ordering: 14 | pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") 15 | model = model_class() 16 | model.eval() 17 | 18 | input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) 19 | error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) 20 | -------------------------------------------------------------------------------- /test/requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.1.0,<=1.5.0 2 | torchvision>=0.3.0,<=0.6.0 3 | pytest 4 | pytest-repeat -------------------------------------------------------------------------------- /test/utils.py: -------------------------------------------------------------------------------- 1 | import io 2 | import torch 3 | import onnx 4 | 5 | from onnx2keras import onnx_to_keras, check_torch_keras_error 6 | 7 | 8 | def torch2keras(model: torch.nn.Module, input_variable, verbose=True, change_ordering=False): 9 | if isinstance(input_variable, (tuple, list)): 10 | input_variable = tuple(torch.FloatTensor(var) for var in input_variable) 11 | input_names = [f'test_in{i}' for i, _ in enumerate(input_variable)] 12 | else: 13 | input_variable = torch.FloatTensor(input_variable) 14 | input_names = ['test_in'] 15 | 16 | temp_f = io.BytesIO() 17 | torch.onnx.export(model, input_variable, temp_f, verbose=verbose, input_names=input_names, 18 | output_names=['test_out']) 19 | temp_f.seek(0) 20 | onnx_model = onnx.load(temp_f) 21 | k_model = onnx_to_keras(onnx_model, input_names, change_ordering=change_ordering) 22 | return k_model 23 | 24 | 25 | def convert_and_test(model: torch.nn.Module, 26 | input_variable, 27 | verbose=True, 28 | change_ordering=False, 29 | epsilon=1e-5): 30 | k_model = torch2keras(model, input_variable, verbose=verbose, change_ordering=change_ordering) 31 | error = check_torch_keras_error(model, k_model, input_variable, change_ordering=change_ordering, epsilon=epsilon) 32 | return error 33 | --------------------------------------------------------------------------------
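The `convert_and_test` helper in `test/utils.py` above is the entry point every test goes through: it exports the PyTorch module to ONNX in memory, converts the result with `onnx_to_keras`, and returns the error value reported by `check_torch_keras_error`. Below is a minimal sketch of calling it outside of pytest; the toy `nn.Sequential` model, the 32x32 input size, and the final `print` are illustrative only and not part of the repository. `Conv2d` and `ReLU` conversion are both covered by the test suite, and `change_ordering=True` is used so the example also runs on CPU-only TensorFlow, which the NCHW convolution tests skip. Run it from the repository root so that `test.utils` is importable, as the tests themselves assume.

```python
import numpy as np
import torch.nn as nn

from test.utils import convert_and_test

# Illustrative model: Conv2d and ReLU conversion are both exercised by the test suite.
model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.ReLU(),
)
model.eval()

# NCHW input, as in the tests; change_ordering=True produces an NHWC Keras model,
# which also works on CPU-only TensorFlow builds.
input_np = np.random.uniform(0, 1, (1, 3, 32, 32))
error = convert_and_test(model, input_np, verbose=False, change_ordering=True)
print('error between PyTorch and Keras outputs:', error)
```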