├── LICENSE
├── README.md
├── pytorch_keras_converter
│   ├── API.py
│   ├── __init__.py
│   ├── cadene_to_tf
│   │   ├── __init__.py
│   │   ├── cadene_to_tf.py
│   │   └── utils.py
│   ├── io_utils.py
│   ├── tests.py
│   └── utility
│       ├── LayerRepresentation.py
│       ├── __init__.py
│       ├── converting_layers.py
│       ├── core.py
│       ├── t2k_equivalents
│       │   ├── __init__.py
│       │   ├── activation.py
│       │   ├── batchnorm.py
│       │   ├── cadene
│       │   │   ├── BNInception.py
│       │   │   ├── FBResNet.py
│       │   │   ├── InceptionV4.py
│       │   │   ├── ResNet.py
│       │   │   ├── SENet.py
│       │   │   └── __init__.py
│       │   ├── container.py
│       │   ├── conv.py
│       │   ├── dropout.py
│       │   ├── linear.py
│       │   ├── padding.py
│       │   └── pooling.py
│       └── torch2keras.py
└── setup.py

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 sonibla

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# pytorch_keras_converter

![GitHub stars](https://img.shields.io/github/stars/sonibla/pytorch_keras_converter)
![license](https://img.shields.io/github/license/sonibla/pytorch_keras_converter)

A PyTorch-Keras converter made for [Cadene](https://github.com/Cadene)'s [pretrained models](https://github.com/cadene/pretrained-models.pytorch).

Also converts some simple PyTorch models. See [supported layers](https://github.com/sonibla/pytorch_keras_converter#other-models) for more details.

I don't have a lot of time for this project, so feel free to [contribute](#contributing) if you want to :)

## Table of contents

- [Installation](#installation)
  * [Prerequisites](#prerequisites)
  * [Installing](#installing)
  * [Troubleshooting](#troubleshooting)
- [Usage](#usage)
  * [Quick examples](#quick-examples)
  * [API documentation](#api-documentation)
- [Supported models and layers](#supported-models-and-layers)
- [Known issues](#known-issues)
- [Contributing](#contributing)
- [Authors](#authors)
- [License](#license)
- [Acknowledgments](#acknowledgments)

## Installation

These instructions will get you a copy of the project up and running on your local machine.

### Prerequisites

[Python](https://www.python.org/downloads/): supported versions: >= 3.5

You can also install Python with [Anaconda](https://www.anaconda.com/distribution/#download-section).

### Installing

Download the files:
```
git clone https://github.com/sonibla/pytorch_keras_converter.git
cd pytorch_keras_converter
```
Then install `pytorch_keras_converter` using `setup.py`.

The following command should automatically install `pytorch_keras_converter` and every dependency:
```
python3 setup.py install --user
```

To install on a particular version of Python (here 3.7):
```
python3.7 setup.py install --user
```

To install on the entire system (requires administrator privileges):
```
sudo python setup.py install
```

### Troubleshooting

#### Installing `pip` or `setuptools`

If the `pip` or `setuptools` modules aren't installed in your Python environment:
```
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python get-pip.py --user
```

#### Manually installing dependencies

Dependencies should install automatically when running `setup.py`. If that fails, install them manually:
```
python -m pip install torch pretrainedmodels tensorflow graphviz numpy h5py tqdm --user
```

Remove `--user` to install on the whole system; replace `python` with `python3.7` to install on Python 3.7.

## Usage

### Quick examples

- To convert `se_resnet50`:
```
>>> import pytorch_keras_converter as pkc
>>> pkc.cadene_to_tf(['se_resnet50'], quiet=False)
```

- To convert `bninception` and `fbresnet152`:
```
>>> import pytorch_keras_converter as pkc
>>> pkc.cadene_to_tf(['bninception', 'fbresnet152'])
```

- To convert `se_resnet152` with random weights:
```
>>> import pytorch_keras_converter as pkc
>>> pkc.cadene_to_tf(['se_resnet152(pretrained=None)'])
```

- To automatically convert many models:

Create a file containing one model name per line. For example:
```
resnet18
resnet34
resnet50(pretrained=None)
resnet50
```

Let's store this file in `models.txt`.

Now, in a Python shell:
```
>>> import pytorch_keras_converter as pkc
>>> pkc.cadene_to_tf('models.txt')
```
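
The generated `.h5` files are ordinary Keras models. A minimal sketch of loading one back (assuming a TensorFlow backend is available; `cadene_to_tf` returns the list of files it created):

```python
import pytorch_keras_converter as pkc
import tensorflow as tf

files = pkc.cadene_to_tf(['se_resnet50'], quiet=False)  # list of created .h5 files
model = tf.keras.models.load_model(files[0])            # load the converted model
model.summary()
```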

### API documentation

#### `pytorch_keras_converter.API.convert`

```
def convert(model,
            input_shape,
            weights=True,
            quiet=True,
            ignore_tests=False,
            input_range=None,
            save=None,
            filename=None,
            directory=None):
```

##### Arguments:

- **model**:
A Keras or PyTorch model or layer to convert
- **input_shape** (list, tuple or int):
Input shape, without the batch dimension.
- **weights** (bool):
Also convert weights. If set to *False*, only convert the model
architecture
- **quiet** (bool):
If *False*, display a progress bar and some messages
- **ignore_tests** (bool):
Whether tests should be ignored. If **weights** is *False*, tests are automatically ignored.
  - If set to *True*, the converted model will
still be tested as a precaution. If the models are not identical, only
a warning is printed.
  - If set to *False* and the models are not identical, a *RuntimeWarning*
is raised
- **input_range**:
Optional.
A list of 2 elements containing the min and max values to give as
input to the model when performing the tests. If *None*, models will
be tested on samples from the "standard normal" distribution.
- **save**:
Whether the model should be exported to an hdf5 file.
- **filename**:
Optional.
Filename to give to the model's hdf5 file. If **filename** is not *None* and
**save** is not *False*, then **save** will automatically be set to *True*
- **directory**:
Optional.
Where to save the model's hdf5 file. If **directory** is not *None* and
**save** is not *False*, then **save** will automatically be set to *True*

##### Raises:

- *RuntimeWarning*:
If the converted and original models aren't identical and **ignore_tests**
is *False*

##### Returns:

If the model has been exported to a file, the name of the file.
Otherwise, the converted model.

#### `pytorch_keras_converter.API.convert_and_save`

```
def convert_and_save(model,
                     input_shape,
                     weights=True,
                     quiet=True,
                     ignore_tests=False,
                     input_range=None,
                     filename=None,
                     directory=None):
```

##### Arguments:

- **model**:
A Keras or PyTorch model or layer to convert
- **input_shape** (list, tuple or int):
Input shape, without the batch dimension.
- **weights** (bool):
Also convert weights. If set to *False*, only convert the model architecture
- **quiet** (bool):
If *False*, display a progress bar and some messages.
- **ignore_tests** (bool):
Whether tests should be ignored. If **weights** is *False*, tests are automatically ignored.
  - If set to *True*, the converted model will still be tested as a precaution.
If the models are not identical, only a warning is printed.
  - If set to *False* and the models are not identical, a *RuntimeWarning* is raised
- **input_range**:
Optional.
A list of 2 elements containing the min and max values to give as
input to the model when performing the tests. If *None*, models will
be tested on samples from the "standard normal" distribution.
- **filename**:
Optional.
Filename to give to the model's hdf5 file
- **directory**:
Optional.
Where to save the model's hdf5 file

##### Returns:

Name of the created hdf5 file

#### `pytorch_keras_converter.cadene_to_tf`

```
def cadene_to_tf(modelList=None,
                 outputDirectory=None,
                 quiet=True):
```

##### Arguments:

- **modelList**:
A *tuple* or *list* of names of the models to convert,
OR
a *str* giving the path of a file containing the names
of the models to convert (one model per line).
If you want a particular config for each model, put it between
parentheses after the model's name, for example:
'se_resnet50(pretrained=None)'
- **outputDirectory** (str):
Optional.
Where hdf5 files should be saved
- **quiet** (bool):
If *False*, display a progress bar

##### Raises:

- *TypeError*:
If **modelList** isn't a *str*, *list*, or *tuple*

##### Returns:

A list of created files
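
To use `API.convert` on a hand-written model, every layer must be supported (see the list in the next section). A hedged sketch, with an arbitrary toy architecture:

```python
import torch.nn as nn
import pytorch_keras_converter as pkc

# Toy model built only from supported layer types
model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.BatchNorm2d(8),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2),
)

# input_shape is channels-first and excludes the batch dimension
keras_model = pkc.API.convert(model, input_shape=(3, 32, 32), quiet=False)
```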

## Supported models and layers

### Supported [Cadene's models](https://github.com/cadene/pretrained-models.pytorch)

- [SE-ResNet50](https://github.com/Cadene/pretrained-models.pytorch#senet)
- [SE-ResNet101](https://github.com/Cadene/pretrained-models.pytorch#senet)
- [SE-ResNet152](https://github.com/Cadene/pretrained-models.pytorch#senet)
- [CaffeResNet101](https://github.com/Cadene/pretrained-models.pytorch#caffe-resnet)
- [BNInception](https://github.com/Cadene/pretrained-models.pytorch#bninception)
- [FBResNet152](https://github.com/Cadene/pretrained-models.pytorch#facebook-resnet)
- [ResNet18](https://github.com/Cadene/pretrained-models.pytorch#torchvision)
- [ResNet34](https://github.com/Cadene/pretrained-models.pytorch#torchvision)
- [ResNet50](https://github.com/Cadene/pretrained-models.pytorch#torchvision)
- [ResNet101](https://github.com/Cadene/pretrained-models.pytorch#torchvision)
- [ResNet152](https://github.com/Cadene/pretrained-models.pytorch#torchvision)

### Other models

Some simple PyTorch models are supported. Supported layers are:

- torch.nn.AvgPool2d(count_include_pad=True)
- torch.nn.MaxPool2d(dilation=1)
- torch.nn.BatchNorm2d
- torch.nn.Conv2d(groups=1, padding_mode='zeros')
- torch.nn.Linear
- torch.nn.ZeroPad2d
- torch.nn.Dropout
- torch.nn.Sequential
- torch.nn.ReLU
- torch.nn.Sigmoid
- torch.nn.AdaptiveAvgPool2d(output_size=1)

To convert a custom PyTorch model, use `pytorch_keras_converter.API`.

## Known issues

### Fails to import `pretrainedmodels`

If the `pretrainedmodels` module isn't available, you can still convert models manually.

1. Download `pretrainedmodels` from [GitHub](https://github.com/cadene/pretrained-models.pytorch)
```
git clone https://github.com/Cadene/pretrained-models.pytorch.git
```

2. Open a Python shell
```
cd pretrained-models.pytorch
python
```

3. Create a model
```
>>> import pretrainedmodels
>>> model = pretrainedmodels.se_resnet50()
>>> input_shape = pretrainedmodels.pretrained_settings['se_resnet50']['input_size']
```

4. Convert your model
```
>>> import pytorch_keras_converter as pkc
>>> pkc.API.convert_and_save(model, input_shape)
```

### Can't run the tests

If you use `pytorch_keras_converter.API` or `pytorch_keras_converter.cadene_to_tf`, it will only show a warning: `Warning: tests unavailable!`.

If you manually test models using `pytorch_keras_converter.tests`, it will raise an `InvalidArgumentError`.

This is because some TensorFlow layers only support NHWC (i.e., channels last) on CPU.

Using Anaconda solves this issue.

## Contributing

Contributions are always welcome!

Feel free to create an issue if you find a bug or have an idea for an improvement.
If you want to solve an issue or implement a new feature yourself, you can fork the repository and make a pull request.

This repository is fully [PEP8](https://www.python.org/dev/peps/pep-0008/) compliant.

When reporting bugs, please specify your OS, your version of Python, the versions of every dependency, and whether you're using a particular environment (Anaconda for example).

You can use `pip freeze` to see the versions of your Python modules.

## Authors

* [**sonibla**](https://github.com/sonibla)

See also the list of [contributors](https://github.com/sonibla/pytorch_keras_converter/contributors) who participated in this project.

## License

This project is licensed under the [MIT License](https://tldrlegal.com/license/mit-license) - see the [LICENSE](LICENSE) file for details.

## Acknowledgments

* [**Rémi Cadene**](https://github.com/Cadene)
* [**David Picard**](https://github.com/davidpicard)
* [**Pierre Jacob**](https://github.com/pierre-jacob)

--------------------------------------------------------------------------------
/pytorch_keras_converter/API.py:
--------------------------------------------------------------------------------
"""
Simple API to convert models between PyTorch and Keras

(Conversions from Keras to PyTorch aren't implemented)
"""
from . import utility
from . import tests
from . import io_utils as utils
import tensorflow


def convert(model,
            input_shape,
            weights=True,
            quiet=True,
            ignore_tests=False,
            input_range=None,
            save=None,
            filename=None,
            directory=None):
    """
    Conversion between PyTorch and Keras
    (Conversions from Keras to PyTorch aren't implemented)

    Arguments:
        -model:
            A Keras or PyTorch model or layer to convert
        -input_shape:
            Input shape (list, tuple or int), without the batch dimension.
        -weights (bool):
            Also convert weights. If set to False, only convert the model
            architecture
        -quiet (bool):
            If False, display a progress bar and some messages
        -ignore_tests (bool):
            Whether tests should be ignored.
            If set to True, the converted model will still be tested as a
            precaution; if models are not identical, only a warning is
            printed.
            If set to False and models are not identical, a RuntimeWarning
            is raised.
            If weights is False, tests are automatically ignored
        -input_range:
            Optional.
            A list of 2 elements containing min and max values to give as
            input to the model when performing the tests. If None, models will
            be tested on samples from the "standard normal" distribution.
        -save:
            Whether the model should be exported to a hdf5 file.
        -filename:
            Filename to give to model's hdf5 file. If filename is not None and
            save is not False, then save will automatically be set to True
        -directory:
            Where to save model's hdf5 file. If directory is not None and
            save is not False, then save will automatically be set to True

    Raises:
        -RuntimeWarning:
            If the converted and original models aren't identical, and
            ignore_tests is False

    Returns:
        If the model has been exported to a file, it will return the name of
        the file

        Else, it returns the converted model
    """

    if (filename is not None or directory is not None) and save is None:
        save = True
    if save is None:
        save = False

    if not weights:
        ignore_tests = True

    if not quiet:
        print('\nConversion...')

    # Converting:
    newModel = utility.convert(model=utility.LayerRepresentation(model),
                               input_size=input_shape,
                               weights=weights,
                               quiet=quiet)

    # Actually, newModel is a LayerRepresentation object
    # Equivalents:
    torchModel = newModel.equivalent['torch']
    kerasModel = newModel.equivalent['keras']

    if not quiet:
        print('Automatically testing converted model reliability...\n')

    # Checking converted model reliability
    tested = False
    try:
        meanSquaredError = tests.comparison(model1=torchModel,
                                            model2=kerasModel,
                                            input_shape=input_shape,
                                            input_range=input_range,
                                            quiet=quiet)
        tested = True
    except tensorflow.errors.InvalidArgumentError:
        print("Warning: tests unavailable!")

    if tested and meanSquaredError > 0.0001:
        if ignore_tests:
            print("Warning: converted and original models aren't identical! \
(mean squared error: {})".format(meanSquaredError))
        else:
            raise RuntimeWarning("Original and converted model do not match!\
\nOn random input data, outputs showed a mean squared error of {} (it should \
be below 1e-10)".format(meanSquaredError))
    elif not quiet and tested:
        print('\nOriginal and converted models match!\nMean squared err\
or: {}'.format(meanSquaredError))

    if save:
        if not quiet:
            print('Saving model...')

        defaultName = 'conversion_{}'.format(newModel.name)

        if filename is None:
            filename = defaultName

        # Formatting filename so that we don't overwrite any existing file
        file = utils.formatFilename(filename,
                                    directory)

        # Freezing Keras model (trainable = False everywhere)
        utils.freeze(kerasModel)

        # Save the entire model
        kerasModel.save(file + '.h5')

        if not quiet:
            print('Done!')

        return file + '.h5'

    if not quiet:
        print('Done!')

    return kerasModel


def convert_and_save(model,
                     input_shape,
                     weights=True,
                     quiet=True,
                     ignore_tests=False,
                     input_range=None,
                     filename=None,
                     directory=None):
    """
    Conversion between PyTorch and Keras, with automatic saving
    (Conversions from Keras to PyTorch aren't implemented)

    Arguments:
        -model:
            A Keras or PyTorch model or layer to convert
        -input_shape:
            Input shape (list, tuple or int), without the batch dimension.
        -weights (bool):
            Also convert weights. If set to False, only convert the model
            architecture
        -quiet (bool):
            If False, display a progress bar and some messages
        -ignore_tests (bool):
            Whether tests should be ignored.
            If set to True, the converted model will still be tested as a
            precaution; if models are not identical, only a warning is
            printed.
            If set to False and models are not identical, a RuntimeWarning
            is raised.
            If weights is False, tests are automatically ignored
        -input_range:
            Optional.
            A list of 2 elements containing min and max values to give as
            input to the model when performing the tests. If None, models will
            be tested on samples from the "standard normal" distribution.
        -filename:
            Optional.
            Filename to give to model's hdf5 file
        -directory:
            Optional.
            Where to save model's hdf5 file

    Returns:
        Name of created hdf5 file
    """

    return convert(model=model,
                   input_shape=input_shape,
                   weights=weights,
                   quiet=quiet,
                   ignore_tests=ignore_tests,
                   input_range=input_range,
                   save=True,
                   filename=filename,
                   directory=directory)
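
A usage sketch for `convert_and_save` (model choice, shape, and paths are illustrative; `pretrained=None` builds the architecture with random weights):

```python
import pretrainedmodels
import pytorch_keras_converter as pkc

model = pretrainedmodels.resnet18(pretrained=None)
path = pkc.API.convert_and_save(model, input_shape=(3, 224, 224),
                                filename='resnet18', directory='out')
print(path)  # e.g. 'out/resnet18.h5'; a '_(0)' suffix is added on name collisions
```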

--------------------------------------------------------------------------------
/pytorch_keras_converter/__init__.py:
--------------------------------------------------------------------------------
"""
Conversion Utility
Convert and analyse Keras and pyTorch models

Dependencies:
    -torch
    -pretrainedmodels
    -tensorflow
    -graphviz
    -numpy
    -h5py
    -tqdm


Features:

#########################
####- CONVERSIONS: -####
#########################
    -pyTorch -> Keras:
        simple models and Cadene's models only. You can use
        original weights or random ones. Converting weights takes a lot of
        time because of Keras' set_weights function

        To easily convert Cadene models see cadene_to_tf.py

        Supported Cadene's models:
            -se_resnet50
            -se_resnet101
            -se_resnet152
            -cafferesnet101
            -bninception
            -fbresnet152
            -resnet18
            -resnet34
            -resnet50
            -resnet101
            -resnet152

        Supported layers:
            -torch.nn.AvgPool2d(count_include_pad=True)
            -torch.nn.MaxPool2d(dilation=1)
            -torch.nn.BatchNorm2d
            -torch.nn.Conv2d(groups=1, padding_mode='zeros')
            -torch.nn.Linear
            -torch.nn.ZeroPad2d
            -torch.nn.Dropout
            -torch.nn.Sequential
            -torch.nn.ReLU
            -torch.nn.Sigmoid
            -torch.nn.AdaptiveAvgPool2d(output_size=1)

    -Keras -> pyTorch: NOT IMPLEMENTED

#################################
####- DEV AND DEBUG TOOLS: -####
#################################

    -pyTorch layers listing: OK. See utility/LayerRepresentation.py for
        details (LayerRepresentation.summary)

    -Keras layers listing: OK. See utility/LayerRepresentation.py for details
        (LayerRepresentation.summary).

    -pyTorch DOT graph rendering/export: OK, but edges aren't reliable. See
        utility/LayerRepresentation.py for details (LayerRepresentation.DOT).

    -Keras DOT graph rendering/export: OK, but edges aren't reliable. See
        utility/LayerRepresentation.py for details (LayerRepresentation.DOT).

    -Saving a Keras model to a .py file (architecture only):
        OK, only if the original model was a simple pyTorch model. See
        utility/core.py for details.

    -Saving an entire model after a pyTorch to Keras conversion:
        OK. See API.py for details.

    -Comparing 2 models (Keras, pyTorch) on random data:
        OK (mean squared error, progress bar available, several tests
        available). See tests.py for details.

"""

name = "pytorch_keras_converter"
supported_cadene_models = ['se_resnet50',
                           'se_resnet101',
                           'se_resnet152',
                           'cafferesnet101',
                           'bninception',
                           'fbresnet152',
                           'resnet18',
                           'resnet34',
                           'resnet50',
                           'resnet101',
                           'resnet152']

from . import API
from .cadene_to_tf import cadene_to_tf
from . import utility

--------------------------------------------------------------------------------
/pytorch_keras_converter/cadene_to_tf/__init__.py:
--------------------------------------------------------------------------------
"""
API to convert Cadene's models

Supported models:
    -se_resnet50
    -se_resnet101
    -se_resnet152
    -cafferesnet101
    -bninception
    -fbresnet152
    -resnet18
    -resnet34
    -resnet50
    -resnet101
    -resnet152

Convert and save models with cadene_to_tf

Examples:
    cadene_to_tf(modelList=['se_resnet50'], quiet=False)

    cadene_to_tf(modelList=['se_resnet50',
                            'bninception(pretrained=None)'])

    cadene_to_tf(modelList='models.txt')

"""
from .cadene_to_tf import cadene_to_tf
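
The `supported_cadene_models` list exported above pairs naturally with `cadene_to_tf`. For example, to convert every supported architecture with random weights (the output directory is illustrative):

```python
import pytorch_keras_converter as pkc

models = [name + '(pretrained=None)' for name in pkc.supported_cadene_models]
files = pkc.cadene_to_tf(models, outputDirectory='converted', quiet=False)
```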
Skipping".format(model_name)) 40 | else: 41 | if not quiet: 42 | print("Converting {}\n".format(model_name)) 43 | name = model_name 44 | 45 | if model_params is None or model_params == dict(): 46 | model = getattr(pretrainedmodels, model_name)() 47 | else: 48 | model = getattr(pretrainedmodels, model_name)(**model_params) 49 | name = name + '(' 50 | for param, value in model_params.items(): 51 | name = name + param + '=' + str(value) + ', ' 52 | name = name[:-2] + ')' 53 | 54 | settings = pretrainedmodels.pretrained_settings[model_name] 55 | 56 | if 'pretrained' in model_params.keys() and \ 57 | model_params['pretrained'] is not None: 58 | 59 | pretrained = model_params['pretrained'] 60 | input_shape = settings[pretrained]['input_size'] 61 | input_range = settings[pretrained]['input_range'] 62 | else: 63 | for pretrained in settings.keys(): 64 | input_shape = settings[pretrained]['input_size'] 65 | input_range = settings[pretrained]['input_range'] 66 | break 67 | 68 | try: 69 | file = API.convert_and_save(model, 70 | input_shape=input_shape, 71 | weights=True, 72 | quiet=quiet, 73 | ignore_tests=False, 74 | input_range=input_range, 75 | filename=name, 76 | directory=outputDirectory) 77 | return file 78 | except RuntimeWarning: 79 | if not quiet: 80 | print("Failed to convert model {}".format(model_name)) 81 | return None 82 | 83 | 84 | def cadene_to_tf(modelList=None, outputDirectory=None, quiet=True): 85 | """ 86 | cadene_to_tf : automatically converts and save Cadene's models 87 | 88 | Arguments: 89 | -modelList: 90 | A tuple of names of the models to convert 91 | OR 92 | A list of names of the models to convert 93 | OR 94 | A str telling the emplacement of a file containing names 95 | of models to convert (one model per line) 96 | 97 | If you want a particular config for each model, put it between 98 | parenthesis after model's name, for example: 99 | 100 | 'se_resnet50(pretrained=None)' 101 | -outputDirectory (str): 102 | Optionnal. 


def cadene_to_tf(modelList=None, outputDirectory=None, quiet=True):
    """
    cadene_to_tf: automatically converts and saves Cadene's models

    Arguments:
        -modelList:
            A tuple of names of the models to convert
            OR
            A list of names of the models to convert
            OR
            A str giving the path of a file containing names
            of models to convert (one model per line)

            If you want a particular config for each model, put it between
            parentheses after the model's name, for example:

            'se_resnet50(pretrained=None)'
        -outputDirectory (str):
            Optional. Where .h5 files should be saved
        -quiet (bool):
            If False, display a progress bar

    Raises:
        -TypeError:
            If modelList isn't a str, list, or tuple

    Returns:
        A list of created files

    Examples:
        cadene_to_tf(modelList=['se_resnet50'], quiet=False)

        cadene_to_tf(modelList=['se_resnet50',
                                'bninception(pretrained=None)'])

        cadene_to_tf(modelList='models.txt')
    """

    if modelList is None:
        return None

    if isinstance(modelList, str):
        with open(modelList, 'r') as file:
            models = file.read().split('\n')
    elif isinstance(modelList, tuple):
        models = list(modelList)
    elif isinstance(modelList, list):
        models = modelList
    else:
        raise TypeError('modelList has to be str, list, or tuple')

    fileList = []
    failed = []

    for i in range(len(models)):
        assert isinstance(models[i], str)
        models[i] = utils.removeBorderSpaces(models[i])
        if '(' in models[i] or ')' in models[i]:
            extract = utils.extractFunctionArguments(models[i])
            model_name = utils.removeBorderSpaces(extract[0])
            model_params = extract[1]
        else:
            model_name = models[i]
            model_params = dict()
        if len(model_name) > 0:
            file = sync_model(model_name,
                              model_params,
                              outputDirectory=outputDirectory,
                              quiet=quiet)
            if file is not None:
                fileList.append(file)
            else:
                failed.append(model_name)

    if failed and not quiet:
        print("\nWarning: some models were not converted!")
        print(str(failed))

    return fileList

--------------------------------------------------------------------------------
/pytorch_keras_converter/cadene_to_tf/utils.py:
--------------------------------------------------------------------------------

def removeBorderSpaces(inputStr):
    """
    Function that removes trailing and leading whitespace

    Argument:
        -inputStr (str)

    Returns:
        A str of inputStr without leading and trailing whitespace

    """
    if len(inputStr) == 0:
        return str()
    if inputStr[0] == ' ':
        return removeBorderSpaces(inputStr[1:])
    if inputStr[-1] == ' ':
        return removeBorderSpaces(inputStr[:-1])
    return inputStr


def extractFunctionArguments(fct):
    """extractFunctionArguments(fct)

    Argument:
        -fct:
            a str of the syntax used to call the function
            example: "fct(a=1,b=2,c=3,4)"

    Return:
        a list of 2 elements: the name of the called function, and a
        dictionary containing the arguments as values, keyed by their
        names. Unnamed arguments are keyed by their position (0-based)

    Example:
        extractFunctionArguments("fct(a=1,b=2,c=3,4)")
        returns
        ['fct', {'a': 1, 'b': 2, 'c': 3, 3: 4}]
    """

    def searchValidElement(inputStr, element):
        # We can't use fct.split(',') because there may be some commas (',')
        # inside str literals or nested function calls.
        # We'll use a for loop.

        # List that will contain the position of each valid occurrence
        commaPosi = list()
        # StrType1: 'str'
        # StrType2: "str"
        # StrType3: '''str'''
        # StrType6: """str"""
        inStrType1 = False
        inStrType2 = False
        inStrType3 = False
        inStrType6 = False

        openedParenthesis = 0

        for char in range(len(inputStr)):
            # We are inside a str if any of the four flags is set
            inStr = inStrType1 or inStrType2 or inStrType3 or inStrType6
            if inStr:
                # We are currently in a str
                if (inStrType1 and inputStr[char] == "'"):
                    inStrType1 = False
                elif (inStrType2 and inputStr[char] == '"'):
                    inStrType2 = False
                elif (inStrType3 and inputStr[char] == "'" and
                        inputStr[char-1] == "'" and inputStr[char-2] == "'"):
                    inStrType3 = False
                elif (inStrType6 and inputStr[char] == '"' and
                        inputStr[char-1] == '"' and inputStr[char-2] == '"'):
                    inStrType6 = False
            else:
                # We are *not* in a str
                if inputStr[char] == '(':
                    openedParenthesis += 1
                elif inputStr[char] == ')':
                    openedParenthesis -= 1
                elif (inputStr[char] == '"'):
                    # Starting a str of type 6 or 2
                    if char+2 < len(inputStr)-1:
                        if (inputStr[char+1] == '"' and
                                inputStr[char+2] == '"'):
                            inStrType6 = True
                        else:
                            inStrType2 = True
                elif (inputStr[char] == "'"):
                    # Starting a str of type 1 or 3
                    if char+2 < len(inputStr)-1:
                        if inputStr[char+1] == "'" and inputStr[char+2] == "'":
                            inStrType3 = True
                        else:
                            inStrType1 = True
                elif inputStr[char] == element and openedParenthesis == 0:
                    # We found a valid separator!
                    commaPosi.append(char)
        return commaPosi

    # Looking for the first '('
    for char in range(len(fct)):
        if fct[char] == '(':
            start = char
            break

    # Looking for the last ')'
    for char in range(len(fct)-1, 0, -1):
        if fct[char] == ')':
            stop = char
            break

    try:
        if stop:
            pass
    except NameError:
        stop = -1

    calledFct = fct[:start]
    fct = fct[start+1:stop]
    # Now we only have what was in the parentheses

    # Let's extract the arguments.

    commaPosi = searchValidElement(fct, ',')

    # List of arguments:
    args = list()

    for commaNumber in range(len(commaPosi)):
        if commaNumber == 0:
            args.append(fct[:commaPosi[commaNumber]])
        else:
            args.append(fct[commaPosi[commaNumber-1]+1:commaPosi[commaNumber]])

    # Don't forget the last argument!
    if commaPosi:
        args.append(fct[commaPosi[-1]+1:])
    else:
        args.append(fct)

    # Now args is a list of all arguments passed to the function
    # Example: ['arg1', 'fct(fa1,fa2)', 'arg3 = "12"']

    # We still need to analyse it to separate arguments' names and their
    # values. We have to find the "=" chars:

    equalsPosi = list()

    for argument in args:
        equalsPosi.append(searchValidElement(argument, '='))
        # each element of equalsPosi should be an empty list or a list
        # containing only one integer

    # Finally, let's build the output dict
    output = dict()

    for argNumber in range(len(args)):
        if equalsPosi[argNumber]:
            name = removeBorderSpaces(
                args[argNumber][:equalsPosi[argNumber][0]])
            value = removeBorderSpaces(
                args[argNumber][equalsPosi[argNumber][0]+1:])
            output[name] = value
        else:
            output[argNumber] = removeBorderSpaces(args[argNumber])

    for key in output.keys():
        try:
            output[key] = int(output[key])
        except ValueError:
            pass

        if output[key] == 'True':
            output[key] = True
        if output[key] == 'False':
            output[key] = False
        if output[key] == 'None':
            output[key] = None

    return [calledFct, output]
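
For reference, this is how the parser above handles a typical entry from a model list (expected values derived from the code; the keyword arguments are illustrative):

```python
from pytorch_keras_converter.cadene_to_tf import utils

name, params = utils.extractFunctionArguments(
    "se_resnet50(pretrained=None, num_classes=1000)")
# name   == 'se_resnet50'
# params == {'pretrained': None, 'num_classes': 1000}
```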

--------------------------------------------------------------------------------
/pytorch_keras_converter/io_utils.py:
--------------------------------------------------------------------------------
"""
Functions used when saving a model after a conversion
"""
import os
from time import gmtime, strftime
import platform


def formatFilename(filename=None,
                   directory=None,
                   useTime=None):
    """
    Given filename and directory, formatFilename computes the file to actually
    create: if a file with this name already exists it will generate another
    name by adding '_(0)' to filename.

    Arguments:
        -filename:
            Optional. The preferred name to give to the file. The program will
            search for an available name as similar as possible to filename
        -directory:
            Optional. Where to store the file
        -useTime (bool):
            Use the current time in the filename. If set to None and no
            filename is provided, the current time will be used

    Returns:
        An available filename with its directory (directory/filename) (or
        directory\\filename if using Windows)
    """

    def removeExtension(name):
        """
        Function that removes the extension in a filename

        Argument:
            -name (str):
                The filename to analyse

        Returns:
            name, without the extension

        For example, if name == 'abc.def.ghi',
        removeExtension(name) == 'abc.def'
        """
        if '.' in name:
            return '.'.join(name.split('.')[:-1])
        return name

    def getExtension(name):
        """
        Function that returns the extension of a file

        Argument:
            -name (str):
                The filename to analyse

        Returns:
            The extension of name

        For example, if name == 'abc.def.ghi',
        getExtension(name) == '.ghi'
        """
        if '.' in name:
            return '.' + name.split('.')[-1]
        return ''

    # Use current time in filename if necessary:
    if filename is None:
        if useTime or useTime is None:
            currentTime = strftime("%d_%b_%Y_%H_%M_%S", gmtime())
            file = str(currentTime)
        else:
            file = ''
    else:
        file = str(filename)
        if useTime:
            currentTime = strftime("%d_%b_%Y_%H_%M_%S", gmtime())
            file = file + '_' + str(currentTime)

    # Remove border spaces:
    while file and file[-1] == ' ':
        file = file[:-1]
    while file and file[0] == ' ':
        file = file[1:]

    # Check that directory ends with '/' (or '\' if using Windows)
    if isinstance(directory, str):
        if platform.system() == 'Windows':
            if not(directory[-1] == '\\'):
                directory = directory + '\\'
        else:
            if not(directory[-1] == '/'):
                directory = directory + '/'
        file = directory + file

    # Add a number to find an available name
    if os.path.isfile(file):
        number = 0
        numStr = '_(' + str(number) + ')'
        newFile = removeExtension(file) + numStr + getExtension(file)
        while os.path.isfile(newFile):
            number += 1
            numStr = '_(' + str(number) + ')'
            newFile = removeExtension(file) + numStr + getExtension(file)
        file = newFile

    return file


def freeze(model):
    """
    Function that freezes a Keras model (in place)

    Useful (sometimes necessary) before saving it in hdf5 format

    Argument:
        -model:
            A Keras model or layer
    """
    if 'layers' in dir(model):
        for layer in model.layers:
            layer.trainable = False
            if 'layers' in dir(layer):
                freeze(layer)
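
A short sketch of `formatFilename`'s collision handling (paths are illustrative; `API.convert` appends '.h5' itself afterwards):

```python
from pytorch_keras_converter import io_utils

path = io_utils.formatFilename(filename='model', directory='exports')
# -> 'exports/model' if available, otherwise 'exports/model_(0)',
#    then 'exports/model_(1)', and so on
```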

--------------------------------------------------------------------------------
/pytorch_keras_converter/tests.py:
--------------------------------------------------------------------------------
"""
Compare 2 models on random data.

Available comparisons:
- Keras    Keras
- Keras    PyTorch
- PyTorch  PyTorch
- PyTorch  Keras

Useful to check a converted model's reliability
"""

try:
    import tensorflow.keras as keras
    import tensorflow
except ImportError:
    tensorflow = None
    try:
        import keras
    except ImportError:
        keras = None

# Same for pyTorch:
try:
    import torch
except ImportError:
    torch = None

import numpy as np
from .utility.LayerRepresentation import UsedFramework as UsedFrk
from .utility.LayerRepresentation import normalizeShape
from tqdm import tqdm


def pyTorchForwardPass(model, torchInput):
    """
    Make one forward pass on a PyTorch model (batch size == 1), remove the
    batch dimension, and convert the output to a numpy array.

    Arguments:
        -model:
            A PyTorch model
        -torchInput:
            A PyTorch Tensor

    Returns:
        A numpy array of raw output data
    """
    out = model(torchInput).detach().numpy()
    out = np.squeeze(out, axis=0)
    return out


def kerasForwardPass(model, kerasInput):
    """
    Make one forward pass on a Keras model (batch size == 1)

    Arguments:
        -model:
            A Keras model
        -kerasInput:
            A Tensorflow Tensor

    Returns:
        A numpy array of raw output data
    """
    out = model.predict(kerasInput, steps=1)
    return out


def forwardPass(model, numpyInput):
    """
    Make one forward pass on a Keras or PyTorch model (batch size == 1)

    Arguments:
        -model:
            A Keras or PyTorch model
        -numpyInput:
            A numpy array to feed the model

    Returns:
        A numpy array of raw output data

    Raises:
        -NotImplementedError:
            If the provided model isn't supported
    """
    # Convert numpyInput to a PyTorch Tensor:
    torchInput = torch.from_numpy(numpyInput)
    torchInput = torchInput.type(torch.FloatTensor)

    # Convert numpyInput to a Tensorflow Tensor:
    if tensorflow is None:
        # Using Keras after "import keras"
        kerasInput = keras.backend.tf.convert_to_tensor(torchInput.numpy())
    else:
        # Using Keras after "import tensorflow.keras as keras"
        kerasInput = tensorflow.convert_to_tensor(torchInput.numpy())

    if UsedFrk(model) == 'torch':
        out = pyTorchForwardPass(model, torchInput)
    elif UsedFrk(model) == 'keras':
        out = kerasForwardPass(model, kerasInput)
    else:
        error = "Model {} not recognized".format(str(model))
        raise NotImplementedError(error)
    return out


def one_random_test(model1,
                    model2,
                    input_shape=None,
                    numpyInput=None,
                    input_range=None):
    """
    This function does one comparison between model1 and model2.
    If numpyInput is set to None, model1 and model2 will be tested on random
    data

    Arguments:
        -model1:
            A PyTorch or Keras model
        -model2:
            A PyTorch or Keras model
        -input_shape:
            A list, int or tuple of the input shape (without batch size)
            Optional if numpyInput is provided
        -numpyInput:
            A numpy array containing the data to test the models on.
            Optional if input_shape is provided
        -input_range:
            Optional.
            A list of 2 elements containing min and max values to give as
            input to the model. If None, models will be tested on
            samples from the "standard normal" distribution.

    Returns:
        A tuple of 2 numpy arrays containing raw output data of model1 and
        model2

    """
    if torch is None or keras is None:
        raise ImportError("pyTorch or Keras unavailable!")

    if numpyInput is None:
        # Generate random data:

        # First, normalize input_shape as a tuple:
        input_shape = normalizeShape(input_shape)

        if input_range is None:
            # Generate random data from the "standard normal" distribution:
            numpyInput = np.random.randn(1, *input_shape)
        else:
            # Generate uniform random data between min(input_range) and
            # max(input_range):
            r = input_range
            randArray = np.random.rand(1, *input_shape)
            numpyInput = (randArray * (max(r)-min(r))) + min(r)

    # Make a forward pass for each model:
    out1 = forwardPass(model1, numpyInput)
    out2 = forwardPass(model2, numpyInput)

    return out1, out2


def many_random_tests(model1,
                      model2,
                      input_shape,
                      number=100,
                      input_range=None):
    """
    This function does many comparisons between model1 and model2 on *one*
    random Tensor

    Arguments:
        -model1:
            A PyTorch or Keras model
        -model2:
            A PyTorch or Keras model
        -input_shape:
            A list, int or tuple of the input shape (without batch size)
        -number:
            Number of tests to perform on the models. It should be set to 1
            if the models don't have any random behavior, such as Dropout
            layers
        -input_range:
            Optional.
            A list of 2 elements containing min and max values to give as
            input to the model. If None, models will be tested on
            samples from the "standard normal" distribution.

    Returns:
        A tuple of 2 numpy arrays containing the means of the output data
        from model1 and model2

    """
    if torch is None or keras is None:
        raise ImportError("pyTorch or Keras unavailable!")

    # First, normalize input_shape as a tuple:
    input_shape = normalizeShape(input_shape)

    # Lists to store raw output data
    testsModel1 = list()
    testsModel2 = list()

    if input_range is None:
        # Generate random data from the "standard normal" distribution:
        numpyInput = np.random.randn(1, *input_shape)
    else:
        # Generate uniform random data between min(input_range) and
        # max(input_range):
        r = input_range
        randArray = np.random.rand(1, *input_shape)
        numpyInput = (randArray * (max(r)-min(r))) + min(r)

    for _ in range(number):
        # Perform several tests on the same input array
        out1, out2 = one_random_test(model1,
                                     model2,
                                     input_shape,
                                     numpyInput,
                                     input_range=input_range)
        testsModel1.append(out1)
        testsModel2.append(out2)

    mean1 = np.mean(testsModel1, axis=0)
    mean2 = np.mean(testsModel2, axis=0)

    return mean1, mean2


def standard_test(model1,
                  model2,
                  input_shape,
                  input_range=None,
                  numberA=10,
                  numberB=2,
                  quiet=False):
    """
    This function does many comparisons between model1 and model2 on *several*
    random Tensors

    The higher numberA and numberB are, the more accurate the test will be,
    but the slower it will get.
    In total, we have to make numberA*numberB*2 forward passes!

    Arguments:
        -model1:
            A PyTorch or Keras model
        -model2:
            A PyTorch or Keras model
        -input_shape:
            A list, int or tuple of the input shape (without batch size)
        -input_range:
            Optional.
            A list of 2 elements containing min and max values to give as
            input to the model. If None, models will be tested on
            samples from the "standard normal" distribution.
        -numberA:
            Number of tests to perform on the models (i.e. the number of
            random input tensors to generate and test the models on)
        -numberB:
            Number of tests to perform on the models using each random
            tensor. It should be set to 1 if the models don't have any random
            behavior, such as Dropout layers
        -quiet (bool):
            If False, display a progress bar

    Returns:
        A numpy array containing the differences between the models for each
        output

    """

    if not quiet:
        bar = tqdm(total=numberA)

    # First, normalize input_shape as a tuple:
    input_shape = normalizeShape(input_shape)

    differences = list()

    for _ in range(numberA):
        if not quiet:
            bar.update()

        # Perform numberA tests. Each test will generate a random tensor and
        # make numberB forward passes on each model.
        # In total, we have to make numberA*numberB*2 forward passes
        out1, out2 = many_random_tests(model1,
                                       model2,
                                       input_shape,
                                       number=numberB,
                                       input_range=input_range)

        # Reshape out1 and out2 into 1-dimension arrays. This is not necessary
        out1 = np.reshape(out1, -1)
        out2 = np.reshape(out2, -1)

        diff = out1 - out2
        differences.append(diff)

    if not quiet:
        bar.close()

    return np.array(differences)


def comparison(model1, model2, input_shape, input_range=None, quiet=True):
    """
    This function does a complete comparison between model1 and model2.

    Arguments:
        -model1:
            A PyTorch or Keras model
        -model2:
            A PyTorch or Keras model
        -input_shape:
            A list, int or tuple of the input shape (without batch size)
        -input_range:
            Optional.
            A list of 2 elements containing min and max values to give as
            input to the model. If None, models will be tested on
            samples from the "standard normal" distribution.
        -quiet (bool):
            If False, display a progress bar

    Returns:
        The mean squared error between the two models
        If the models are identical, it should be below 1e-10

    """

    if model1 == model2 and not quiet:
        print("Those models are identical twins...")
        if model1 is model2:
            print("Actually they are the same python object.")
        print("\nWe should have a really, really low MSE.")
        print("Use Ctrl+C to cancel")

    # First, normalize input_shape as a tuple:
    input_shape = normalizeShape(input_shape)

    differences = standard_test(model1,
                                model2,
                                input_shape,
                                input_range=input_range,
                                quiet=quiet)

    MSE = float(np.mean(np.square(np.reshape(differences, -1))))

    if not quiet:
        print("\nMean Squared Error: MSE={}".format(round(MSE, 3)))

    return MSE
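
As a sanity check, comparing a model with itself should give an essentially zero mean squared error (the toy model is illustrative; both PyTorch and Keras must be installed):

```python
import torch.nn as nn
from pytorch_keras_converter import tests

model = nn.Sequential(nn.Conv2d(3, 4, kernel_size=3), nn.ReLU())
mse = tests.comparison(model, model, input_shape=(3, 16, 16), quiet=False)
assert mse < 1e-10
```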

--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/LayerRepresentation.py:
--------------------------------------------------------------------------------
try:
    import tensorflow.keras as keras
except ImportError:
    try:
        import keras
    except ImportError:
        keras = None
try:
    import torch
except ImportError:
    torch = None
try:
    import graphviz  # Useful for DOT graph creation and export
except ImportError:
    graphviz = None

from . import torch2keras as t2k


def UsedFramework(module):
    """
    Tells which framework module is based on.

    Arguments:
        -module:
            The object to test

    Returns:
        -'keras' if module is a keras object
        -'torch' if module is a pyTorch object
        -None if none of them
    """

    moduleClass = str(module.__class__)[8:-2]
    IsKeras = 'keras' in moduleClass
    IsTorch = 'torch' in moduleClass
    if '_backend' in dir(module):
        IsTorch = IsTorch or 'torch' in str(module._backend)

    if IsKeras:
        return 'keras'
    if IsTorch:
        return 'torch'
    return None


def normalizeShape(shape):
    """
    Function used to convert a shape to a tuple

    Arguments:
        -shape:
            int, tuple, list, or anything convertible to tuple

    Raises:
        -TypeError:
            If the conversion to tuple failed

    Returns:
        A tuple representing the shape
    """
    if isinstance(shape, tuple):
        normalizedShape = shape
    elif isinstance(shape, int):
        normalizedShape = (shape,)
    else:
        try:
            normalizedShape = tuple(shape)
        except TypeError:
            # It was too difficult.
            # This case will never happen if the API is used correctly
            raise TypeError("Could not convert provided shape to tuple")
    return normalizedShape


class LayerRepresentation:
    """
    Easy-to-use representation of a layer.

    When you create an object of this class, all sub layers (also called
    children) are automatically decomposed and referenced in self.children

    This is useful to go through a model considering only simple layers one
    by one

    Argument:
        -module:
            A PyTorch or Keras model or layer to import
    """

    # --- CORE METHODS ---

    def __init__(self, module=None):

        # --- INITIALIZATIONS ---
        self.parent = None
        # parent represents the block in which the layer is (for example a
        # Sequential)

        self.children = list()
        # children is a list of direct sub layers (for example if module is a
        # Sequential block)

        self.originalFramework = None
        # Framework of module ('keras' or 'torch'). Stays None if the provided
        # model isn't compatible

        self.name = str(module.__class__.__name__)
        # name will change if module has been created by recursion (with the
        # addChild method, see below for details)
        self.type = str(module.__class__.__name__)
        self.detailedType = str(module.__class__)[8:-2]
        self.detailedTypeList = self.detailedType.split('.')

        self.equivalent = dict()
        self.equivalentTxt = dict()
        # These are dictionaries containing equivalents of the layer (or
        # model) in PyTorch or Keras.
        # self.equivalent['torch'] is a PyTorch object (if it exists)
        # self.equivalent['keras'] is a Keras object (if it exists)
        # self.equivalentTxt contains equivalents that can be put directly in
        # a python file

        self.input_shape = None
        self.output_shape = None
        # In/Output shapes, without batch size, channels first (tuple)
        # For example: (3, 299, 299)

        self.InputConnectedTo = set()
        # set telling where the input comes from.
        # If the input comes from the input of self.parent (for example if the
        # layer is the first one in a Sequential block), then 0 will be in
        # this set
        # If the input comes from the output of a brother (for example if the
        # layer is in a Sequential block but not in first place), then the
        # LayerRepresentation of this brother will be in this set

        self.OutputConnectedTo = set()
        # Same, for output.
        # set telling where the output goes to.
        # If the output goes to the output of self.parent (for example if the
        # layer is the last one in a Sequential block), then 0 will be in this
        # set
        # If the output goes to the input of a brother (for example if the
        # layer is in a Sequential block but not at last place), then the
        # LayerRepresentation of this brother will be in this set

        # Initializing self.framework, self.children and self.equivalent:

        framework = UsedFramework(module)

        if framework == 'torch':
            # PyTorch!

            self.originalFramework = 'torch'
            # We don't want to modify the model when converting it
            self.equivalent['torch'] = module.eval()

            # listing each sub layer (child)
            children = dict(module.named_children()).items()
            for name, child in children:
                # Hidden recursion (see the addChild method for details)
                self.addChild(child, name=name)

        elif framework == 'keras':
            # Keras!

            self.originalFramework = 'keras'
            # We don't want to modify the model when converting it
            if keras is None:
                # But Keras isn't available, so we can't clone the model...
                self.equivalent['keras'] = module
            else:
                self.equivalent['keras'] = keras.models.clone_model(module)

            # listing each sub layer (child)
            if 'layers' in dir(module):
                for child in module.layers:
                    # Hidden recursion (see the addChild method for details)
                    self.addChild(child, name=child.name)

        self.kerasOutput = None
        self.kerasInput = None
        # During a PyTorch to Keras conversion, if the Keras Functional API is
        # used, inputs and outputs are stored here.

    def __setattr__(self, attr, val):
        object.__setattr__(self, attr, val)

        if 'kerasInput' in dir(self):
            # We have to check this first (if kerasInput isn't in dir(self),
            # it means that we're still in self.__init__, and we should not
            # interfere with it)

            if attr == 'input_shape':
                # If input_shape was modified, we check that it's a tuple
                if not isinstance(self.input_shape, tuple):
                    self.input_shape = normalizeShape(self.input_shape)

            elif attr == 'output_shape':
                # If output_shape was modified, we check that it's a tuple
                if not isinstance(self.output_shape, tuple):
                    self.output_shape = normalizeShape(self.output_shape)

            elif attr == 'kerasInput':
                # If kerasInput was modified, we automatically compute (or
                # update) self.equivalent['keras'] or self.kerasOutput, if
                # possible
                inputExist = self.kerasInput is not None
                outputExist = self.kerasOutput is not None
                equivExist = 'keras' in self.equivalent.keys()

                if inputExist and equivExist:
                    # We can compute (update or create) kerasOutput!
                    output = self.equivalent['keras'](self.kerasInput)
                    self.kerasOutput = output

                if inputExist and outputExist and not(equivExist):
                    # We can compute (create) the keras equivalent!
                    if keras is None:
                        err = "Could not import keras. Conversion failed!"
                        raise ImportError(err)
                    kerasEq = keras.models.Model(inputs=self.kerasInput,
                                                 outputs=self.kerasOutput,
                                                 name=self.name)
                    self.equivalent['keras'] = kerasEq

            if self.kerasInput is not None and self.input_shape is not None:
                # Here, we check that the input computed with the Keras
                # Functional API has the correct shape (i.e. self.input_shape)
                # If the shapes are not the same, it means that the conversion
                # failed, and all we have to do is raise a RuntimeError to
                # warn the user about it

                shape = self.kerasInputShape()

                if shape is not None and shape != self.input_shape:
                    err = "Conversion failed! Details: at layer {}, input \
shape should be {}, but is {}\
".format(self.name, self.input_shape, shape)

                    raise RuntimeError(err)

            if self.kerasOutput is not None and self.output_shape is not None:
                # Here, we check that the output computed with the Keras
                # Functional API has the correct shape (i.e.
                # self.output_shape).
                # If the shapes are not the same, it means that the conversion
                # failed, and all we have to do is raise a RuntimeError to
                # warn the user about it

                shape = self.kerasOutputShape()

                if shape is not None and shape != self.output_shape:
260 |     # --- FAMILY METHODS ---
261 | 
262 |     def __getitem__(self, index):
263 |         """
264 |         Equivalent of self.getChild if index is str
265 | 
266 |         Equivalent of self.getChildId with framework=None if index is int
267 |         """
268 |         if isinstance(index, str):
269 |             return self.getChild(name=index)
270 |         if isinstance(index, int):
271 |             return self.getChildId(identifier=index)
272 |         return None
273 | 
274 |     def getChildId(self, identifier=None, framework=None):
275 |         """
276 |         If framework is None: search for a layer whose id is identifier
277 |         Else: search for a layer whose framework equivalent's id is identifier
278 | 
279 |         If no child was found, search in the whole model
280 |         Return None if no layer was found at all
281 | 
282 |         Arguments:
283 |             -identifier
284 |             -framework
285 | 
286 |         Returns:
287 |             The LayerRepresentation of the corresponding layer if it exists
288 |         """
289 |         if framework is None:
290 |             for child in self.children:
291 |                 if id(child) == identifier:
292 |                     return child
293 |             if id(self) == identifier:
294 |                 return self
295 |             mainParent = self.firstParent()
296 |             if id(mainParent) == identifier:
297 |                 return mainParent
298 |             for child in mainParent.allChildren():
299 |                 if id(child) == identifier:
300 |                     return child
301 |             return None
302 | 
303 |         else:
304 |             # framework is not None : look for equivalents
305 | 
306 |             for child in self.children:
307 |                 # Look in self.children first
308 |                 if framework in child.equivalent.keys():
309 |                     equiv = child.equivalent[framework]
310 |                     if id(equiv) == identifier:
311 |                         return child
312 | 
313 |             if framework in self.equivalent.keys():
314 |                 if id(self.equivalent[framework]) == identifier:
315 |                     return self
316 |             mainParent = self.firstParent()
317 |             if framework in mainParent.equivalent.keys():
318 |                 if id(mainParent.equivalent[framework]) == identifier:
319 |                     return mainParent
320 | 
321 |             for child in mainParent.allChildren():
322 |                 # Look in the entire model
323 |                 if framework in child.equivalent.keys():
324 |                     equiv = child.equivalent[framework]
325 |                     if id(equiv) == identifier:
326 |                         return child
327 |             return None
328 | 
329 |     def getChild(self, name=None):
330 |         """
331 |         Return the child whose name is name (argument)
332 |         Return None if no child was found
333 |         """
334 |         for child in self.children:
335 |             if child.name == name:
336 |                 return child
337 |         return None
338 | 
339 |     def addChild(self, childEq, name=None):
340 |         """
341 |         Adds a child to self.children
342 | 
343 |         Arguments:
344 |             -childEq:
345 |                 Keras or PyTorch object representing the layer to add
346 |             -name:
347 |                 Optional name of the child
348 |         """
349 |         child = LayerRepresentation(childEq)
350 |         child.name = str(name)
351 | 
352 |         child.parent = self
353 |         self.children.append(child)
354 | 
355 |         return child
356 | 
357 |     def delChildren(self):
358 |         """
359 |         Delete every child
360 |         """
361 |         self.children = list()
362 | 
363 |     def delChild(self, name=None):
364 |         """
365 |         Delete one child, identified by its name
366 |         """
367 |         if self.getChild(name=name) is not None:
368 |             self.children.remove(self.getChild(name=name))
369 | 
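    # Example of the family methods above (a sketch; `rep` is assumed to
    # wrap a model with a direct child named '0'):
    #
    #   >>> rep['0'] is rep.getChild(name='0')
    #   True
    #   >>> rep.getChildId(identifier=id(rep['0'])) is rep['0']
    #   True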
370 |     def allChildren(self):
371 |         """
372 |         Returns a list of every child contained in self.children, and their
373 |         sub-layers (using recursivity)
374 |         """
375 |         if not self.children:
376 |             return list()
377 |         else:
378 |             List = self.children
379 |             for child in self.children:
380 |                 List = List + child.allChildren()
381 |             return List
382 | 
383 |     def numberOfChildren(self):
384 |         """
385 |         Uses recursivity to compute the total number of sub layers (children)
386 |         contained in the model
387 |         """
388 |         number = len(self.children)
389 |         for child in self.children:
390 |             number += child.numberOfChildren()
391 |         return number
392 | 
393 |     def completeName(self):
394 |         if self.parent is None:
395 |             return self.name
396 |         return self.parent.completeName() + '_' + self.name
397 | 
398 |     def firstParent(self):
399 |         """
400 |         Returns the main parent of the layer (the layer that contains
401 |         everything)
402 |         Actually, it's the model the user wants to convert
403 |         """
404 |         if self.parent is None:
405 |             return self
406 |         return self.parent.firstParent()
407 | 
408 |     def connectionsAmongChildren(self, attr, reverse=False):
409 |         """
410 |         Describes connections among self's children
411 | 
412 |         Arguments:
413 |             -attr:
414 |                 One of self.children, or 'IN', or 'OUT'
415 |             -reverse (bool):
416 |                 If reverse == False, look for children connected after
417 |                 attr's output
418 |                 Else, look for children connected before attr's
419 |                 input
420 | 
421 |         Returns:
422 |             A set containing the children connected to attr, and 'IN' or 'OUT'
423 |             if attr is connected to the input or output of self
424 | 
425 |         Examples:
426 |             If self is a Sequential and attr == 'IN' and reverse == False,
427 |             connectionsAmongChildren will return a set containing only the
428 |             first layer in self
429 | 
430 |             If self is a Sequential and attr == 'OUT' and reverse == True,
431 |             connectionsAmongChildren will return a set containing only the
432 |             last layer in self
433 | 
434 |             If self is a Sequential and attr is the 1st layer and
435 |             reverse == False,
436 |             connectionsAmongChildren will return a set containing only the
437 |             second layer in self
438 |         """
439 |         connected = set()
440 | 
441 |         if isinstance(attr, str):
442 |             if (attr != 'IN' or reverse) and not(attr == 'OUT' and reverse):
443 |                 # There's no layer whose output goes to self's input, and
444 |                 # there's no layer whose input comes from self's output
445 |                 return set()
446 |             else:
447 |                 for child in self.children:
448 |                     if attr == 'IN' and 0 in child.InputConnectedTo:
449 |                         connected.add(child)
450 |                     elif attr == 'OUT' and 0 in child.OutputConnectedTo:
451 |                         connected.add(child)
452 |                 return connected
453 |         else:
454 |             child = attr
455 | 
456 |             if child not in self.children:
457 |                 return set()
458 | 
459 |             if not reverse:
460 |                 # not reverse : we look where child's output goes
461 |                 for bro in self.children:
462 |                     if child is not bro and child in bro.InputConnectedTo:
463 |                         connected.add(bro)
464 |                 for Output in child.OutputConnectedTo:
465 |                     if not Output == 0:
466 |                         connected.add(Output)
467 |                     else:
468 |                         connected.add('OUT')
469 | 
470 |             elif reverse:
471 |                 # reverse : we look where child's input comes from
472 |                 for bro in self.children:
473 |                     if child is not bro and child in bro.OutputConnectedTo:
474 |                         connected.add(bro)
475 |                 for Input in child.InputConnectedTo:
476 |                     if not Input == 0:
477 |                         connected.add(Input)
478 |                     else:
479 |                         connected.add('IN')
480 | 
481 |         return connected
482 | 
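    # Example (a sketch): for a Sequential whose three children '0', '1' and
    # '2' have already been wired with ConnectModelInputToChildren,
    # Connect2Layers and ConnectChildrenOutputToModel (all defined further
    # down), local topology queries behave like this:
    #
    #   >>> seq.connectionsAmongChildren('IN') == {seq['0']}
    #   True
    #   >>> seq.connectionsAmongChildren('OUT', reverse=True) == {seq['2']}
    #   True
    #   >>> seq.connectionsAmongChildren(seq['0']) == {seq['1']}
    #   True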
483 |     def connectedChildren(self, attr, reverse=False):
484 |         """
485 |         Tells which simple layer(s) are connected to attr (attr must be
486 |         one of self.children, or 'IN', or 'OUT')
487 |         A simple layer is a layer which doesn't have any sub-layer
488 | 
489 |         Note: returned layers may not be in self.children, for example if
490 |         self's children also have children
491 | 
492 |         Arguments:
493 |             -attr:
494 |                 One of self.children, or 'IN', or 'OUT'
495 |             -reverse (bool):
496 |                 If reverse == False, look for connected layers after
497 |                 attr's output
498 |                 Else, look for connected layers before attr's
499 |                 input
500 | 
501 |         Returns:
502 |             A set containing the simple layers connected to attr, and 'IN' or
503 |             'OUT' if attr is connected to the input or output of self
504 |         """
505 |         connected = self.connectionsAmongChildren(attr, reverse=reverse)
506 | 
507 |         connectedSimple = set()
508 | 
509 |         for layer in connected:
510 |             if isinstance(layer, str):
511 |                 # Should be 'IN' or 'OUT'
512 |                 if self.parent is None:
513 |                     # No parent => No brother
514 |                     connectedSimple.add(layer)
515 |                 else:
516 |                     # We have to look which layer is actually connected among
517 |                     # self's brothers
518 |                     parent = self.parent
519 |                     cnctdRecursive = parent.connectedChildren(self,
520 |                                                               reverse=reverse)
521 |                     for simpleLayer in cnctdRecursive:
522 |                         connectedSimple.add(simpleLayer)
523 | 
524 |             elif not layer.children:
525 |                 # We found a simple layer !
526 |                 connectedSimple.add(layer)
527 | 
528 |             elif layer.children:
529 |                 # Not a simple layer, we have to use recursivity
530 |                 if reverse:
531 |                     cnctdRecursive = layer.connectedChildren('OUT',
532 |                                                              reverse=reverse)
533 |                 else:
534 |                     cnctdRecursive = layer.connectedChildren('IN',
535 |                                                              reverse=reverse)
536 |                 for simpleLayer in cnctdRecursive:
537 |                     connectedSimple.add(simpleLayer)
538 | 
539 |         return connectedSimple
540 | 
541 |     def numberOfEquivalents(self, framework=None, file=False):
542 |         """
543 |         Uses recursivity to compute the total number of sub layers (children)
544 |         contained in the model which have their equivalent in Keras or
545 |         PyTorch (argument framework)
546 | 
547 |         Arguments:
548 |             -framework:
549 |                 'keras' or 'torch'
550 |             -file:
551 |                 If true: look in equivalent AND equivalentTxt instead of just
552 |                 equivalent
553 | 
554 |         Return:
555 |             The number of sub layers (children) whose keras or torch equivalent
556 |             is available
557 |         """
558 |         number = 0
559 |         for child in self.children:
560 |             if not file:
561 |                 if framework in child.equivalent.keys():
562 |                     number += 1
563 |             elif framework in child.equivalentTxt.keys():
564 |                 if framework in child.equivalent.keys():
565 |                     number += 1
566 |             number += child.numberOfEquivalents(framework=framework, file=file)
567 |         return number
568 | 
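    # Example (a sketch): numberOfEquivalents feeds the progression() method
    # defined further down. For a model made of 10 sub layers of which 4
    # already have a Keras equivalent (and none for the model itself):
    #
    #   >>> rep.numberOfEquivalents(framework='keras')
    #   4
    #   >>> round(rep.progression(framework='keras'), 1)   # 4 / 11 * 100
    #   36.4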
569 |     def childrenEquivalentsCompleted(self, framework=None, file=False):
570 |         """
571 |         Tells if all equivalents are available in self.children
572 | 
573 |         Arguments:
574 |             -framework:
575 |                 'torch' or 'keras' or None. The framework to look for.
576 |                 If None, look for ANY framework
577 |             -file:
578 |                 If true: look in equivalent AND equivalentTxt instead of just
579 |                 equivalent
580 | 
581 |         Returns:
582 |             True if all equivalents are available
583 |             False if not
584 |         """
585 |         for child in self.children:
586 |             if framework and framework not in child.equivalent.keys():
587 |                 return False
588 |             if framework and file and framework not in child.equivalentTxt.keys():
589 |                 return False
590 |             if (framework is None) and (child.equivalent == {}):
591 |                 return False
592 |             if (framework is None) and file and (child.equivalentTxt == {}):
593 |                 return False
594 |         return True
595 | 
596 |     def Connect2Layers(self, name0, name1, connectKeras=True):
597 |         """
598 |         Connects 2 layers among self.children together:
599 | 
600 |         Output of name0 goes to input of name1
601 | 
602 |         Arguments:
603 |             -name0 (str)
604 |             -name1 (str)
605 |             -connectKeras (bool):
606 |                 If True, also connect kerasOutput to kerasInput.
607 |         """
608 |         child0 = self.getChild(name=name0)
609 |         child1 = self.getChild(name=name1)
610 |         if child0 is None or child1 is None:
611 |             return None
612 |         child0.OutputConnectedTo.add(child1)
613 |         child1.InputConnectedTo.add(child0)
614 | 
615 |         if connectKeras:
616 |             child1.kerasInput = child0.kerasOutput
617 | 
618 |     def ConnectLayers(self, *names, **kwargs):
619 |         """
620 |         Connects many layers among self.children together:
621 | 
622 |         Output of names[i] goes to input of names[i+1]
623 | 
624 |         Arguments:
625 |             -*names (str):
626 |                 Children to connect together
627 |             -connectKeras (bool):
628 |                 If True, also connect kerasOutput to kerasInput.
629 |         """
630 |         if 'connectKeras' in kwargs.keys():
631 |             connectKeras = kwargs['connectKeras']
632 |         else:
633 |             connectKeras = True
634 | 
635 |         for i in range(len(names)-1):
636 |             self.Connect2Layers(names[i],
637 |                                 names[i+1],
638 |                                 connectKeras=connectKeras)
639 | 
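    # Example (a sketch): wiring the children of a 3-layer Sequential in a
    # chain, the same pattern spreadSignal() applies to Sequential blocks.
    # The model's own input and output are attached with the two methods
    # defined just below:
    #
    #   >>> seq.ConnectModelInputToChildren('0')
    #   >>> seq.ConnectLayers('0', '1', '2')
    #   >>> seq.ConnectChildrenOutputToModel('2')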
640 |     def ConnectModelInputToChildren(self, *names, **kwargs):
641 |         """
642 |         Puts the model's input on each child given in argument
643 | 
644 |         Arguments:
645 |             -*names (str):
646 |                 Children to connect to the model's input
647 |             -connectKeras (bool):
648 |                 If True, also connect kerasOutput to kerasInput.
649 | 
650 |         Raises:
651 |             -ImportError:
652 |                 If keras isn't available
653 |         """
654 |         if 'connectKeras' in kwargs.keys():
655 |             connectKeras = kwargs['connectKeras']
656 |         else:
657 |             connectKeras = True
658 | 
659 |         for name in names:
660 |             child = self.getChild(name=name)
661 |             if child is not None:
662 |                 child.InputConnectedTo.add(0)
663 | 
664 |                 if connectKeras:
665 |                     if self.kerasInput is None:
666 |                         if keras is None:
667 |                             err = "Could not import keras. Conversion failed !"
668 |                             raise ImportError(err)
669 |                         Input = keras.layers.Input(shape=self.input_shape)
670 |                         self.kerasInput = Input
671 |                     child.kerasInput = self.kerasInput
672 | 
673 |     def ConnectChildrenOutputToModel(self, *names, **kwargs):
674 |         """
675 |         Concatenates the outputs of every child given in argument, and
676 |         connects them to the model's output
677 | 
678 |         Arguments:
679 |             -*names (str):
680 |                 Children to connect to the model's output
681 |             -connectKeras (bool):
682 |                 If True, also connect the generated kerasOutput to the model's
683 |                 kerasOutput
684 | 
685 |         Raises:
686 |             -ImportError:
687 |                 If keras isn't available
688 |         """
689 |         if 'connectKeras' in kwargs.keys():
690 |             connectKeras = kwargs['connectKeras']
691 |         else:
692 |             connectKeras = True
693 | 
694 |         if connectKeras:
695 |             kerasOutputs = list()
696 | 
697 |         for name in names:
698 |             child = self.getChild(name=name)
699 |             if child is not None:
700 |                 child.OutputConnectedTo.add(0)
701 | 
702 |                 if connectKeras:
703 |                     kerasOutputs.append(child.kerasOutput)
704 | 
705 |         if connectKeras:
706 |             if None in kerasOutputs:
707 |                 return None
708 |             elif len(kerasOutputs) == 0:
709 |                 return None
710 |             elif len(kerasOutputs) == 1:
711 |                 self.kerasOutput = kerasOutputs[0]
712 |             else:
713 |                 cat = keras.layers.concatenate(kerasOutputs, axis=1)
714 |                 self.kerasOutput = cat
715 | 
716 |     def ConnectChildrenOutputToChild(self, *names, **kwargs):
717 |         """
718 |         Concatenates the outputs of every child given in argument and puts
719 |         the result on childName's input
720 | 
721 |         Arguments:
722 |             -*names (str):
723 |                 Children to connect to childName's input
724 |             -childName (str):
725 |                 A layer of the model
726 |             -connectKeras (bool):
727 |                 If True, also connect the generated kerasOutput to childName's
728 |                 kerasInput
729 | 
730 |         Raises:
731 |             -ImportError:
732 |                 If keras isn't available
733 |         """
734 |         if 'connectKeras' in kwargs.keys():
735 |             connectKeras = kwargs['connectKeras']
736 |         else:
737 |             connectKeras = True
738 | 
739 |         childName = kwargs['childName']
740 | 
741 |         if connectKeras:
742 |             kerasOutputs = list()
743 | 
744 |         child = self.getChild(name=childName)
745 |         for i in range(len(names)):
746 |             if isinstance(names[i], str):
747 |                 child_i = self.getChild(name=names[i])
748 |             else:
749 |                 child_i = names[i]
750 |             if child_i is not None:
751 |                 child_i.OutputConnectedTo.add(child)
752 |                 child.InputConnectedTo.add(child_i)
753 | 
754 |                 if connectKeras:
755 |                     kerasOutputs.append(child_i.kerasOutput)
756 | 
757 |         if connectKeras:
758 |             if None in kerasOutputs:
759 |                 return None
760 |             elif len(kerasOutputs) == 0:
761 |                 return None
762 |             elif len(kerasOutputs) == 1:
763 |                 child.kerasInput = kerasOutputs[0]
764 |             else:
765 |                 cat = keras.layers.concatenate(kerasOutputs, axis=1)
766 |                 self.getChild(name=childName).kerasInput = cat
767 | 
768 |     def ConnectChildrenOutputToChildren(self, *names, **kwargs):
769 |         """
770 |         Concatenates the outputs of every child given in argument and puts
771 |         the result on the input of every layer listed in childrenNames
772 | 
773 |         Arguments:
774 |             -*names (str):
775 |                 Children to connect to childrenNames's input
776 |             -childrenNames (list):
777 |                 A list of layers of the model
778 |             -connectKeras (bool):
779 |                 If True, also connect the generated kerasOutput to
780 |                 childrenNames's kerasInput
781 | 
782 |         Raises:
783 |             -ImportError:
784 |                 If keras isn't available
785 |         """
786 |         if 'connectKeras' in kwargs.keys():
787 |             connectKeras = kwargs['connectKeras']
788 |         else:
789 |             connectKeras = True
790 | 
791 |         childrenNames = kwargs['childrenNames']
792 | 
793 |         if isinstance(childrenNames, str):
794 | 
self.ConnectChildrenOutputToChild(*names, 795 | childName=childrenNames, 796 | connectKeras=connectKeras) 797 | elif isinstance(childrenNames, list): 798 | for child in childrenNames: 799 | self.ConnectChildrenOutputToChild(*names, 800 | childName=child, 801 | connectKeras=connectKeras) 802 | 803 | # --- PYTORCH SPECIFIC METHODS --- 804 | 805 | def isTorchBuiltIn(self): 806 | dT = self.detailedType 807 | return 'torch' in dT and 'torchvision' not in dT 808 | 809 | def isContainer(self): 810 | return ('container' in self.detailedType) 811 | 812 | def isTorchLayer(self): 813 | return self.isTorchBuiltIn() and not self.isContainer() 814 | 815 | def isTorchContainer(self): 816 | return self.isTorchBuiltIn() and self.isContainer() 817 | 818 | # --- KERAS SPECIFIC METHODS --- 819 | 820 | def kerasInputShape(self): 821 | """ 822 | Determine input shape according to self.kerasInput 823 | 824 | Useful to check that PyTorch to Keras conversion doesn't fail 825 | """ 826 | return t2k.kerasShape(self.kerasInput) 827 | 828 | def kerasOutputShape(self): 829 | """ 830 | Determine output shape according to self.kerasOutput 831 | 832 | Useful to check that PyTorch to Keras conversion doesn't fail 833 | """ 834 | return t2k.kerasShape(self.kerasOutput) 835 | 836 | # --- REPRESENTATION METHODS --- 837 | 838 | def DOT(self, shapes=True, debug=False): 839 | """ 840 | Creates a DOT graph of the model 841 | 842 | Arguments: 843 | -shapes (bool): 844 | Show input and output shapes on the graph 845 | -debug (bool): 846 | Show as much information as possible 847 | 848 | Returns: 849 | A graphviz.Digraph object 850 | (or None if graphviz isn't available) 851 | """ 852 | if graphviz is None: 853 | return None 854 | 855 | if debug: 856 | shapes = True 857 | 858 | # Step 1 : create a digraph. 
In most cases, names are based on id(self)
859 |         # to ensure uniqueness
860 |         dot = graphviz.Digraph(name='cluster_{}'.format(str(id(self))),
861 |                                format='svg')
862 | 
863 |         # Step 2 : give our digraph a label and a color
864 |         label = DOTlabel(model=self,
865 |                          shapes=shapes,
866 |                          debug=debug,
867 |                          name=str(self))
868 | 
869 |         color = DOTcolor(model=self,
870 |                          debug=debug)
871 | 
872 |         dot.attr(label=label, fontsize='12', color=color)
873 | 
874 |         # Step 3 : add sub layers in the digraph
875 |         for child in self.children:
876 | 
877 |             if not child.children:
878 |                 # If there aren't any sub children
879 |                 label = DOTlabel(model=child,
880 |                                  shapes=shapes,
881 |                                  debug=debug,
882 |                                  name=child.name)
883 | 
884 |                 color = DOTcolor(model=child,
885 |                                  debug=debug)
886 | 
887 |                 dot.node(str(id(child)),
888 |                          label=label,
889 |                          color=color,
890 |                          shape='box',
891 |                          fontsize='11')
892 |             else:
893 |                 # If there are sub children => recursivity
894 |                 dot.subgraph(child.DOT(shapes=shapes, debug=debug))
895 | 
896 |         # Step 4 : if it's the main layer (the whole model)
897 |         if self.parent is None:  # Main layer (the entire model)
898 |             Dot = graphviz.Digraph(name='all', format='svg')
899 |             Dot.subgraph(dot)
900 | 
901 |             connectedIN = self.connectedChildren('IN')
902 |             connectedOUT = self.connectedChildren('OUT', reverse=True)
903 | 
904 |             # Create 'IN' and 'OUT' nodes if necessary
905 |             if shapes:
906 |                 if connectedIN:
907 |                     Dot.node('IN', label='IN\n'+str(self.input_shape))
908 |                 if connectedOUT:
909 |                     Dot.node('OUT', label='OUT\n'+str(self.output_shape))
910 |             else:
911 |                 if connectedIN:
912 |                     Dot.node('IN')
913 |                 if connectedOUT:
914 |                     Dot.node('OUT')
915 | 
916 |             # Add edges between layers to show how they are connected
917 |             Dot = createDOTedges(self, Dot, debug=debug)
918 | 
919 |             return Dot
920 | 
921 |         return dot
922 | 
923 |     def progression(self, framework=None):
924 |         """
925 |         Returns the fraction of the model (in %) that has an equivalent
926 |         available in Keras or PyTorch (argument framework)
927 |         """
928 |         equivalents = self.numberOfEquivalents(framework=framework)
929 |         if framework in self.equivalent.keys():
930 |             equivalents += 1
931 |         total = self.numberOfChildren() + 1
932 | 
933 |         return (equivalents / total) * 100
934 | 
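    # Example (a sketch) of the indented layer list printed by summary()
    # below for a small Sequential model; the output is roughly:
    #
    #   >>> rep.summary()
    #   Summary of Sequential:
    #
    #   ( - ): Sequential
    #   | (0): Conv2d
    #   | (1): ReLU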
935 |     def summary(self, niv=0):
936 |         """
937 |         Prints a quick summary of the model. Actually, it's just an exhaustive
938 |         list of layers contained in the model.
939 | 
940 |         This method doesn't tell how layers are connected to each other
941 |         """
942 |         if niv == 0:
943 |             print("\nSummary of {}:\n".format(self.type))
944 |             print('( - ): ' + self.type)
945 |             niv = 1
946 | 
947 |         for child in self.children:
948 |             print_str = str()
949 |             for _ in range(niv):
950 |                 # Let's add some indentation:
951 |                 print_str = print_str + '| '
952 |             print_str = print_str + str(child)
953 |             print(print_str)
954 | 
955 |             child.summary(niv+1)
956 | 
957 |     def __str__(self):
958 |         if self.name == '':
959 |             return self.type
960 |         return '(' + self.name + '): ' + self.type
961 | 
962 |     def __repr__(self):
963 |         return '<{} at {}>'.format(str(self), str(hex(id(self))))
964 | 
965 | 
966 | # --- USEFUL FUNCTIONS FOR DOT CREATIONS ---
967 | 
968 | def createDOTedges(model, dot, debug=False):
969 |     """
970 |     Function creating edges in a DOT graph showing connections between layers
971 | 
972 |     Arguments:
973 |         -model:
974 |             the LayerRepresentation associated with the graph
975 |         -dot:
976 |             The dot graph, without edges
977 |         -debug (bool):
978 |             Show as much information as possible
979 | 
980 |     Returns:
981 |         A dot graph, with edges
982 |     """
983 |     Dot = dot.copy()
984 | 
985 |     for child in model.allChildren():
986 |         if not child.children:
987 |             connected = child.parent.connectedChildren(child)
988 | 
989 |             for connectedLayer in connected:
990 |                 kwargs = dict()
991 | 
992 |                 if debug:
993 |                     kwargs['label'] = str(child.kerasOutputShape())
994 |                     kwargs['fontsize'] = '10'
995 | 
996 |                 edgeBegin = str(id(child))
997 | 
998 |                 if child.kerasOutput is None and debug:
999 |                     kwargs['style'] = 'dashed'
1000 | 
1001 |                 if connectedLayer == 'OUT':
1002 |                     edgeEnd = 'OUT'
1003 | 
1004 |                 else:
1005 |                     edgeEnd = str(id(connectedLayer))
1006 | 
1007 |                     if connectedLayer.kerasInput is None and debug:
1008 |                         kwargs['style'] = 'dashed'
1009 | 
1010 |                 Dot.edge(edgeBegin,
1011 |                          edgeEnd,
1012 |                          **kwargs)
1013 | 
1014 |     connectedIN = model.connectedChildren('IN')
1015 | 
1016 |     for layer in connectedIN:
1017 |         kwargs = dict()
1018 | 
1019 |         if debug:
1020 |             kwargs['label'] = str(model.kerasInputShape())
1021 |             kwargs['fontsize'] = '10'
1022 | 
1023 |         edgeBegin = 'IN'
1024 | 
1025 |         if layer == 'OUT':
1026 |             edgeEnd = 'OUT'
1027 |         else:
1028 |             edgeEnd = str(id(layer))
1029 | 
1030 |             if model.kerasInput is None and debug:
1031 |                 kwargs['style'] = 'dashed'
1032 | 
1033 |         Dot.edge(edgeBegin,
1034 |                  edgeEnd,
1035 |                  **kwargs)
1036 |     return Dot
1037 | 
1038 | 
1039 | def DOTlabel(model, shapes, debug, name):
1040 |     """
1041 |     Function creating labels for dot graphs and nodes
1042 |     """
1043 |     if debug:
1044 |         # We indicate if Keras Input/Output is available
1045 |         if model.kerasInput is None:
1046 |             inputState = ' (Keras-Not Computed)'
1047 |         else:
1048 |             inputState = ' (Keras-Computed)'
1049 |         if model.kerasOutput is None:
1050 |             outputState = ' (Keras-Not Computed)'
1051 |         else:
1052 |             outputState = ' (Keras-Computed)'
1053 | 
1054 |         inputStr = str(model.input_shape) + inputState + '\n'
1055 |         outputStr = '\n' + str(model.output_shape) + outputState
1056 | 
1057 |     elif shapes:
1058 |         # We just indicate the shapes
1059 |         inputStr = str(model.input_shape) + '\n'
1060 |         outputStr = '\n' + str(model.output_shape)
1061 | 
1062 |     else:
1063 |         inputStr = ''
1064 |         outputStr = ''
1065 | 
1066 |     label = inputStr + name + outputStr
1067 |     return label
1068 | 
1069 | 
1070 | def DOTcolor(model, debug):
1071 |     """
1072 |     Function creating colors for dot graphs and nodes
1073 |     """
1074 |     if debug:
1075 |         availableEquiv = model.equivalent.keys()
1076 | 
if 'keras' in availableEquiv and 'torch' in availableEquiv: 1077 | color = 'green' 1078 | else: 1079 | color = 'red' 1080 | else: 1081 | color = 'black' 1082 | 1083 | return color 1084 | -------------------------------------------------------------------------------- /pytorch_keras_converter/utility/__init__.py: -------------------------------------------------------------------------------- 1 | from .core import * 2 | from .LayerRepresentation import * 3 | -------------------------------------------------------------------------------- /pytorch_keras_converter/utility/converting_layers.py: -------------------------------------------------------------------------------- 1 | from . import t2k_equivalents as t2k 2 | 3 | try: 4 | import tensorflow.keras as keras 5 | except ImportError: 6 | try: 7 | import keras 8 | except ImportError: 9 | keras = None 10 | try: 11 | import torch 12 | except ImportError: 13 | torch = None 14 | 15 | 16 | def spreadSignal(model): 17 | """ 18 | Converts Cadene models using Keras Functionnal API. 19 | 20 | This function creates inputs (keras.layers.Input) on first layers of each 21 | block, and 'connects' layers together 22 | 23 | A connection between layer0 and layer1 means that output of layer0 goes to 24 | input of layer1. 25 | 26 | Argument: 27 | -model: 28 | The LayerRepresentation of the layer or model to convert 29 | """ 30 | 31 | if model.firstParent().type == 'InceptionV4': 32 | # Actually it will raise an exception because of: 33 | # nn.AvgPool2d(count_include_pad=False) 34 | t2k.cadene.InceptionV4.spreadSignal(model) 35 | 36 | elif model.firstParent().type == 'BNInception': 37 | t2k.cadene.BNInception.spreadSignal(model) 38 | 39 | elif model.firstParent().type == 'SENet': 40 | t2k.cadene.SENet.spreadSignal(model) 41 | 42 | elif model.firstParent().type == 'ResNet': 43 | t2k.cadene.ResNet.spreadSignal(model) 44 | 45 | elif model.firstParent().type == 'FBResNet': 46 | t2k.cadene.FBResNet.spreadSignal(model) 47 | 48 | elif model.type == 'Sequential': 49 | model.ConnectModelInputToChildren('0') 50 | for i in range(len(model.children)-1): 51 | model.Connect2Layers(str(i), str(i+1)) 52 | model.ConnectChildrenOutputToModel(str(len(model.children)-1)) 53 | 54 | else: 55 | err = "Warning: layer or model '{}' not recognized!".format(model.type) 56 | raise NotImplementedError(err) 57 | 58 | 59 | def torch2kerasEquivalent(model, file=False, weights=True): 60 | """ 61 | Converts a pytorch native layer or container into a keras layer 62 | All children must have their equivalents already computed 63 | 64 | Arguments: 65 | -model: 66 | A LayerRepresentation object of the layer to convert 67 | -file (bool): 68 | If we want to write the equivalent in a python file 69 | -weights (bool): 70 | Also convert weights 71 | 72 | Raises: 73 | -ImportError: 74 | If Keras isn't available 75 | -NotImplementedError: 76 | If the given layer isn't supported yet 77 | 78 | Returns: 79 | Keras equivalent. 80 | If file is True, returns as a str to put in a python file 81 | Else, return the keras layer 82 | """ 83 | if keras is None: 84 | raise ImportError("Could not import keras. 
Conversion failed !")
85 | 
86 |     if model.detailedType == 'torch.nn.modules.container.Sequential':
87 |         return t2k.Sequential(model, file=file)
88 | 
89 |     if model.detailedType == 'torch.nn.modules.conv.Conv2d':
90 |         return t2k.Conv2d(model, file=file, weights=weights)
91 | 
92 |     if model.detailedType == 'torch.nn.modules.activation.ReLU':
93 |         return t2k.ReLU(model, file=file)
94 | 
95 |     if model.detailedType == 'torch.nn.modules.activation.Sigmoid':
96 |         return t2k.Sigmoid(model, file=file)
97 | 
98 |     if model.detailedType == 'torch.nn.modules.batchnorm.BatchNorm2d':
99 |         return t2k.BatchNorm2d(model, file=file, weights=weights)
100 | 
101 |     if model.detailedType == 'torch.nn.modules.dropout.Dropout':
102 |         return t2k.Dropout(model, file=file)
103 | 
104 |     if model.detailedType == 'torch.nn.modules.linear.Linear':
105 |         return t2k.Linear(model, file=file, weights=weights)
106 | 
107 |     if model.detailedType == 'torch.nn.modules.padding.ZeroPad2d':
108 |         return t2k.ZeroPad2d(model, file=file)
109 | 
110 |     if model.detailedType == 'torch.nn.modules.pooling.AdaptiveAvgPool2d':
111 |         return t2k.AdaptiveAvgPool2d(model, file=file)
112 | 
113 |     if model.detailedType == 'torch.nn.modules.pooling.MaxPool2d':
114 |         return t2k.MaxPool2d(model, file=file)
115 | 
116 |     if model.detailedType == 'torch.nn.modules.pooling.AvgPool2d':
117 |         return t2k.AvgPool2d(model, file=file)
118 | 
119 |     err = "Layers of type {} aren't implemented yet".format(model.detailedType)
120 |     raise NotImplementedError(err)
121 | 
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/core.py:
--------------------------------------------------------------------------------
1 | from .torch2keras import convert_torch2keras
2 | from .torch2keras import convert_torch2keras_file
3 | from .LayerRepresentation import normalizeShape
4 | 
5 | 
6 | def convert_to_file(model, framework):
7 |     """
8 |     Converts a model between PyTorch and Keras, writing the result to a file
9 | 
10 |     Arguments:
11 |         -model:
12 |             the model to convert
13 |         -framework:
14 |             the framework the generated python file should use
15 | 
16 |     Returns:
17 |         the model (LayerRepresentation)
18 | 
19 |     Raises:
20 |         -NotImplementedError:
21 |             if the conversion isn't supported yet
22 |     """
23 | 
24 |     if model.originalFramework == 'keras' and framework == 'keras':
25 |         error = "Exporting an existing keras model to a Keras file isn't \
26 | supported yet"
27 |         raise NotImplementedError(error)
28 | 
29 |     if model.originalFramework == 'keras' and framework == 'torch':
30 |         error = "Conversions from keras to pytorch aren't supported yet"
31 |         raise NotImplementedError(error)
32 | 
33 |     if model.originalFramework == 'torch' and framework == 'torch':
34 |         error = "Exporting an existing pyTorch model to a pyTorch file isn't \
35 | supported yet"
36 |         raise NotImplementedError(error)
37 | 
38 |     return convert_torch2keras_file(model)
39 | 
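# Example (a sketch, assuming torch is available and that the
# LayerRepresentation class is re-exported by pytorch_keras_converter.utility)
# of calling convert() below on a wrapped model:
#
#   >>> import torch.nn as nn
#   >>> from pytorch_keras_converter import utility
#   >>> rep = utility.LayerRepresentation(
#   ...     nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()))
#   >>> rep = convert(rep, input_size=(3, 32, 32), weights=True, quiet=True)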
40 | 
41 | def convert(model, input_size=None, weights=True, quiet=True):
42 |     """
43 |     Converts a model between PyTorch and Keras
44 | 
45 |     Arguments:
46 |         -model:
47 |             the model to convert
48 |         -input_size:
49 |             int, list, or tuple.
50 |             Optional if the model is very simple
51 |         -weights (bool):
52 |             Automatically convert weights
53 |         -quiet (bool):
54 |             If False, display a progress bar
55 | 
56 |     Returns:
57 |         the model (LayerRepresentation)
58 | 
59 |     Raises:
60 |         -NotImplementedError:
61 |             if the model isn't supported yet
62 |         -ValueError:
63 |             if trying to convert from PyTorch to Keras without specifying
64 |             the input shape
65 |     """
66 | 
67 |     if model.originalFramework == 'keras':
68 |         error = "Conversions from keras to pytorch aren't supported yet"
69 |         raise NotImplementedError(error)
70 | 
71 |     if model.originalFramework == 'torch':
72 |         if input_size is None:
73 |             raise ValueError("input_size is necessary to convert a model")
74 | 
75 |     input_size = normalizeShape(input_size)
76 | 
77 |     return convert_torch2keras(model,
78 |                                input_size=input_size,
79 |                                weights=weights,
80 |                                quiet=quiet)
81 | 
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/__init__.py:
--------------------------------------------------------------------------------
1 | from .linear import *
2 | from .conv import *
3 | from .activation import *
4 | from .pooling import *
5 | from .batchnorm import *
6 | from .container import *
7 | from .padding import *
8 | from .dropout import *
9 | from . import cadene
10 | 
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/activation.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | 
9 | 
10 | def ReLU(model, file=False):
11 |     """
12 |     Converts a torch.nn.ReLU layer
13 | 
14 |     Arguments:
15 |         -model:
16 |             A LayerRepresentation object of the ReLU layer to convert
17 |         -file (bool):
18 |             If we want to write the equivalent in a python file
19 | 
20 |     Raises:
21 |         -ImportError:
22 |             If the Keras import failed
23 | 
24 |     Returns:
25 |         Keras equivalent.
26 |         If file is True, returns a str to put in a python file
27 |         Else, returns the keras layer
28 |     """
29 |     if keras is None:
30 |         raise ImportError("Could not import keras. Conversion failed !")
31 | 
32 |     name = model.completeName()
33 | 
34 |     arguments = {'input_shape': model.input_shape,
35 |                  'max_value': None,
36 |                  'negative_slope': 0.0,
37 |                  'threshold': 0.0,
38 |                  'name': name}
39 | 
40 |     if not file:
41 |         kerasLayer = keras.layers.ReLU(**arguments)
42 |         return kerasLayer
43 |     else:
44 |         outstr = 'keras.layers.ReLU('
45 |         for arg, val in arguments.items():
46 |             outstr = outstr + arg + '=' + str(val) + ', '
47 |         outstr = outstr[:-2] + ')'
48 |         return outstr
49 | 
50 | 
51 | def Sigmoid(model, file=False):
52 |     """
53 |     Converts a torch.nn.Sigmoid layer
54 | 
55 |     Arguments:
56 |         -model:
57 |             A LayerRepresentation object of the Sigmoid layer to convert
58 |         -file (bool):
59 |             If we want to write the equivalent in a python file
60 | 
61 |     Raises:
62 |         -ImportError:
63 |             If the Keras import failed
64 | 
65 |     Returns:
66 |         Keras equivalent.
67 |         If file is True, returns a str to put in a python file
68 |         Else, returns the keras layer
69 |     """
70 |     if keras is None:
71 |         raise ImportError("Could not import keras. 
Conversion failed !") 72 | 73 | name = model.completeName() 74 | 75 | arguments = {'activation': 'sigmoid', 76 | 'input_shape': model.input_shape, 77 | 'name': name} 78 | 79 | if not file: 80 | kerasLayer = keras.layers.Activation(**arguments) 81 | return kerasLayer 82 | else: 83 | outstr = 'keras.layers.Activation(' 84 | for arg, val in arguments.items(): 85 | outstr = outstr + arg + '=' + str(val) + ', ' 86 | outstr = outstr[:-2] + ')' 87 | return outstr 88 | -------------------------------------------------------------------------------- /pytorch_keras_converter/utility/t2k_equivalents/batchnorm.py: -------------------------------------------------------------------------------- 1 | try: 2 | import tensorflow.keras as keras 3 | except ImportError: 4 | try: 5 | import keras 6 | except ImportError: 7 | keras = None 8 | 9 | 10 | def BatchNorm2d(model, file=False, weights=True): 11 | """ 12 | Converts a torch.nn.BatchNorm2d layer 13 | 14 | Arguments: 15 | -model: 16 | A LayerRepresentation object of the layer BatchNorm2d to convert 17 | -file (bool): 18 | If we want to write the equivalent in a python file 19 | -weights (bool): 20 | Also convert weights 21 | 22 | Raises: 23 | -ImportError: 24 | If Keras import failed 25 | 26 | Returns: 27 | Keras equivalent. 28 | If file is True, returns as a str to put in a python file 29 | Else, return the keras layer 30 | """ 31 | if keras is None: 32 | raise ImportError("Could not import keras. Conversion failed !") 33 | 34 | pytorchLayer = model.equivalent['torch'] 35 | name = model.completeName() 36 | 37 | argumentsBatchNorm = {'axis': 1, 38 | 'momentum': pytorchLayer.momentum, 39 | 'epsilon': pytorchLayer.eps, 40 | 'center': pytorchLayer.affine, 41 | 'scale': pytorchLayer.affine, 42 | 'input_shape': model.input_shape, 43 | 'name': name} 44 | 45 | if weights: 46 | parameters = dict() 47 | for key, val in dict(pytorchLayer.state_dict()).items(): 48 | # Convert every parameter Tensor to a numpy array 49 | parameters[key] = val.detach().numpy() 50 | 51 | # List of [weight, bias, running_mean, running_var] 52 | paramList = [parameters['weight'], 53 | parameters['bias'], 54 | parameters['running_mean'], 55 | parameters['running_var']] 56 | 57 | if not file: 58 | BatchNormLayer = keras.layers.BatchNormalization(**argumentsBatchNorm) 59 | 60 | kerasLayer = keras.Sequential() 61 | kerasLayer.add(BatchNormLayer) 62 | 63 | if weights: 64 | kerasLayer.layers[0].set_weights(paramList) 65 | 66 | return kerasLayer 67 | else: 68 | outstr = 'keras.layers.BatchNormalization(' 69 | for arg, val in argumentsBatchNorm.items(): 70 | outstr = outstr + arg + '=' + str(val) + ', ' 71 | outstr = outstr[:-2] + ')' 72 | return outstr 73 | -------------------------------------------------------------------------------- /pytorch_keras_converter/utility/t2k_equivalents/cadene/BNInception.py: -------------------------------------------------------------------------------- 1 | try: 2 | import tensorflow.keras as keras 3 | except ImportError: 4 | try: 5 | import keras 6 | except ImportError: 7 | keras = None 8 | try: 9 | import torch 10 | except ImportError: 11 | torch = None 12 | 13 | from ... 
import torch2keras as t2k 14 | 15 | 16 | def spreadSignal(model): 17 | 18 | def ConcatenateOutputs(*names): 19 | model.ConnectChildrenOutputToModel(*names, connectKeras=False) 20 | outputs = list() 21 | for i in range(len(names)): 22 | if not isinstance(names[i], str): 23 | outputs.append(names[i]) 24 | elif Output(names[i]) is None: 25 | return None 26 | else: 27 | outputs.append(Output(names[i])) 28 | return keras.layers.concatenate(outputs, axis=1) 29 | 30 | def Output(child): 31 | if isinstance(child, str) and model.getChild(name=child) is not None: 32 | return model.getChild(name=child).kerasOutput 33 | elif child in model.children: 34 | return child.kerasOutput 35 | return None 36 | 37 | if model.type == 'BNInception': 38 | model.ConnectModelInputToChildren('conv1_7x7_s2') 39 | model.ConnectLayers('conv1_7x7_s2', 40 | 'conv1_7x7_s2_bn', 41 | 'conv1_relu_7x7', 42 | 'pool1_3x3_s2', 43 | 'conv2_3x3_reduce', 44 | 'conv2_3x3_reduce_bn', 45 | 'conv2_relu_3x3_reduce', 46 | 'conv2_3x3', 47 | 'conv2_3x3_bn', 48 | 'conv2_relu_3x3', 49 | 'pool2_3x3_s2') 50 | 51 | model.ConnectLayers('pool2_3x3_s2', 'inception_3a_1x1') 52 | model.ConnectLayers('pool2_3x3_s2', 'inception_3a_3x3_reduce') 53 | model.ConnectLayers('pool2_3x3_s2', 'inception_3a_double_3x3_reduce') 54 | model.ConnectLayers('pool2_3x3_s2', 'inception_3a_pool') 55 | 56 | model.ConnectLayers('inception_3a_1x1', 57 | 'inception_3a_1x1_bn', 58 | 'inception_3a_relu_1x1') 59 | 60 | model.ConnectLayers('inception_3a_3x3_reduce', 61 | 'inception_3a_3x3_reduce_bn', 62 | 'inception_3a_relu_3x3_reduce', 63 | 'inception_3a_3x3', 64 | 'inception_3a_3x3_bn', 65 | 'inception_3a_relu_3x3') 66 | 67 | model.ConnectLayers('inception_3a_double_3x3_reduce', 68 | 'inception_3a_double_3x3_reduce_bn', 69 | 'inception_3a_relu_double_3x3_reduce', 70 | 'inception_3a_double_3x3_1', 71 | 'inception_3a_double_3x3_1_bn', 72 | 'inception_3a_relu_double_3x3_1', 73 | 'inception_3a_double_3x3_2', 74 | 'inception_3a_double_3x3_2_bn', 75 | 'inception_3a_relu_double_3x3_2') 76 | 77 | model.ConnectLayers('inception_3a_pool', 78 | 'inception_3a_pool_proj', 79 | 'inception_3a_pool_proj_bn', 80 | 'inception_3a_relu_pool_proj') 81 | 82 | childrenNames = ['inception_3b_1x1', 83 | 'inception_3b_3x3_reduce', 84 | 'inception_3b_double_3x3_reduce', 85 | 'inception_3b_pool'] 86 | 87 | model.ConnectChildrenOutputToChildren('inception_3a_relu_1x1', 88 | 'inception_3a_relu_3x3', 89 | 'inception_3a_relu_double_3x3_2', 90 | 'inception_3a_relu_pool_proj', 91 | childrenNames=childrenNames) 92 | 93 | model.ConnectLayers('inception_3b_1x1', 94 | 'inception_3b_1x1_bn', 95 | 'inception_3b_relu_1x1') 96 | 97 | model.ConnectLayers('inception_3b_3x3_reduce', 98 | 'inception_3b_3x3_reduce_bn', 99 | 'inception_3b_relu_3x3_reduce', 100 | 'inception_3b_3x3', 101 | 'inception_3b_3x3_bn', 102 | 'inception_3b_relu_3x3') 103 | 104 | model.ConnectLayers('inception_3b_double_3x3_reduce', 105 | 'inception_3b_double_3x3_reduce_bn', 106 | 'inception_3b_relu_double_3x3_reduce', 107 | 'inception_3b_double_3x3_1', 108 | 'inception_3b_double_3x3_1_bn', 109 | 'inception_3b_relu_double_3x3_1', 110 | 'inception_3b_double_3x3_2', 111 | 'inception_3b_double_3x3_2_bn', 112 | 'inception_3b_relu_double_3x3_2') 113 | 114 | model.ConnectLayers('inception_3b_pool', 115 | 'inception_3b_pool_proj', 116 | 'inception_3b_pool_proj_bn', 117 | 'inception_3b_relu_pool_proj') 118 | 119 | childrenNames = ['inception_3c_3x3_reduce', 120 | 'inception_3c_double_3x3_reduce', 121 | 'inception_3c_pool'] 122 | 123 | 
model.ConnectChildrenOutputToChildren('inception_3b_relu_1x1', 124 | 'inception_3b_relu_3x3', 125 | 'inception_3b_relu_double_3x3_2', 126 | 'inception_3b_relu_pool_proj', 127 | childrenNames=childrenNames) 128 | 129 | model.ConnectLayers('inception_3c_3x3_reduce', 130 | 'inception_3c_3x3_reduce_bn', 131 | 'inception_3c_relu_3x3_reduce', 132 | 'inception_3c_3x3', 133 | 'inception_3c_3x3_bn', 134 | 'inception_3c_relu_3x3') 135 | 136 | model.ConnectLayers('inception_3c_double_3x3_reduce', 137 | 'inception_3c_double_3x3_reduce_bn', 138 | 'inception_3c_relu_double_3x3_reduce', 139 | 'inception_3c_double_3x3_1', 140 | 'inception_3c_double_3x3_1_bn', 141 | 'inception_3c_relu_double_3x3_1', 142 | 'inception_3c_double_3x3_2', 143 | 'inception_3c_double_3x3_2_bn', 144 | 'inception_3c_relu_double_3x3_2') 145 | 146 | childrenNames = ['inception_4a_1x1', 147 | 'inception_4a_3x3_reduce', 148 | 'inception_4a_double_3x3_reduce', 149 | 'inception_4a_pool'] 150 | 151 | model.ConnectChildrenOutputToChildren('inception_3c_relu_3x3', 152 | 'inception_3c_relu_double_3x3_2', 153 | 'inception_3c_pool', 154 | childrenNames=childrenNames) 155 | 156 | model.ConnectLayers('inception_4a_1x1', 157 | 'inception_4a_1x1_bn', 158 | 'inception_4a_relu_1x1') 159 | 160 | model.ConnectLayers('inception_4a_3x3_reduce', 161 | 'inception_4a_3x3_reduce_bn', 162 | 'inception_4a_relu_3x3_reduce', 163 | 'inception_4a_3x3', 164 | 'inception_4a_3x3_bn', 165 | 'inception_4a_relu_3x3') 166 | 167 | model.ConnectLayers('inception_4a_double_3x3_reduce', 168 | 'inception_4a_double_3x3_reduce_bn', 169 | 'inception_4a_relu_double_3x3_reduce', 170 | 'inception_4a_double_3x3_1', 171 | 'inception_4a_double_3x3_1_bn', 172 | 'inception_4a_relu_double_3x3_1', 173 | 'inception_4a_double_3x3_2', 174 | 'inception_4a_double_3x3_2_bn', 175 | 'inception_4a_relu_double_3x3_2') 176 | 177 | model.ConnectLayers('inception_4a_pool', 178 | 'inception_4a_pool_proj', 179 | 'inception_4a_pool_proj_bn', 180 | 'inception_4a_relu_pool_proj') 181 | 182 | childrenNames = ['inception_4b_1x1', 183 | 'inception_4b_3x3_reduce', 184 | 'inception_4b_double_3x3_reduce', 185 | 'inception_4b_pool'] 186 | 187 | model.ConnectChildrenOutputToChildren('inception_4a_relu_1x1', 188 | 'inception_4a_relu_3x3', 189 | 'inception_4a_relu_double_3x3_2', 190 | 'inception_4a_relu_pool_proj', 191 | childrenNames=childrenNames) 192 | 193 | model.ConnectLayers('inception_4b_1x1', 194 | 'inception_4b_1x1_bn', 195 | 'inception_4b_relu_1x1') 196 | 197 | model.ConnectLayers('inception_4b_3x3_reduce', 198 | 'inception_4b_3x3_reduce_bn', 199 | 'inception_4b_relu_3x3_reduce', 200 | 'inception_4b_3x3', 201 | 'inception_4b_3x3_bn', 202 | 'inception_4b_relu_3x3') 203 | 204 | model.ConnectLayers('inception_4b_double_3x3_reduce', 205 | 'inception_4b_double_3x3_reduce_bn', 206 | 'inception_4b_relu_double_3x3_reduce', 207 | 'inception_4b_double_3x3_1', 208 | 'inception_4b_double_3x3_1_bn', 209 | 'inception_4b_relu_double_3x3_1', 210 | 'inception_4b_double_3x3_2', 211 | 'inception_4b_double_3x3_2_bn', 212 | 'inception_4b_relu_double_3x3_2') 213 | 214 | model.ConnectLayers('inception_4b_pool', 215 | 'inception_4b_pool_proj', 216 | 'inception_4b_pool_proj_bn', 217 | 'inception_4b_relu_pool_proj') 218 | 219 | childrenNames = ['inception_4c_1x1', 220 | 'inception_4c_3x3_reduce', 221 | 'inception_4c_double_3x3_reduce', 222 | 'inception_4c_pool'] 223 | 224 | model.ConnectChildrenOutputToChildren('inception_4b_relu_1x1', 225 | 'inception_4b_relu_3x3', 226 | 'inception_4b_relu_double_3x3_2', 227 | 
'inception_4b_relu_pool_proj', 228 | childrenNames=childrenNames) 229 | 230 | model.ConnectLayers('inception_4c_1x1', 231 | 'inception_4c_1x1_bn', 232 | 'inception_4c_relu_1x1') 233 | 234 | model.ConnectLayers('inception_4c_3x3_reduce', 235 | 'inception_4c_3x3_reduce_bn', 236 | 'inception_4c_relu_3x3_reduce', 237 | 'inception_4c_3x3', 238 | 'inception_4c_3x3_bn', 239 | 'inception_4c_relu_3x3') 240 | 241 | model.ConnectLayers('inception_4c_double_3x3_reduce', 242 | 'inception_4c_double_3x3_reduce_bn', 243 | 'inception_4c_relu_double_3x3_reduce', 244 | 'inception_4c_double_3x3_1', 245 | 'inception_4c_double_3x3_1_bn', 246 | 'inception_4c_relu_double_3x3_1', 247 | 'inception_4c_double_3x3_2', 248 | 'inception_4c_double_3x3_2_bn', 249 | 'inception_4c_relu_double_3x3_2') 250 | 251 | model.ConnectLayers('inception_4c_pool', 252 | 'inception_4c_pool_proj', 253 | 'inception_4c_pool_proj_bn', 254 | 'inception_4c_relu_pool_proj') 255 | 256 | childrenNames = ['inception_4d_1x1', 257 | 'inception_4d_3x3_reduce', 258 | 'inception_4d_double_3x3_reduce', 259 | 'inception_4d_pool'] 260 | 261 | model.ConnectChildrenOutputToChildren('inception_4c_relu_1x1', 262 | 'inception_4c_relu_3x3', 263 | 'inception_4c_relu_double_3x3_2', 264 | 'inception_4c_relu_pool_proj', 265 | childrenNames=childrenNames) 266 | 267 | model.ConnectLayers('inception_4d_1x1', 268 | 'inception_4d_1x1_bn', 269 | 'inception_4d_relu_1x1') 270 | 271 | model.ConnectLayers('inception_4d_3x3_reduce', 272 | 'inception_4d_3x3_reduce_bn', 273 | 'inception_4d_relu_3x3_reduce', 274 | 'inception_4d_3x3', 275 | 'inception_4d_3x3_bn', 276 | 'inception_4d_relu_3x3') 277 | 278 | model.ConnectLayers('inception_4d_double_3x3_reduce', 279 | 'inception_4d_double_3x3_reduce_bn', 280 | 'inception_4d_relu_double_3x3_reduce', 281 | 'inception_4d_double_3x3_1', 282 | 'inception_4d_double_3x3_1_bn', 283 | 'inception_4d_relu_double_3x3_1', 284 | 'inception_4d_double_3x3_2', 285 | 'inception_4d_double_3x3_2_bn', 286 | 'inception_4d_relu_double_3x3_2') 287 | 288 | model.ConnectLayers('inception_4d_pool', 289 | 'inception_4d_pool_proj', 290 | 'inception_4d_pool_proj_bn', 291 | 'inception_4d_relu_pool_proj') 292 | 293 | childrenNames = ['inception_4e_3x3_reduce', 294 | 'inception_4e_double_3x3_reduce', 295 | 'inception_4e_pool'] 296 | 297 | model.ConnectChildrenOutputToChildren('inception_4d_relu_1x1', 298 | 'inception_4d_relu_3x3', 299 | 'inception_4d_relu_double_3x3_2', 300 | 'inception_4d_relu_pool_proj', 301 | childrenNames=childrenNames) 302 | 303 | model.ConnectLayers('inception_4e_3x3_reduce', 304 | 'inception_4e_3x3_reduce_bn', 305 | 'inception_4e_relu_3x3_reduce', 306 | 'inception_4e_3x3', 307 | 'inception_4e_3x3_bn', 308 | 'inception_4e_relu_3x3') 309 | 310 | model.ConnectLayers('inception_4e_double_3x3_reduce', 311 | 'inception_4e_double_3x3_reduce_bn', 312 | 'inception_4e_relu_double_3x3_reduce', 313 | 'inception_4e_double_3x3_1', 314 | 'inception_4e_double_3x3_1_bn', 315 | 'inception_4e_relu_double_3x3_1', 316 | 'inception_4e_double_3x3_2', 317 | 'inception_4e_double_3x3_2_bn', 318 | 'inception_4e_relu_double_3x3_2') 319 | 320 | childrenNames = ['inception_5a_1x1', 321 | 'inception_5a_3x3_reduce', 322 | 'inception_5a_double_3x3_reduce', 323 | 'inception_5a_pool'] 324 | 325 | model.ConnectChildrenOutputToChildren('inception_4e_relu_3x3', 326 | 'inception_4e_relu_double_3x3_2', 327 | 'inception_4e_pool', 328 | childrenNames=childrenNames) 329 | 330 | model.ConnectLayers('inception_5a_1x1', 331 | 'inception_5a_1x1_bn', 332 | 'inception_5a_relu_1x1') 333 | 
334 | model.ConnectLayers('inception_5a_3x3_reduce', 335 | 'inception_5a_3x3_reduce_bn', 336 | 'inception_5a_relu_3x3_reduce', 337 | 'inception_5a_3x3', 338 | 'inception_5a_3x3_bn', 339 | 'inception_5a_relu_3x3') 340 | 341 | model.ConnectLayers('inception_5a_double_3x3_reduce', 342 | 'inception_5a_double_3x3_reduce_bn', 343 | 'inception_5a_relu_double_3x3_reduce', 344 | 'inception_5a_double_3x3_1', 345 | 'inception_5a_double_3x3_1_bn', 346 | 'inception_5a_relu_double_3x3_1', 347 | 'inception_5a_double_3x3_2', 348 | 'inception_5a_double_3x3_2_bn', 349 | 'inception_5a_relu_double_3x3_2') 350 | 351 | model.ConnectLayers('inception_5a_pool', 352 | 'inception_5a_pool_proj', 353 | 'inception_5a_pool_proj_bn', 354 | 'inception_5a_relu_pool_proj') 355 | 356 | childrenNames = ['inception_5b_1x1', 357 | 'inception_5b_3x3_reduce', 358 | 'inception_5b_double_3x3_reduce', 359 | 'inception_5b_pool'] 360 | 361 | model.ConnectChildrenOutputToChildren('inception_5a_relu_1x1', 362 | 'inception_5a_relu_3x3', 363 | 'inception_5a_relu_double_3x3_2', 364 | 'inception_5a_relu_pool_proj', 365 | childrenNames=childrenNames) 366 | 367 | model.ConnectLayers('inception_5b_1x1', 368 | 'inception_5b_1x1_bn', 369 | 'inception_5b_relu_1x1') 370 | 371 | model.ConnectLayers('inception_5b_3x3_reduce', 372 | 'inception_5b_3x3_reduce_bn', 373 | 'inception_5b_relu_3x3_reduce', 374 | 'inception_5b_3x3', 375 | 'inception_5b_3x3_bn', 376 | 'inception_5b_relu_3x3') 377 | 378 | model.ConnectLayers('inception_5b_double_3x3_reduce', 379 | 'inception_5b_double_3x3_reduce_bn', 380 | 'inception_5b_relu_double_3x3_reduce', 381 | 'inception_5b_double_3x3_1', 382 | 'inception_5b_double_3x3_1_bn', 383 | 'inception_5b_relu_double_3x3_1', 384 | 'inception_5b_double_3x3_2', 385 | 'inception_5b_double_3x3_2_bn', 386 | 'inception_5b_relu_double_3x3_2') 387 | 388 | model.ConnectLayers('inception_5b_pool', 389 | 'inception_5b_pool_proj', 390 | 'inception_5b_pool_proj_bn', 391 | 'inception_5b_relu_pool_proj') 392 | 393 | featuresOut = ConcatenateOutputs('inception_5b_relu_1x1', 394 | 'inception_5b_relu_3x3', 395 | 'inception_5b_relu_double_3x3_2', 396 | 'inception_5b_relu_pool_proj') 397 | 398 | if featuresOut is not None: 399 | output_shape = t2k.kerasShape(featuresOut) 400 | 401 | adaptAvgPoolWidth = output_shape[2] 402 | avgPl = keras.layers.AveragePooling2D(pool_size=adaptAvgPoolWidth, 403 | padding='valid', 404 | data_format='channels_first') 405 | 406 | avgPoolOut = avgPl(featuresOut) 407 | 408 | flatten = keras.layers.Flatten(data_format='channels_first') 409 | 410 | flattenOut = flatten(avgPoolOut) 411 | 412 | model.getChild(name='last_linear').kerasInput = flattenOut 413 | 414 | model.ConnectChildrenOutputToChild('inception_5b_relu_1x1', 415 | 'inception_5b_relu_3x3', 416 | 'inception_5b_relu_double_3x3_2', 417 | 'inception_5b_relu_pool_proj', 418 | childName='last_linear', 419 | connectKeras=False) 420 | 421 | model.ConnectChildrenOutputToModel('last_linear') 422 | 423 | elif model.type == 'Sequential': 424 | model.ConnectModelInputToChildren('0') 425 | for i in range(len(model.children)-1): 426 | model.Connect2Layers(str(i), str(i+1)) 427 | model.ConnectChildrenOutputToModel(str(len(model.children)-1)) 428 | 429 | else: 430 | err = "Warning: layer or model '{}' not recognized!".format(model.type) 431 | raise NotImplementedError(err) 432 | -------------------------------------------------------------------------------- /pytorch_keras_converter/utility/t2k_equivalents/cadene/FBResNet.py: 
-------------------------------------------------------------------------------- 1 | try: 2 | import tensorflow.keras as keras 3 | except ImportError: 4 | try: 5 | import keras 6 | except ImportError: 7 | keras = None 8 | try: 9 | import torch 10 | except ImportError: 11 | torch = None 12 | 13 | from ... import torch2keras as t2k 14 | 15 | 16 | def spreadSignal(model): 17 | 18 | def Output(child): 19 | if isinstance(child, str) and model.getChild(name=child) is not None: 20 | return model.getChild(name=child).kerasOutput 21 | elif child in model.children: 22 | return child.kerasOutput 23 | return None 24 | 25 | if model.type == 'BasicBlock': 26 | # CLASS 'BasicBlock' 27 | 28 | model.ConnectModelInputToChildren('conv1') 29 | model.ConnectLayers('conv1', 30 | 'bn1') 31 | 32 | if Output('bn1') is not None: 33 | # Create a ReLU layer to put between 'bn1' and 'conv2' 34 | relu1 = keras.layers.ReLU(input_shape=model['bn1'].output_shape) 35 | outRelu1 = relu1(Output('bn1')) 36 | model.getChild(name='conv2').kerasInput = outRelu1 37 | 38 | model.ConnectLayers('conv2', 39 | 'bn2') 40 | 41 | if model.getChild(name='downsample') is not None: 42 | model.ConnectModelInputToChildren('downsample') 43 | if Output('downsample') is not None and Output('bn2') is not None: 44 | add = keras.layers.Add() 45 | out = add([Output('bn2'), Output('downsample')]) 46 | else: 47 | out = None 48 | else: 49 | if Output('bn2') is not None: 50 | add = keras.layers.Add() 51 | out = add([Output('bn2'), model.kerasInput]) 52 | else: 53 | out = None 54 | 55 | if out is not None: 56 | relu2 = keras.layers.ReLU() 57 | outRelu2 = relu2(out) 58 | model.kerasOutput = outRelu2 59 | 60 | elif model.type == 'Bottleneck': 61 | # CLASS 'Bottleneck' 62 | model.ConnectModelInputToChildren('conv1') 63 | model.ConnectLayers('conv1', 64 | 'bn1') 65 | 66 | if Output('bn1') is not None: 67 | # Create a ReLU layer to put between 'bn1' and 'conv2' 68 | relu1 = keras.layers.ReLU(input_shape=model['bn1'].output_shape) 69 | outRelu1 = relu1(Output('bn1')) 70 | model.getChild(name='conv2').kerasInput = outRelu1 71 | 72 | model.ConnectLayers('conv2', 73 | 'bn2') 74 | 75 | if Output('bn2') is not None: 76 | # Create a ReLU layer to put between 'bn2' and 'conv3' 77 | relu2 = keras.layers.ReLU(input_shape=model['bn2'].output_shape) 78 | outRelu2 = relu2(Output('bn2')) 79 | model.getChild(name='conv3').kerasInput = outRelu2 80 | 81 | model.ConnectLayers('conv3', 82 | 'bn3') 83 | 84 | if model.getChild(name='downsample') is not None: 85 | model.ConnectModelInputToChildren('downsample') 86 | if Output('downsample') is not None and Output('bn3') is not None: 87 | add = keras.layers.Add() 88 | out = add([Output('bn3'), Output('downsample')]) 89 | else: 90 | out = None 91 | else: 92 | if Output('bn3') is not None: 93 | add = keras.layers.Add() 94 | out = add([Output('bn3'), model.kerasInput]) 95 | else: 96 | out = None 97 | 98 | if out is not None: 99 | relu3 = keras.layers.ReLU() 100 | outRelu3 = relu3(out) 101 | model.kerasOutput = outRelu3 102 | 103 | elif model.type == 'FBResNet': 104 | # CLASS 'FBResNet' 105 | model.ConnectModelInputToChildren('conv1') 106 | model.ConnectLayers('conv1', 107 | 'bn1', 108 | 'relu', 109 | 'maxpool', 110 | 'layer1', 111 | 'layer2', 112 | 'layer3', 113 | 'layer4') 114 | 115 | featuresOut = model.getChild(name='layer4').kerasOutput 116 | 117 | if featuresOut is not None: 118 | adaptiveAvgPoolWidth = model['layer4'].output_shape[2] 119 | 120 | KerasAvgPool = keras.layers.AveragePooling2D 121 | 122 | avgPool = 
KerasAvgPool(pool_size=adaptiveAvgPoolWidth, 123 | padding='valid', 124 | data_format='channels_first', 125 | input_shape=model['layer4'].output_shape) 126 | 127 | avgPoolOut = avgPool(featuresOut) 128 | 129 | shapeOUT = t2k.kerasShape(avgPoolOut) 130 | 131 | flatten = keras.layers.Flatten(data_format='channels_first', 132 | input_shape=shapeOUT) 133 | flattenOut = flatten(avgPoolOut) 134 | 135 | model.getChild(name='last_linear').kerasInput = flattenOut 136 | 137 | model.Connect2Layers('layer4', 'last_linear', connectKeras=False) 138 | 139 | model.ConnectChildrenOutputToModel('last_linear') 140 | 141 | elif model.type == 'Sequential': 142 | model.ConnectModelInputToChildren('0') 143 | for i in range(len(model.children)-1): 144 | model.Connect2Layers(str(i), str(i+1)) 145 | model.ConnectChildrenOutputToModel(str(len(model.children)-1)) 146 | 147 | else: 148 | err = "Warning: layer or model '{}' not recognized!".format(model.type) 149 | raise NotImplementedError(err) 150 | -------------------------------------------------------------------------------- /pytorch_keras_converter/utility/t2k_equivalents/cadene/InceptionV4.py: -------------------------------------------------------------------------------- 1 | try: 2 | import tensorflow.keras as keras 3 | except ImportError: 4 | try: 5 | import keras 6 | except ImportError: 7 | keras = None 8 | try: 9 | import torch 10 | except ImportError: 11 | torch = None 12 | 13 | from ... import torch2keras as t2k 14 | 15 | 16 | def spreadSignal(model): 17 | if model.type == 'BasicConv2d': 18 | model.ConnectModelInputToChildren('conv') 19 | if model.getChild(name='relu') is None: 20 | model.ConnectLayers('conv', 'bn') 21 | model.ConnectChildrenOutputToModel('bn') 22 | else: 23 | model.ConnectLayers('conv', 'bn', 'relu') 24 | model.ConnectChildrenOutputToModel('relu') 25 | 26 | elif model.type == 'Mixed_3a': 27 | model.ConnectModelInputToChildren('conv') 28 | model.ConnectModelInputToChildren('maxpool') 29 | model.ConnectChildrenOutputToModel('maxpool', 30 | 'conv') 31 | 32 | elif model.type == 'Mixed_4a': 33 | model.ConnectModelInputToChildren('branch0') 34 | model.ConnectModelInputToChildren('branch1') 35 | model.ConnectChildrenOutputToModel('branch0', 36 | 'branch1') 37 | 38 | elif model.type == 'Mixed_5a': 39 | model.ConnectModelInputToChildren('conv') 40 | model.ConnectModelInputToChildren('maxpool') 41 | model.ConnectChildrenOutputToModel('conv', 42 | 'maxpool') 43 | 44 | elif model.type == 'Inception_A': 45 | model.ConnectModelInputToChildren('branch0') 46 | model.ConnectModelInputToChildren('branch1') 47 | model.ConnectModelInputToChildren('branch2') 48 | model.ConnectModelInputToChildren('branch3') 49 | model.ConnectChildrenOutputToModel('branch0', 50 | 'branch1', 51 | 'branch2', 52 | 'branch3') 53 | 54 | elif model.type == 'Reduction_A': 55 | model.ConnectModelInputToChildren('branch0') 56 | model.ConnectModelInputToChildren('branch1') 57 | model.ConnectModelInputToChildren('branch2') 58 | model.ConnectChildrenOutputToModel('branch0', 59 | 'branch1', 60 | 'branch2') 61 | 62 | elif model.type == 'Inception_B': 63 | model.ConnectModelInputToChildren('branch0') 64 | model.ConnectModelInputToChildren('branch1') 65 | model.ConnectModelInputToChildren('branch2') 66 | model.ConnectModelInputToChildren('branch3') 67 | model.ConnectChildrenOutputToModel('branch0', 68 | 'branch1', 69 | 'branch2', 70 | 'branch3') 71 | 72 | elif model.type == 'Reduction_B': 73 | model.ConnectModelInputToChildren('branch0') 74 | model.ConnectModelInputToChildren('branch1') 75 
75 |         model.ConnectModelInputToChildren('branch2')
76 |         model.ConnectChildrenOutputToModel('branch0',
77 |                                            'branch1',
78 |                                            'branch2')
79 | 
80 |     elif model.type == 'Inception_C':
81 |         model.ConnectModelInputToChildren('branch0')
82 |         model.ConnectModelInputToChildren('branch1_0')
83 |         model.ConnectModelInputToChildren('branch2_0')
84 |         model.ConnectModelInputToChildren('branch3')
85 |         model.ConnectLayers('branch1_0', 'branch1_1a')
86 |         model.ConnectLayers('branch1_0', 'branch1_1b')
87 |         model.ConnectLayers('branch2_0', 'branch2_1', 'branch2_2')
88 |         model.ConnectLayers('branch2_2', 'branch2_3a')
89 |         model.ConnectLayers('branch2_2', 'branch2_3b')
90 |         model.ConnectChildrenOutputToModel('branch0',
91 |                                            'branch1_1a',
92 |                                            'branch1_1b',
93 |                                            'branch2_3a',
94 |                                            'branch2_3b',
95 |                                            'branch3')
96 | 
97 |     elif model.type == 'InceptionV4':
98 |         model.ConnectModelInputToChildren('features')
99 | 
100 |         featuresOut = model.getChild(name='features').kerasOutput
101 | 
102 |         if featuresOut is not None:
103 |             adaptiveAvgPoolWidth = model['features'].output_shape[2]
104 | 
105 |             KerasAvgPool = keras.layers.AveragePooling2D
106 | 
107 |             avgPool = KerasAvgPool(pool_size=adaptiveAvgPoolWidth,
108 |                                    padding='valid',
109 |                                    data_format='channels_first',
110 |                                    input_shape=model['features'].output_shape)
111 | 
112 |             avgPoolOut = avgPool(featuresOut)
113 | 
114 |             shapeOUT = t2k.kerasShape(avgPoolOut)
115 | 
116 |             flatten = keras.layers.Flatten(data_format='channels_first',
117 |                                            input_shape=shapeOUT)
118 |             flattenOut = flatten(avgPoolOut)
119 | 
120 |             model.getChild(name='last_linear').kerasInput = flattenOut
121 | 
122 |         model.Connect2Layers('features', 'last_linear', connectKeras=False)
123 | 
124 |         model.ConnectChildrenOutputToModel('last_linear')
125 | 
126 |     elif model.type == 'Sequential':
127 |         model.ConnectModelInputToChildren('0')
128 |         for i in range(len(model.children)-1):
129 |             model.Connect2Layers(str(i), str(i+1))
130 |         model.ConnectChildrenOutputToModel(str(len(model.children)-1))
131 | 
132 |     else:
133 |         err = "Error: layer or model '{}' not recognized!".format(model.type)
134 |         raise NotImplementedError(err)
135 | 
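The BasicConv2d case above chains conv, bn and relu through the converter's LayerRepresentation graph. For orientation, the resulting Keras graph is the same one you would build by hand with the functional API, roughly as follows (input shape, channel counts and strides are illustrative, not taken from the converter):
```
import tensorflow.keras as keras

inputs = keras.layers.Input(shape=(3, 299, 299))  # channels_first, like the converter
x = keras.layers.Conv2D(32, 3, strides=2, use_bias=False,
                        data_format='channels_first')(inputs)
x = keras.layers.BatchNormalization(axis=1)(x)  # axis=1 is the channel axis here
x = keras.layers.ReLU()(x)
basic_conv2d = keras.models.Model(inputs=inputs, outputs=x)
```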
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/cadene/ResNet.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | try:
9 |     import torch
10 | except ImportError:
11 |     torch = None
12 | 
13 | 
14 | def spreadSignal(model):
15 | 
16 |     def Output(child):
17 |         if isinstance(child, str) and model.getChild(name=child) is not None:
18 |             return model.getChild(name=child).kerasOutput
19 |         elif child in model.children:
20 |             return child.kerasOutput
21 |         return None
22 | 
23 |     if model.type == 'BasicBlock':
24 |         model.ConnectModelInputToChildren('conv1')
25 |         model.ConnectLayers('conv1',
26 |                             'bn1')
27 | 
28 |         if Output('bn1') is not None:
29 |             relu1 = keras.layers.ReLU(input_shape=model['bn1'].output_shape)
30 |             outRelu1 = relu1(Output('bn1'))
31 |             model.getChild(name='conv2').kerasInput = outRelu1
32 | 
33 |         model.ConnectLayers('conv2',
34 |                             'bn2')
35 | 
36 |         if model.getChild(name='downsample') is not None:
37 |             model.ConnectModelInputToChildren('downsample')
38 |             if Output('downsample') is not None and Output('bn2') is not None:
39 |                 add = keras.layers.Add()
40 |                 out = add([Output('bn2'), Output('downsample')])
41 |             else:
42 |                 out = None
43 |         else:
44 |             if Output('bn2') is not None:
45 |                 add = keras.layers.Add()
46 |                 out = add([Output('bn2'), model.kerasInput])
47 |             else:
48 |                 out = None
49 | 
50 |         if out is not None:
51 |             relu2 = keras.layers.ReLU()
52 |             outRelu2 = relu2(out)
53 |             model.kerasOutput = outRelu2
54 | 
55 |     elif model.type == 'Bottleneck':
56 |         model.ConnectModelInputToChildren('conv1')
57 |         model.ConnectLayers('conv1',
58 |                             'bn1')
59 | 
60 |         if Output('bn1') is not None:
61 |             relu1 = keras.layers.ReLU(input_shape=model['bn1'].output_shape)
62 |             outRelu1 = relu1(Output('bn1'))
63 |             model.getChild(name='conv2').kerasInput = outRelu1
64 | 
65 |         model.ConnectLayers('conv2',
66 |                             'bn2')
67 | 
68 |         if Output('bn2') is not None:
69 |             relu2 = keras.layers.ReLU(input_shape=model['bn2'].output_shape)
70 |             outRelu2 = relu2(Output('bn2'))
71 |             model.getChild(name='conv3').kerasInput = outRelu2
72 | 
73 |         model.ConnectLayers('conv3',
74 |                             'bn3')
75 | 
76 |         if model.getChild(name='downsample') is not None:
77 |             model.ConnectModelInputToChildren('downsample')
78 |             if Output('downsample') is not None and Output('bn3') is not None:
79 |                 add = keras.layers.Add()
80 |                 out = add([Output('bn3'), Output('downsample')])
81 |             else:
82 |                 out = None
83 |         else:
84 |             if Output('bn3') is not None:
85 |                 add = keras.layers.Add()
86 |                 out = add([Output('bn3'), model.kerasInput])
87 |             else:
88 |                 out = None
89 | 
90 |         if out is not None:
91 |             relu3 = keras.layers.ReLU()
92 |             outRelu3 = relu3(out)
93 |             model.kerasOutput = outRelu3
94 | 
95 |     elif model.type == 'ResNet':
96 |         model.ConnectModelInputToChildren('conv1')
97 |         model.ConnectLayers('conv1',
98 |                             'bn1',
99 |                             'relu',
100 |                             'maxpool',
101 |                             'layer1',
102 |                             'layer2',
103 |                             'layer3',
104 |                             'layer4',
105 |                             'avgpool')
106 | 
107 |         featuresOut = Output('avgpool')
108 |         model.Connect2Layers('avgpool', 'last_linear', connectKeras=False)
109 | 
110 |         if featuresOut is not None:
111 |             flatten = keras.layers.Flatten(data_format='channels_first')
112 | 
113 |             flattenOut = flatten(featuresOut)
114 | 
115 |             model.getChild(name='last_linear').kerasInput = flattenOut
116 | 
117 |         model.ConnectChildrenOutputToModel('last_linear')
118 | 
119 |     elif model.type == 'Sequential':
120 |         model.ConnectModelInputToChildren('0')
121 |         for i in range(len(model.children)-1):
122 |             model.Connect2Layers(str(i), str(i+1))
123 |         model.ConnectChildrenOutputToModel(str(len(model.children)-1))
124 | 
125 |     else:
126 |         err = "Error: layer or model '{}' not recognized!".format(model.type)
127 |         raise NotImplementedError(err)
128 | 
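The BasicBlock and Bottleneck branches above reproduce the usual residual wiring: the batch-normalized branch is summed with either the downsampled input or the raw block input, and the final ReLU is applied after the addition. A minimal hand-written sketch of the same pattern (layer sizes are illustrative):
```
import tensorflow.keras as keras

inputs = keras.layers.Input(shape=(64, 56, 56))
branch = keras.layers.Conv2D(64, 3, padding='same', use_bias=False,
                             data_format='channels_first')(inputs)
branch = keras.layers.BatchNormalization(axis=1)(branch)
summed = keras.layers.Add()([branch, inputs])  # the shortcut connection
outputs = keras.layers.ReLU()(summed)          # ReLU after the Add, as in BasicBlock
```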
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/cadene/SENet.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | try:
9 |     import torch
10 | except ImportError:
11 |     torch = None
12 | 
13 | 
14 | def spreadSignal(model):
15 | 
16 |     def Output(child):
17 |         if isinstance(child, str) and model.getChild(name=child) is not None:
18 |             return model.getChild(name=child).kerasOutput
19 |         elif child in model.children:
20 |             return child.kerasOutput
21 |         return None
22 | 
23 |     if model.type == 'SEModule':
24 |         model.ConnectModelInputToChildren('avg_pool')
25 |         model.ConnectLayers('avg_pool',
26 |                             'fc1',
27 |                             'relu',
28 |                             'fc2',
29 |                             'sigmoid')
30 | 
31 |         times = keras.layers.Multiply()
32 | 
33 |         model.kerasOutput = times([model.kerasInput, Output('sigmoid')])
34 | 
35 |     elif model.type in ['Bottleneck',
36 |                         'SEBottleneck',
37 |                         'SEResNetBottleneck',
38 |                         'SEResNeXtBottleneck']:
39 | 
40 |         model.ConnectModelInputToChildren('conv1')
41 |         model.ConnectLayers('conv1',
42 |                             'bn1')
43 | 
44 |         if Output('bn1') is not None:
45 |             relu1 = keras.layers.ReLU(input_shape=model['bn1'].output_shape)
46 |             outRelu1 = relu1(Output('bn1'))
47 |             model.getChild(name='conv2').kerasInput = outRelu1
48 | 
49 |         model.ConnectLayers('conv2',
50 |                             'bn2')
51 | 
52 |         if Output('bn2') is not None:
53 |             relu2 = keras.layers.ReLU(input_shape=model['bn2'].output_shape)
54 |             outRelu2 = relu2(Output('bn2'))
55 |             model.getChild(name='conv3').kerasInput = outRelu2
56 | 
57 |         model.ConnectLayers('conv3',
58 |                             'bn3',
59 |                             'se_module')
60 | 
61 |         if model.getChild(name='downsample') is not None:
62 |             model.ConnectModelInputToChildren('downsample')
63 |             if Output('downsample') is not None and \
64 |                     Output('se_module') is not None:
65 | 
66 |                 add = keras.layers.Add()
67 |                 out = add([Output('se_module'), Output('downsample')])
68 |             else:
69 |                 out = None
70 |         else:
71 |             if Output('se_module') is not None:
72 |                 add = keras.layers.Add()
73 |                 out = add([Output('se_module'), model.kerasInput])
74 |             else:
75 |                 out = None
76 | 
77 |         if out is not None:
78 |             relu3 = keras.layers.ReLU()
79 |             outRelu3 = relu3(out)
80 |             model.kerasOutput = outRelu3
81 | 
82 |     elif model.type == 'SENet':
83 |         model.ConnectModelInputToChildren('layer0')
84 |         model.ConnectLayers('layer0',
85 |                             'layer1',
86 |                             'layer2',
87 |                             'layer3',
88 |                             'layer4',
89 |                             'avg_pool')
90 | 
91 |         if model.getChild(name='dropout') is not None:
92 |             model.ConnectLayers('avg_pool', 'dropout')
93 |             featuresOut = Output('dropout')
94 |             model.Connect2Layers('dropout', 'last_linear', connectKeras=False)
95 |         else:
96 |             featuresOut = Output('avg_pool')
97 |             model.Connect2Layers('avg_pool', 'last_linear', connectKeras=False)
98 | 
99 |         if featuresOut is not None:
100 |             flatten = keras.layers.Flatten(data_format='channels_first')
101 | 
102 |             flattenOut = flatten(featuresOut)
103 | 
104 |             model.getChild(name='last_linear').kerasInput = flattenOut
105 | 
106 |         model.ConnectChildrenOutputToModel('last_linear')
107 | 
108 |     elif model.type == 'Sequential':
109 |         model.ConnectModelInputToChildren('0')
110 |         for i in range(len(model.children)-1):
111 |             model.Connect2Layers(str(i), str(i+1))
112 |         model.ConnectChildrenOutputToModel(str(len(model.children)-1))
113 | 
114 |     else:
115 |         err = "Error: layer or model '{}' not recognized!".format(model.type)
116 |         raise NotImplementedError(err)
117 | 
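The SEModule branch above ends with a Multiply that rescales every channel of the block input by its sigmoid-gated weight. A standalone sketch of the same squeeze-and-excitation pattern (channel counts and pool size are illustrative; in the converter fc1/fc2 come from the pretrained PyTorch weights):
```
import tensorflow.keras as keras

inputs = keras.layers.Input(shape=(256, 56, 56))
se = keras.layers.AveragePooling2D(56, data_format='channels_first')(inputs)  # squeeze to (256, 1, 1)
se = keras.layers.Conv2D(16, 1, data_format='channels_first')(se)             # fc1 as a 1x1 conv
se = keras.layers.ReLU()(se)
se = keras.layers.Conv2D(256, 1, data_format='channels_first')(se)            # fc2
se = keras.layers.Activation('sigmoid')(se)
outputs = keras.layers.Multiply()([inputs, se])  # per-channel rescale, broadcast over H and W
```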
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/cadene/__init__.py:
--------------------------------------------------------------------------------
1 | from . import InceptionV4
2 | from . import BNInception
3 | from . import SENet
4 | from . import ResNet
5 | from . import FBResNet
6 | 
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/container.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | 
9 | 
10 | def Sequential(model, file=False):
11 |     """
12 |     Converts a torch.nn.Sequential layer
13 | 
14 |     Arguments:
15 |         -model:
16 |             A LayerRepresentation object of the layer Sequential to convert
17 |         -file (bool):
18 |             If we want to write the equivalent in a python file
19 | 
20 |     Raises:
21 |         -ImportError:
22 |             If Keras import failed
23 | 
24 |     Returns:
25 |         Keras equivalent.
26 |         If file is True, returns as a str to put in a python file
27 |         Else, return the keras layer
28 | 
29 |         If layers don't have equivalents yet, returns None
30 |     """
31 |     if keras is None:
32 |         raise ImportError("Could not import keras. Conversion failed !")
33 | 
34 |     name = model.completeName()
35 | 
36 |     if not file:
37 |         kerasLayer = keras.Sequential(name=name)
38 | 
39 |         lNumber = -1
40 |         # First, we need to sort layers
41 |         subLayersDict = dict()
42 |         for child in model.children:
43 |             if 'keras' not in child.equivalent.keys():
44 |                 return None
45 |             try:
46 |                 # If layers aren't named,
47 |                 # PyTorch uses default names '0', '1', '2',...
48 |                 lNumber = int(child.name)
49 |             except ValueError:
50 |                 lNumber += 1
51 |             subLayersDict[lNumber] = child.equivalent['keras']
52 | 
53 |         subLayersList = [None]*subLayersDict.__len__()
54 | 
55 |         for number, subLayer in subLayersDict.items():
56 |             subLayersList[number] = subLayer
57 | 
58 |         if None in subLayersList:
59 |             return None
60 | 
61 |         for subLayer in subLayersList:
62 |             kerasLayer.add(subLayer)
63 | 
64 |         return kerasLayer
65 |     else:
66 |         lNumber = -1
67 |         # First, we need to sort layers
68 |         subLayersDict = dict()
69 |         for child in model.children:
70 |             if 'keras' not in child.equivalentTxt.keys():
71 |                 return None
72 |             try:
73 |                 # If layers aren't named,
74 |                 # PyTorch uses default names '0', '1', '2',...
75 | lNumber = int(child.name) 76 | except ValueError: 77 | lNumber += 1 78 | subLayersDict[lNumber] = child.equivalentTxt['keras'] 79 | 80 | subLayersList = [None]*subLayersDict.__len__() 81 | 82 | for number, subLayerTxt in subLayersDict.items(): 83 | subLayersList[number] = subLayerTxt 84 | 85 | if None in subLayersList: 86 | return None 87 | 88 | outstr = 'keras.Sequential([' 89 | 90 | for subLayerTxt in subLayersList: 91 | outstr = outstr + '\n ' + subLayerTxt + ',' 92 | 93 | outstr = outstr[:-1] + '\n], name=' + name + ')' 94 | 95 | return outstr 96 | -------------------------------------------------------------------------------- /pytorch_keras_converter/utility/t2k_equivalents/conv.py: -------------------------------------------------------------------------------- 1 | try: 2 | import tensorflow.keras as keras 3 | except ImportError: 4 | try: 5 | import keras 6 | except ImportError: 7 | keras = None 8 | 9 | 10 | def Conv2d(model, file=False, weights=True): 11 | """ 12 | Converts a torch.nn.Conv2d layer 13 | 14 | Arguments: 15 | -model: 16 | A LayerRepresentation object of the layer Conv2d to convert 17 | -file (bool): 18 | If we want to write the equivalent in a python file 19 | -weights (bool): 20 | Also convert weights 21 | 22 | Raises: 23 | -ImportError: 24 | If Keras import failed 25 | -RuntimeError: 26 | If shapes don't match 27 | -NotImplementedError: 28 | if groups != 1 or padding isn't zero-padding 29 | 30 | Returns: 31 | Keras equivalent. 32 | If file is True, returns as a str to put in a python file 33 | Else, return the keras layer 34 | """ 35 | if keras is None: 36 | raise ImportError("Could not import keras. Conversion failed !") 37 | 38 | pytorchLayer = model.equivalent['torch'] 39 | name = model.completeName() 40 | 41 | # Getting hyper parameters 42 | in_channels = pytorchLayer.in_channels 43 | out_channels = pytorchLayer.out_channels 44 | kernel_size = pytorchLayer.kernel_size 45 | stride = pytorchLayer.stride 46 | padding = pytorchLayer.padding 47 | dilation = pytorchLayer.dilation 48 | groups = pytorchLayer.groups 49 | padding_mode = pytorchLayer.padding_mode 50 | bias = 'bias' in dict(pytorchLayer.named_parameters()).keys() 51 | 52 | # A little verification 53 | if in_channels != model.input_shape[0]: 54 | raise RuntimeError("Error when converting Conv2d, shapes don't match") 55 | 56 | if groups != 1: 57 | raise NotImplementedError("Error when converting Conv2d because \ 58 | groups != 1 is not supported yet") 59 | 60 | if padding_mode != "zeros": 61 | raise NotImplementedError("Error when converting Conv2d because \ 62 | padding_mode != 'zeros' is not supported yet") 63 | 64 | # Formatting them as tuple (height, width) 65 | if isinstance(kernel_size, int): 66 | kernel_size = (kernel_size, kernel_size) 67 | if isinstance(stride, int): 68 | stride = (stride, stride) 69 | if isinstance(dilation, int): 70 | dilation = (dilation, dilation) 71 | if isinstance(padding, int): 72 | padding = (padding, padding) 73 | 74 | # Formatting padding the Keras way ((top, bottom), (left, right)) 75 | padding = ((padding[0], padding[0]), 76 | (padding[1], padding[1])) 77 | 78 | argumentsConv = {'filters': out_channels, 79 | 'kernel_size': kernel_size, 80 | 'strides': stride, 81 | 'padding': 'valid', 82 | 'data_format': 'channels_first', 83 | 'dilation_rate': dilation, 84 | 'use_bias': bias, 85 | 'name': name} 86 | 87 | argumentsPadding = {'padding': padding, 88 | 'data_format': 'channels_first', 89 | 'input_shape': model.input_shape} 90 | 91 | if padding == ((0, 0), (0, 0)): 92 | 
        argumentsConv['input_shape'] = model.input_shape
93 | 
94 |     if weights:
95 |         parametersConv = dict()
96 |         for key, val in dict(pytorchLayer.state_dict()).items():
97 |             # Convert every parameter Tensor to a numpy array
98 |             parametersConv[key] = val.detach().numpy()
99 |             if key == 'weight':
100 |                 # The weights array also needs transposing: (out, in, kH, kW) -> (kH, kW, in, out)
101 |                 parametersConv[key] = parametersConv[key].transpose(2, 3, 1, 0)
102 | 
103 |         # List of [weight, bias]
104 |         paramListConv = [parametersConv['weight']]
105 |         if 'bias' in parametersConv.keys():
106 |             paramListConv.append(parametersConv['bias'])
107 | 
108 |     if not file:
109 |         convLayer = keras.layers.Conv2D(**argumentsConv)
110 | 
111 |         paddingLayer = keras.layers.ZeroPadding2D(**argumentsPadding)
112 | 
113 |         kerasLayer = keras.Sequential()
114 |         if not padding == ((0, 0), (0, 0)):
115 |             kerasLayer.add(paddingLayer)
116 |         kerasLayer.add(convLayer)
117 | 
118 |         if weights:
119 |             kerasLayer.layers[-1].set_weights(paramListConv)
120 | 
121 |         return kerasLayer
122 |     else:
123 |         outstrConv = 'keras.layers.Conv2D('
124 |         for arg, val in argumentsConv.items():
125 |             outstrConv = outstrConv + arg + '=' + str(val) + ', '
126 |         outstrConv = outstrConv[:-2] + ')'
127 | 
128 |         outstrPadding = 'keras.layers.ZeroPadding2D('
129 |         for arg, val in argumentsPadding.items():
130 |             outstrPadding = outstrPadding + arg + '=' + str(val) + ', '
131 |         outstrPadding = outstrPadding[:-2] + ')'
132 | 
133 |         outstr = 'keras.Sequential([\n    ' + outstrPadding + ',\n    '
134 |         outstr = outstr + outstrConv + '\n])'
135 | 
136 |         return outstr
137 | 
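Two things above are worth spelling out. Keras Conv2D only knows 'valid' and 'same' padding, so an explicit PyTorch padding is emulated by a ZeroPadding2D layer followed by a 'valid' convolution. And the weight layout differs: PyTorch stores convolution kernels as (out_channels, in_channels, kH, kW), while Keras expects (kH, kW, in_channels, out_channels), which is exactly what transpose(2, 3, 1, 0) produces. A quick NumPy check (shapes illustrative):
```
import numpy as np

w_torch = np.zeros((32, 16, 3, 3))      # Conv2d(16, 32, kernel_size=3) weight
w_keras = w_torch.transpose(2, 3, 1, 0)
assert w_keras.shape == (3, 3, 16, 32)  # (kH, kW, in_channels, out_channels)
```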
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/dropout.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | 
9 | 
10 | def Dropout(model, file=False):
11 |     """
12 |     Converts a torch.nn.Dropout layer
13 | 
14 |     Arguments:
15 |         -model:
16 |             A LayerRepresentation object of the layer Dropout to convert
17 |         -file (bool):
18 |             If we want to write the equivalent in a python file
19 | 
20 |     Raises:
21 |         -ImportError:
22 |             If Keras import failed
23 | 
24 |     Returns:
25 |         Keras equivalent.
26 |         If file is True, returns as a str to put in a python file
27 |         Else, return the keras layer
28 |     """
29 |     if keras is None:
30 |         raise ImportError("Could not import keras. Conversion failed !")
31 | 
32 |     pytorchLayer = model.equivalent['torch']
33 |     name = model.completeName()
34 | 
35 |     argumentsDropout = {'rate': pytorchLayer.p,
36 |                         'input_shape': model.input_shape,
37 |                         'name': name}
38 | 
39 |     argumentsScale = {'function': lambda x: x*(1/(1-pytorchLayer.p))}
40 | 
41 |     argumentsSequential = {}
42 | 
43 |     if not file:
44 |         dropoutLayer = keras.layers.Dropout(**argumentsDropout)
45 |         scaleLayer = keras.layers.Lambda(**argumentsScale)
46 | 
47 |         kerasLayer = keras.Sequential(**argumentsSequential)
48 |         kerasLayer.add(dropoutLayer)
49 |         kerasLayer.add(scaleLayer)
50 |         return kerasLayer
51 |     else:
52 | 
53 |         outstrDropout = 'keras.layers.Dropout('
54 |         for arg, val in argumentsDropout.items():
55 |             outstrDropout = outstrDropout + arg + '=' + str(val) + ', '
56 |         outstrDropout = outstrDropout[:-2] + ')'
57 | 
58 |         outstrScale = 'keras.layers.Lambda(function= lambda x: x*(1/(1-'
59 |         outstrScale = outstrScale + str(pytorchLayer.p) + ')))'
60 | 
61 |         outstr = 'keras.Sequential([\n    ' + outstrDropout + ',\n    '
62 |         outstr = outstr + outstrScale + '\n], '
63 | 
64 |         for arg, val in argumentsSequential.items():
65 |             outstr = outstr + arg + '=' + str(val) + ', '
66 |         outstr = outstr[:-2] + ')'
67 | 
68 |         return outstr
69 | 
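For p = 0.5, the file=True branch above emits text along these lines (hand-reconstructed; the input shape and the generated layer name are placeholders). One caveat worth knowing: unlike Dropout itself, the Lambda rescale has no training/inference switch, so the 1/(1-p) factor is also applied at inference time:
```
keras.Sequential([
    keras.layers.Dropout(rate=0.5, input_shape=(2048,), name=<completeName>),
    keras.layers.Lambda(function= lambda x: x*(1/(1-0.5)))
])
```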
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/linear.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | 
9 | 
10 | def Linear(model, file=False, weights=True):
11 |     """
12 |     Converts a torch.nn.Linear layer
13 | 
14 |     Arguments:
15 |         -model:
16 |             A LayerRepresentation object of the layer Linear to convert
17 |         -file (bool):
18 |             If we want to write the equivalent in a python file
19 |         -weights (bool):
20 |             Also convert weights
21 | 
22 |     Raises:
23 |         -ImportError:
24 |             If Keras import failed
25 | 
26 |     Returns:
27 |         Keras equivalent.
28 |         If file is True, returns as a str to put in a python file
29 |         Else, return the keras layer
30 |     """
31 |     if keras is None:
32 |         raise ImportError("Could not import keras. Conversion failed !")
33 | 
34 |     pytorchLayer = model.equivalent['torch']
35 |     name = model.completeName()
36 | 
37 |     bias = 'bias' in dict(pytorchLayer.named_parameters()).keys()
38 | 
39 |     argumentsDense = {'units': pytorchLayer.out_features,
40 |                       'use_bias': bias,
41 |                       'input_shape': model.input_shape,
42 |                       'name': name}
43 | 
44 |     if weights:
45 |         parametersDense = dict()
46 |         for key, val in dict(pytorchLayer.state_dict()).items():
47 |             # Convert every parameter Tensor to a numpy array
48 |             parametersDense[key] = val.detach().numpy()
49 |             if key == 'weight':
50 |                 # The weights array also needs transposing: (out, in) -> (in, out)
51 |                 parametersDense[key] = parametersDense[key].transpose(1, 0)
52 | 
53 |         # List of [weight, bias]
54 |         paramList = [parametersDense['weight']]
55 |         if 'bias' in parametersDense.keys():
56 |             paramList.append(parametersDense['bias'])
57 | 
58 |     if not file:
59 |         DenseLayer = keras.layers.Dense(**argumentsDense)
60 | 
61 |         kerasLayer = keras.Sequential()
62 |         kerasLayer.add(DenseLayer)
63 | 
64 |         if weights:
65 |             kerasLayer.layers[0].set_weights(paramList)
66 | 
67 |         return kerasLayer
68 |     else:
69 |         outstr = 'keras.layers.Dense('
70 |         for arg, val in argumentsDense.items():
71 |             outstr = outstr + arg + '=' + str(val) + ', '
72 |         outstr = outstr[:-2] + ')'
73 |         return outstr
74 | 
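Linear layers need the same kind of layout fix as Conv2d, one dimension shorter: PyTorch's Linear stores its weight as (out_features, in_features), Keras' Dense kernel is (in_features, out_features), hence the transpose(1, 0) above. For example:
```
import numpy as np

w_torch = np.arange(6).reshape(2, 3)  # Linear(in_features=3, out_features=2) weight
w_keras = w_torch.transpose(1, 0)
assert w_keras.shape == (3, 2)        # Dense kernel: (in_features, out_features)
```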
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/padding.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | 
9 | 
10 | def ZeroPad2d(model, file=False):
11 |     """
12 |     Converts a torch.nn.ZeroPad2d layer
13 | 
14 |     Arguments:
15 |         -model:
16 |             A LayerRepresentation object of the layer ZeroPad2d to convert
17 |         -file (bool):
18 |             If we want to write the equivalent in a python file
19 | 
20 |     Raises:
21 |         -ImportError:
22 |             If Keras import failed
23 | 
24 |     Returns:
25 |         Keras equivalent.
26 |         If file is True, returns as a str to put in a python file
27 |         Else, return the keras layer
28 |     """
29 |     if keras is None:
30 |         raise ImportError("Could not import keras. Conversion failed !")
31 | 
32 |     pytorchLayer = model.equivalent['torch']
33 |     name = model.completeName()
34 | 
35 |     padding = pytorchLayer.padding
36 | 
37 |     if isinstance(padding, int):
38 |         left = padding
39 |         right = padding
40 |         bottom = padding
41 |         top = padding
42 |     elif padding.__len__() == 2:
43 |         left = padding[0]
44 |         right = padding[1]
45 |         top = 0
46 |         bottom = 0
47 |     elif padding.__len__() == 4:
48 |         left = padding[0]
49 |         right = padding[1]
50 |         top = padding[2]
51 |         bottom = padding[3]
52 | 
53 |     arguments = {'padding': ((top, bottom), (left, right)),
54 |                  'data_format': 'channels_first',
55 |                  'input_shape': model.input_shape,
56 |                  'name': name}
57 | 
58 |     if not file:
59 |         kerasLayer = keras.layers.ZeroPadding2D(**arguments)
60 |         return kerasLayer
61 |     else:
62 |         outstr = 'keras.layers.ZeroPadding2D('
63 |         for arg, val in arguments.items():
64 |             outstr = outstr + arg + '=' + str(val) + ', '
65 |         outstr = outstr[:-2] + ')'
66 |         return outstr
67 | 
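The tuple juggling above comes from the two libraries disagreeing on ordering: torch.nn.ZeroPad2d takes (left, right, top, bottom), while Keras ZeroPadding2D groups the same values as ((top, bottom), (left, right)). For instance:
```
# torch.nn.ZeroPad2d((1, 2, 3, 4))   pads left=1, right=2, top=3, bottom=4
# and becomes, after the reordering above:
# keras.layers.ZeroPadding2D(padding=((3, 4), (1, 2)), data_format='channels_first')
```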
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/t2k_equivalents/pooling.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import tensorflow.keras as keras
3 | except ImportError:
4 |     try:
5 |         import keras
6 |     except ImportError:
7 |         keras = None
8 | 
9 | import math
10 | 
11 | 
12 | def MaxPool2d(model, file=False):
13 |     """
14 |     Converts a torch.nn.MaxPool2d layer
15 | 
16 |     Arguments:
17 |         -model:
18 |             A LayerRepresentation object of the layer MaxPool2d to convert
19 |         -file (bool):
20 |             If we want to write the equivalent in a python file
21 | 
22 |     Raises:
23 |         -ImportError:
24 |             If Keras import failed
25 |         -NotImplementedError:
26 |             If dilation factor isn't 1
27 | 
28 |     Returns:
29 |         Keras equivalent.
30 |         If file is True, returns as a str to put in a python file
31 |         Else, return the keras layer
32 |     """
33 |     if keras is None:
34 |         raise ImportError("Could not import keras. Conversion failed !")
35 | 
36 |     pytorchLayer = model.equivalent['torch']
37 |     name = model.completeName()
38 | 
39 |     # Getting hyper parameters
40 |     kernel_size = pytorchLayer.kernel_size
41 |     stride = pytorchLayer.stride
42 |     dilation = pytorchLayer.dilation
43 |     padding = pytorchLayer.padding
44 |     ceil_mode = pytorchLayer.ceil_mode
45 | 
46 |     # Formatting them as tuple (height, width)
47 |     if isinstance(kernel_size, int):
48 |         kernel_size = (kernel_size, kernel_size)
49 |     if isinstance(stride, int):
50 |         stride = (stride, stride)
51 |     if isinstance(dilation, int):
52 |         dilation = (dilation, dilation)
53 |     if isinstance(padding, int):
54 |         padding = (padding, padding)
55 | 
56 |     # Formatting padding the Keras way ((top, bottom), (left, right))
57 |     padding = ((padding[0], padding[0]),
58 |                (padding[1], padding[1]))
59 | 
60 |     if not dilation == (1, 1):
61 |         raise NotImplementedError("Error when converting MaxPool2d because \
62 |                                   dilation != 1 is not supported yet")
63 | 
64 |     # Take care of ceil_mode when computing output shape
65 |     # Those formulas come from PyTorch documentation
66 |     # https://pytorch.org/docs/stable/nn.html#maxpool2d
67 |     Hin = model.input_shape[-2]
68 |     Hout = (Hin+2*padding[0][0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0]+1
69 | 
70 |     Win = model.input_shape[-1]
71 |     Wout = (Win+2*padding[1][0]-dilation[1]*(kernel_size[1]-1)-1)/stride[1]+1
72 | 
73 |     # If ceil_mode is True, we may add a padding
74 |     if ceil_mode and math.ceil(Hout) != math.floor(Hout):
75 |         padding = ((padding[0][0], padding[0][1]+1),
76 |                    (padding[1][0], padding[1][1]))
77 | 
78 |     if ceil_mode and math.ceil(Wout) != math.floor(Wout):
79 |         padding = ((padding[0][0], padding[0][1]),
80 |                    (padding[1][0], padding[1][1]+1))
81 | 
82 |     argumentsMaxpool = {'pool_size': kernel_size,
83 |                         'strides': stride,
84 |                         'padding': 'valid',
85 |                         'data_format': 'channels_first',
86 |                         'name': name}
87 | 
88 |     if padding == ((0, 0), (0, 0)):
89 |         argumentsMaxpool['input_shape'] = model.input_shape
90 | 
91 |     argumentsPadding = {'padding': padding,
92 |                         'input_shape': model.input_shape,
93 |                         'data_format': 'channels_first'}
94 | 
95 |     argumentsSequential = {}
96 | 
97 |     if not file:
98 |         maxpoolLayer = keras.layers.MaxPooling2D(**argumentsMaxpool)
99 |         if padding == ((0, 0), (0, 0)):
100 |             # No need to use a padding layer
101 |             return maxpoolLayer
102 | 
103 |         paddingLayer = keras.layers.ZeroPadding2D(**argumentsPadding)
104 |         kerasLayer = keras.Sequential(**argumentsSequential)
105 |         kerasLayer.add(paddingLayer)
106 |         kerasLayer.add(maxpoolLayer)
107 | 
108 |         return kerasLayer
109 |     else:
110 |         outstrMaxpool = 'keras.layers.MaxPooling2D('
111 |         for arg, val in argumentsMaxpool.items():
112 |             outstrMaxpool = outstrMaxpool + arg + '=' + str(val) + ', '
113 |         outstrMaxpool = outstrMaxpool[:-2] + ')'
114 | 
115 |         outstrPadding = 'keras.layers.ZeroPadding2D('
116 |         for arg, val in argumentsPadding.items():
117 |             outstrPadding = outstrPadding + arg + '=' + str(val) + ', '
118 |         outstrPadding = outstrPadding[:-2] + ')'
119 | 
120 |         outstr = 'keras.Sequential([\n    ' + outstrPadding + ',\n    '
121 |         outstr = outstr + outstrMaxpool + '\n], '
122 | 
123 |         for arg, val in argumentsSequential.items():
124 |             outstr = outstr + arg + '=' + str(val) + ', '
125 |         outstr = outstr[:-2] + ')'
126 | 
127 |         return outstr
128 | 
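# A worked example of the ceil_mode handling in MaxPool2d above, with
# illustrative values: Hin = 112, kernel 3, stride 2, no padding, dilation 1.
# The fractional output height is (112 + 0 - 2 - 1)/2 + 1 = 55.5; floor gives
# 55 but ceil_mode asks for 56, so one extra padded row is appended on the
# bottom before the 'valid' pooling layer, which then yields
# (113 - 3)/2 + 1 = 56 rows, matching PyTorch.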
129 | 
130 | def AvgPool2d(model, file=False):
131 |     """
132 |     Converts a torch.nn.AvgPool2d layer
133 | 
134 |     Arguments:
135 |         -model:
136 |             A LayerRepresentation object of the layer AvgPool2d to convert
137 |         -file (bool):
138 |             If we want to write the equivalent in a python file
139 | 
140 |     Raises:
141 |         -ImportError:
142 |             If Keras import failed
143 |         -NotImplementedError:
144 |             if count_include_pad is False
145 | 
146 |     Returns:
147 |         Keras equivalent.
148 |         If file is True, returns as a str to put in a python file
149 |         Else, return the keras layer
150 |     """
151 |     if keras is None:
152 |         raise ImportError("Could not import keras. Conversion failed !")
153 | 
154 |     pytorchLayer = model.equivalent['torch']
155 |     name = model.completeName()
156 | 
157 |     # Getting hyper parameters
158 |     kernel_size = pytorchLayer.kernel_size
159 |     stride = pytorchLayer.stride
160 |     padding = pytorchLayer.padding
161 |     ceil_mode = pytorchLayer.ceil_mode
162 |     count_include_pad = pytorchLayer.count_include_pad
163 | 
164 |     if not count_include_pad:
165 |         raise NotImplementedError("Error when converting AvgPool2d because \
166 |                                   count_include_pad == False is not supported \
167 |                                   yet")
168 | 
169 |     # Formatting them as tuple (height, width)
170 |     if isinstance(kernel_size, int):
171 |         kernel_size = (kernel_size, kernel_size)
172 |     if isinstance(stride, int):
173 |         stride = (stride, stride)
174 |     if isinstance(padding, int):
175 |         padding = (padding, padding)
176 | 
177 |     # Formatting padding the Keras way ((top, bottom), (left, right))
178 |     padding = ((padding[0], padding[0]),
179 |                (padding[1], padding[1]))
180 | 
181 |     # Take care of ceil_mode when computing output shape
182 |     # Those formulas come from PyTorch documentation
183 |     # https://pytorch.org/docs/stable/nn.html#avgpool2d
184 |     Hin = model.input_shape[-2]
185 |     Hout = (Hin + 2*padding[0][0] - kernel_size[0])/stride[0] + 1
186 | 
187 |     Win = model.input_shape[-1]
188 |     Wout = (Win + 2*padding[1][0] - kernel_size[1])/stride[1] + 1
189 | 
190 |     # If ceil_mode is True, we may add a padding
191 |     if ceil_mode and math.ceil(Hout) != math.floor(Hout):
192 |         padding = ((padding[0][0], padding[0][1]+1),
193 |                    (padding[1][0], padding[1][1]))
194 | 
195 |     if ceil_mode and math.ceil(Wout) != math.floor(Wout):
196 |         padding = ((padding[0][0], padding[0][1]),
197 |                    (padding[1][0], padding[1][1]+1))
198 | 
199 |     argumentsAvgpool = {'pool_size': kernel_size,
200 |                         'strides': stride,
201 |                         'padding': 'valid',
202 |                         'data_format': 'channels_first',
203 |                         'name': name}
204 | 
205 |     if padding == ((0, 0), (0, 0)):
206 |         argumentsAvgpool['input_shape'] = model.input_shape
207 | 
208 |     argumentsPadding = {'padding': padding,
209 |                         'input_shape': model.input_shape,
210 |                         'data_format': 'channels_first'}
211 | 
212 |     argumentsSequential = {}
213 | 
214 |     if not file:
215 |         avgPoolLayer = keras.layers.AveragePooling2D(**argumentsAvgpool)
216 |         if padding == ((0, 0), (0, 0)):
217 |             # No need to use a padding layer
218 |             return avgPoolLayer
219 | 
220 |         paddingLayer = keras.layers.ZeroPadding2D(**argumentsPadding)
221 | 
222 |         kerasLayer = keras.Sequential(**argumentsSequential)
223 |         kerasLayer.add(paddingLayer)
224 |         kerasLayer.add(avgPoolLayer)
225 | 
226 |         return kerasLayer
227 |     else:
228 |         outstrAvgpool = 'keras.layers.AveragePooling2D('
229 |         for arg, val in argumentsAvgpool.items():
230 |             outstrAvgpool = outstrAvgpool + arg + '=' + str(val) + ', '
231 |         outstrAvgpool = outstrAvgpool[:-2] + ')'
232 | 
233 |         outstrPadding = 'keras.layers.ZeroPadding2D('
234 |         for arg, val in argumentsPadding.items():
235 |             outstrPadding = outstrPadding + arg + '=' + str(val) + ', '
236 |         outstrPadding = outstrPadding[:-2] + ')'
237 | 
238 |         outstr = 'keras.Sequential([\n    ' + outstrPadding + ',\n    '
239 |         outstr = outstr + outstrAvgpool + '\n], '
240 | 
241 |         for arg, val in argumentsSequential.items():
242 |             outstr = outstr + arg + '=' + str(val) + ', '
243 |         outstr = outstr[:-2] + ')'
244 | 
245 |         return outstr
246 | 
247 | 
248 | def AdaptiveAvgPool2d(model, file=False):
249 |     """
250 |     Converts a torch.nn.AdaptiveAvgPool2d layer
251 | 
252 |     Arguments:
253 |         -model:
254 |             A LayerRepresentation object of the layer AdaptiveAvgPool2d to convert
255 |         -file (bool):
256 |             If we want to write the equivalent in a python file
257 | 
258 |     Raises:
259 |         -ImportError:
260 |             If Keras import failed
261 |         -NotImplementedError:
262 |             if output size isn't 1
263 | 
264 |     Returns:
265 |         Keras equivalent.
266 |         If file is True, returns as a str to put in a python file
267 |         Else, return the keras layer
268 |     """
269 |     if keras is None:
270 |         raise ImportError("Could not import keras. Conversion failed !")
271 | 
272 |     pytorchLayer = model.equivalent['torch']
273 |     name = model.completeName()
274 | 
275 |     # Getting hyper parameters
276 |     output_size = pytorchLayer.output_size
277 | 
278 |     if output_size == (1, 1):
279 |         output_size = 1
280 | 
281 |     if output_size != 1:
282 |         raise NotImplementedError("Error when converting AdaptiveAvgPool2d \
283 |                                   because output_size != 1 is not supported yet")
284 | 
285 |     argumentsAvgpool = {'data_format': 'channels_first',
286 |                         'input_shape': model.input_shape,
287 |                         'name': name}
288 | 
289 |     # Need a reshape so output shape is (channels, 1, 1) instead of (channels,)
290 |     argumentsReshape = {'target_shape': model.output_shape}
291 | 
292 |     argumentsSequential = {}
293 | 
294 |     if not file:
295 |         avgPoolLayer = keras.layers.GlobalAveragePooling2D(**argumentsAvgpool)
296 |         reshapeLayer = keras.layers.Reshape(**argumentsReshape)
297 | 
298 |         kerasLayer = keras.Sequential(**argumentsSequential)
299 |         kerasLayer.add(avgPoolLayer)
300 |         kerasLayer.add(reshapeLayer)
301 | 
302 |         return kerasLayer
303 |     else:
304 |         raise NotImplementedError  # the file=True text output below is unfinished
305 |         outstrAvgpool = 'keras.layers.GlobalAveragePooling2D('
306 |         for arg, val in argumentsAvgpool.items():
307 |             outstrAvgpool = outstrAvgpool + arg + '=' + str(val) + ', '
308 |         outstrAvgpool = outstrAvgpool[:-2] + ')'
309 | 
310 |         outstrReshape = 'keras.layers.Reshape('
311 |         for arg, val in argumentsReshape.items():
312 |             outstrReshape = outstrReshape + arg + '=' + str(val) + ', '
313 |         outstrReshape = outstrReshape[:-2] + ')'
314 | 
315 |         outstr = 'keras.Sequential([\n    ' + outstrAvgpool + ',\n    '
316 |         outstr = outstr + outstrReshape + '\n], '
317 | 
318 |         for arg, val in argumentsSequential.items():
319 |             outstr = outstr + arg + '=' + str(val) + ', '
320 |         outstr = outstr[:-2] + ')'
321 | 
322 |         return outstr
323 | 
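AdaptiveAvgPool2d with output_size=1 averages each channel map down to a single value, which is why the conversion above can use a global pooling followed by a reshape back to the (channels, 1, 1) layout PyTorch produces. Standalone, the generated equivalent amounts to (channel count and spatial size illustrative):
```
import tensorflow.keras as keras

equivalent = keras.Sequential([
    keras.layers.GlobalAveragePooling2D(data_format='channels_first',
                                        input_shape=(2048, 7, 7)),  # -> (2048,)
    keras.layers.Reshape((2048, 1, 1)),                             # -> (2048, 1, 1)
])
```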
--------------------------------------------------------------------------------
/pytorch_keras_converter/utility/torch2keras.py:
--------------------------------------------------------------------------------
1 | from . import converting_layers as c_l
2 | from tqdm import tqdm
3 | from .LayerRepresentation import normalizeShape
4 | 
5 | try:
6 |     import tensorflow.keras as keras
7 | except ImportError:
8 |     try:
9 |         import keras
10 |     except ImportError:
11 |         keras = None
12 | try:
13 |     import torch
14 | except ImportError:
15 |     torch = None
16 | 
17 | 
18 | lastProgress = 0
19 | 
20 | 
21 | def kerasShape(tensor):
22 |     """
23 |     Determine the shape of a tensor or a keras layer
24 | 
25 |     Useful to check that PyTorch to Keras conversion doesn't fail
26 |     """
27 |     shape = None  # so no branch below can leave 'shape' unbound
28 |     if tensor is None:
29 |         return None
30 |     else:
31 |         if '_keras_shape' in dir(tensor):
32 |             if tensor._keras_shape is not None:
33 |                 shape = tensor._keras_shape
34 | 
35 |                 # In LayerRepresentation, we leave out batch size :
36 |                 shape = list(shape)
37 |                 del shape[0]
38 |                 shape = tuple(shape)
39 | 
40 |         elif 'shape' in dir(tensor):
41 |             shape = tensor.shape.as_list()
42 |             del shape[0]
43 |             shape = tuple(shape)
44 | 
45 |         elif '_shape_val' in dir(tensor):
46 |             if tensor._shape_val is not None:
47 |                 kerasShape = tensor._shape_val
48 | 
49 |                 # In LayerRepresentation, we leave out batch size, so we
50 |                 # start at 1 (not 0) :
51 |                 values = range(1, len(kerasShape._dims))
52 |                 shape = [kerasShape._dims[k]._value for k in values]
53 | 
54 |                 shape = tuple(shape)
55 |         else:
56 |             shape = None
57 | 
58 |         shape = normalizeShape(shape)
59 |         return shape
60 | 
61 | 
62 | def convert_torch2keras_file(model, input_size=None):
63 |     createSimpleEquivalences(model, file=True)
64 |     return model
65 | 
66 | 
67 | def convert_torch2keras(model, input_size, weights=True, quiet=True):
68 |     """
69 |     Converts a pytorch model to keras
70 | 
71 |     Arguments:
72 |         -model:
73 |             the model to convert (LayerRepresentation)
74 |         -input_size:
75 |             int, list, or tuple.
76 |         -weights (bool):
77 |             If weights should be converted too (may take a lot of time !)
78 |         -quiet (bool):
79 |             If a progress bar should appear
80 | 
81 |     Returns:
82 |         the model (LayerRepresentation)
83 |     """
84 | 
85 |     global lastProgress
86 |     lastProgress = 0
87 | 
88 |     # Step 1 : Compute all input and output shapes and place them on our model
89 |     # Convert input_size into tuple
90 |     input_size = normalizeShape(input_size)
91 | 
92 |     if not quiet:
93 |         progressBar = tqdm(total=model.numberOfChildren() + 1, unit='layer')
94 |         print("\nAnalysing model...")
95 |     else:
96 |         progressBar = None
97 | 
98 |     findAllInputShapes(model, input_size)
99 | 
100 |     # Step 2: convert every simple layer (i.e native layers, in most cases)
101 |     if not quiet:
102 |         print("\nComputing equivalents layer by layer...")
103 | 
104 |     createSimpleEquivalences(model,
105 |                              weights=weights,
106 |                              quiet=quiet,
107 |                              progressBar=progressBar)
108 | 
109 |     # Let's check if our model is fully converted:
110 |     if 'keras' in model.equivalent.keys():
111 |         return model
112 | 
113 |     # Step 3: keras Functional API
114 |     if not quiet:
115 |         print("\nConnecting layers together with Keras Functional API...")
116 | 
117 |     while 'keras' not in model.equivalent.keys():
118 |         advancedKerasEquivalence(model,
119 |                                  quiet=quiet,
120 |                                  progressBar=progressBar)
121 | 
122 |     # Done!
123 | 
124 |     if not quiet:
125 |         progressBar.close()
126 |         print("\nDone !")
127 | 
128 |     return model
129 | 
130 | 
131 | def createSimpleEquivalences(model,
132 |                              file=False,
133 |                              weights=True,
134 |                              quiet=True,
135 |                              progressBar=None):
136 |     """
137 |     Computes equivalent of most simple layers (native pyTorch layers,
138 |     nn.Sequential containing only native layers...)
139 | 
140 |     Arguments:
141 |         -model:
142 |             A LayerRepresentation object to use
143 |         -file (bool):
144 |             If we want to write the equivalent in a python file
145 |         -weights (bool):
146 |             Also convert weights
147 |         -quiet:
148 |             If a progress bar should appear
149 |         -progressBar:
150 |             If a progress bar was already created, pass it here
151 |     """
152 |     # Create a progress bar if necessary
153 |     if not quiet and progressBar is None:
154 |         progressBar = tqdm(total=model.numberOfChildren() + 1, unit='layer')
155 | 
156 |     if 'torch' in model.equivalent.keys():  # torch equivalent available
157 |         # CONVERSION: torch -> keras
158 |         if not model.children:  # 1st case: no children
159 |             if model.isTorchBuiltIn():
160 |                 kerasEq = None
161 |                 kerasEq = c_l.torch2kerasEquivalent(model, weights=weights)
162 | 
163 |                 kerasEqTxt = None
164 |                 if file:
165 |                     kerasEqTxt = c_l.torch2kerasEquivalent(model,
166 |                                                            file=True,
167 |                                                            weights=weights)
168 | 
169 |                 if kerasEq is not None:
170 |                     # keras equivalent computation succeeded!
171 |                     model.equivalent['keras'] = kerasEq
172 | 
173 |                 if kerasEqTxt is not None:
174 |                     # keras equivalent computation succeeded!
175 |                     model.equivalentTxt['keras'] = kerasEqTxt
176 | 
177 |             if not quiet:
178 |                 updateProgress(model, progressBar)
179 | 
180 |         else:  # 2nd case: there are children
181 |             if not model.childrenEquivalentsCompleted('keras',
182 |                                                       file=file):
183 |                 # There are children,
184 |                 # but all equivalents aren't computed yet
185 |                 for child in model.children:
186 |                     createSimpleEquivalences(child,
187 |                                              file=file,
188 |                                              weights=weights,
189 |                                              quiet=quiet,
190 |                                              progressBar=progressBar)
191 | 
192 |             # Here, we have computed all simple layers
193 |             # If possible, we can still find an equivalent
194 |             # if model is a container (sequential for example)
195 |             success = model.childrenEquivalentsCompleted('keras')
196 |             if model.isTorchBuiltIn() and success:
197 |                 kerasEq = c_l.torch2kerasEquivalent(model, weights=weights)
198 |                 if kerasEq is not None:
199 |                     model.equivalent['keras'] = kerasEq
200 |             if file:
201 |                 successTxt = model.childrenEquivalentsCompleted('keras',
202 |                                                                 file=True)
203 |                 if model.isTorchBuiltIn() and successTxt:
204 |                     kerasEqTxt = c_l.torch2kerasEquivalent(model,
205 |                                                            file=True,
206 |                                                            weights=weights)
207 |                     model.equivalentTxt['keras'] = kerasEqTxt
208 | 
209 |     if not quiet:
210 |         updateProgress(model, progressBar)
211 | 
212 | 
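# The shape analysis in findAllInputShapes below boils down to the standard
# forward-hook trick: register a hook on every module, run one dummy batch,
# and record shapes without the batch dimension. A self-contained sketch of
# that mechanism, with a toy model and illustrative shapes:
#
#     import torch
#     shapes = {}
#     net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
#     for name, module in net.named_modules():
#         if name:  # skip the root container
#             module.register_forward_hook(
#                 lambda m, i, o, name=name:
#                     shapes.__setitem__(name, (tuple(i[0].shape[1:]),
#                                               tuple(o.shape[1:]))))
#     net(torch.rand(1, 3, 32, 32))  # one dummy batch triggers every hook
#     # shapes == {'0': ((3, 32, 32), (8, 30, 30)), '1': ((8, 30, 30), (8, 30, 30))}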
213 | def findAllInputShapes(model, pyTorch_input_size):
214 |     """
215 |     Finds input and output shapes of every layer in a model only knowing main
216 |     input shape
217 | 
218 |     Arguments:
219 |         -model:
220 |             A LayerRepresentation object of the model to analyse
221 |         -pyTorch_input_size:
222 |             input shape
223 | 
224 |     Raises:
225 |         -RuntimeError:
226 |             If provided input shape isn't compatible with the model
227 |     """
228 |     if torch is None:
229 |         raise ImportError("Could not import torch. Conversion failed !")
230 | 
231 |     pyTorchModel = model.equivalent['torch']
232 | 
233 |     def register_hook(module):
234 | 
235 |         def hook(module, Input, Output):
236 | 
237 |             identifier = id(module)
238 | 
239 |             # Input shape
240 |             inputShape = list(Input[0].size())
241 |             del inputShape[0]
242 | 
243 |             # Output shape
244 |             if isinstance(Output, (list, tuple)):
245 |                 outputShape = [
246 |                     list(o.size())[1:] for o in Output
247 |                 ]
248 |             else:
249 |                 outputShape = list(Output.size())
250 |                 del outputShape[0]
251 | 
252 |             inputShape = normalizeShape(inputShape)
253 |             outputShape = normalizeShape(outputShape)
254 | 
255 |             # Saving shapes
256 |             selectedModel = model.getChildId(identifier, framework='torch')
257 |             selectedModel.input_shape = inputShape
258 |             selectedModel.output_shape = outputShape
259 | 
260 |         module.register_forward_hook(hook)
261 | 
262 |     # multiple inputs to the network
263 |     if isinstance(pyTorch_input_size, tuple):
264 |         pyTorch_input_size = [pyTorch_input_size]
265 | 
266 |     # dummy batch (batch size 1) to trigger the hooks
267 |     x = [torch.rand(1, *in_size) for in_size in pyTorch_input_size]
268 | 
269 |     # register hook
270 |     pyTorchModel.apply(register_hook)
271 | 
272 |     # make a forward pass
273 |     try:
274 |         pyTorchModel(*x)
275 |     except RuntimeError as err:
276 |         raise RuntimeError('Failed to analyse pyTorch model !\n{}'.format(err))
277 | 
278 | 
279 | def advancedKerasEquivalence(model,
280 |                              quiet=True,
281 |                              progressBar=None):
282 |     """
283 |     Uses keras Functional API to find all remaining equivalents
284 | 
285 |     Arguments:
286 |         -model:
287 |             A LayerRepresentation object to complete, or a list of
288 |             LayerRepresentation objects to complete
289 |         -quiet:
290 |             If a progress bar should appear
291 |         -progressBar:
292 |             If a progress bar was already created, pass it here
293 |     """
294 | 
295 |     # Create a progress bar if necessary
296 |     if not quiet and progressBar is None:
297 |         progressBar = tqdm(total=model.numberOfChildren() + 1, unit='layer')
298 | 
299 |     if isinstance(model, list):
300 |         # If we have to deal with a list of models:
301 |         for oneModel in model:
302 |             advancedKerasEquivalence(oneModel,
303 |                                      quiet=quiet,
304 |                                      progressBar=progressBar)
305 |     else:
306 |         if not quiet:
307 |             updateProgress(model, progressBar)
308 | 
309 |         notKerasEquivExist = not('keras' in model.equivalent.keys())
310 |         kerasOutputExist = model.kerasOutput is not None
311 | 
312 |         if notKerasEquivExist and model.childrenEquivalentsCompleted('keras'):
313 |             c_l.spreadSignal(model)
314 |             kerasOutputExist = model.kerasOutput is not None
315 | 
316 |         if kerasOutputExist:
317 |             if model.name is not None:
318 |                 kerasEq = keras.models.Model(inputs=model.kerasInput,
319 |                                              outputs=model.kerasOutput,
320 |                                              name=model.name)
321 |             else:
322 |                 kerasEq = keras.models.Model(inputs=model.kerasInput,
323 |                                              outputs=model.kerasOutput)
324 |             model.equivalent['keras'] = kerasEq
325 | 
326 |         # Do the same to sub-sub-layers
327 |         if not quiet:
328 |             updateProgress(model, progressBar)
329 |         advancedKerasEquivalence(model.children,
330 |                                  quiet=quiet,
331 |                                  progressBar=progressBar)
332 | 
333 | 
334 | def updateProgress(model, progressBar):
335 |     """
336 |     During a conversion, updates the progress bar.
337 |     Value is automatically computed using numberOfEquivalents
338 | 
339 |     Arguments:
340 |         -model:
341 |             A LayerRepresentation object of one layer in the model being
342 |             converted
343 |         -progressBar:
344 |             A ProgressBar object : the bar to update
345 |     """
346 |     global lastProgress
347 |     mainParent = model.firstParent()
348 |     progress = mainParent.numberOfEquivalents(framework='keras')
349 |     diff = progress-lastProgress
350 |     progressBar.update(diff)
351 |     lastProgress = progress
352 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | 
3 | with open("README.md", "r") as fh:
4 |     long_description = fh.read()
5 | 
6 | dependencies = ['torch',
7 |                 'pretrainedmodels',
8 |                 'tensorflow',
9 |                 'graphviz',
10 |                 'numpy',
11 |                 'h5py',
12 |                 'tqdm']
13 | 
14 | setuptools.setup(
15 |     name="pytorch_keras_converter",
16 |     version="0.0.1",
17 |     author="sonibla",
18 |     description="A PyTorch Keras Converter",
19 |     long_description=long_description,
20 |     long_description_content_type="text/markdown",
21 |     url="https://github.com/sonibla/pytorch_keras_converter",
22 |     packages=setuptools.find_packages(),
23 |     classifiers=[
24 |         "Programming Language :: Python :: 3.5",
25 |         "Programming Language :: Python :: 3.6",
26 |         "Programming Language :: Python :: 3.7",
27 |         "License :: OSI Approved :: MIT License",
28 |         "Operating System :: OS Independent",
29 |     ],
30 |     keywords='pytorch keras tensorflow converter cadene',
31 |     install_requires=dependencies,
32 |     python_requires='>=3.5'
33 | )
34 | 
--------------------------------------------------------------------------------