├── .gitattributes ├── .gitignore ├── .travis.yml ├── ISSUE_TEMPLATE.md ├── LICENSE ├── README.md ├── SECURITY.md ├── _config.yml ├── docs ├── InstallDockerCE.md ├── Poster-MMdnn.jpg ├── client.py ├── cntk2keras.jpg ├── darkent_keras_yolov3_converted.jpg ├── keras2cntk.md ├── supported.jpg ├── tensorboard.png ├── tf2pytorch.md └── vismmdnn.png ├── mmdnn ├── __init__.py ├── conversion │ ├── __init__.py │ ├── _script │ │ ├── IRToCode.py │ │ ├── IRToModel.py │ │ ├── __init__.py │ │ ├── convert.py │ │ ├── convertToIR.py │ │ ├── dump_code.py │ │ └── extractModel.py │ ├── caffe │ │ ├── README.md │ │ ├── __init__.py │ │ ├── caffe_emitter.py │ │ ├── caffe_pb2.py │ │ ├── common_graph.py │ │ ├── errors.py │ │ ├── graph.py │ │ ├── mapper.py │ │ ├── network.py │ │ ├── resolver.py │ │ ├── saver.py │ │ ├── shape.py │ │ ├── transformer.py │ │ ├── utils.py │ │ └── writer.py │ ├── cntk │ │ ├── README.md │ │ ├── __init__.py │ │ ├── cntk_emitter.py │ │ ├── cntk_graph.py │ │ ├── cntk_parser.py │ │ └── saver.py │ ├── common │ │ ├── DataStructure │ │ │ ├── __init__.py │ │ │ ├── emitter.py │ │ │ ├── graph.py │ │ │ └── parser.py │ │ ├── IR │ │ │ ├── IR_graph.py │ │ │ ├── __init__.py │ │ │ ├── graph.proto │ │ │ ├── graph_pb2.py │ │ │ └── ops.pbtxt │ │ ├── __init__.py │ │ └── utils.py │ ├── coreml │ │ ├── README.md │ │ ├── __init__.py │ │ ├── coreml_emitter.py │ │ ├── coreml_graph.py │ │ ├── coreml_parser.py │ │ └── coreml_utils.py │ ├── darknet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── cfg.py │ │ ├── darknet_graph.py │ │ ├── darknet_parser.py │ │ ├── darknet_utils.py │ │ └── prototxt.py │ ├── examples │ │ ├── __init__.py │ │ ├── caffe │ │ │ ├── __init__.py │ │ │ ├── extract_model.py │ │ │ ├── extractor.py │ │ │ └── imagenet_test.py │ │ ├── cntk │ │ │ ├── __init__.py │ │ │ ├── extract_model.py │ │ │ ├── extractor.py │ │ │ └── imagenet_test.py │ │ ├── coreml │ │ │ ├── __init__.py │ │ │ ├── extractor.py │ │ │ ├── imagenet_test.py │ │ │ └── test_tfcoreml.py │ │ ├── darknet │ │ │ ├── 
__init__.py │ │ │ ├── darknet.py │ │ │ ├── extractor.py │ │ │ ├── libdarknet.a │ │ │ └── libdarknet.so │ │ ├── data │ │ │ ├── dog.jpg │ │ │ ├── imagenet_1000.txt │ │ │ ├── one_imdb.npy │ │ │ └── seagull.jpg │ │ ├── extractor.py │ │ ├── imagenet_test.py │ │ ├── keras │ │ │ ├── __init__.py │ │ │ ├── extract_model.py │ │ │ ├── extractor.py │ │ │ ├── imagenet_test.py │ │ │ └── utils.py │ │ ├── mxnet │ │ │ ├── __init__.py │ │ │ ├── extract_model.py │ │ │ ├── extractor.py │ │ │ └── imagenet_test.py │ │ ├── onnx │ │ │ ├── __init__.py │ │ │ └── imagenet_test.py │ │ ├── paddle │ │ │ ├── __init__.py │ │ │ ├── extract_model.py │ │ │ ├── extractor.py │ │ │ ├── imagenet_test.py │ │ │ └── models │ │ │ │ ├── __init__.py │ │ │ │ ├── alexnet.py │ │ │ │ ├── resnet.py │ │ │ │ └── vgg.py │ │ ├── pytorch │ │ │ ├── __init__.py │ │ │ ├── extract_model.py │ │ │ ├── extractor.py │ │ │ └── imagenet_test.py │ │ └── tensorflow │ │ │ ├── __init__.py │ │ │ ├── extract_model.py │ │ │ ├── extractor.py │ │ │ ├── imagenet_test.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── inception_resnet_v1.py │ │ │ ├── inception_resnet_v2.py │ │ │ ├── mobilenet │ │ │ │ ├── __init__.py │ │ │ │ ├── conv_blocks.py │ │ │ │ ├── mobilenet.py │ │ │ │ └── mobilenet_v2.py │ │ │ ├── mobilenet_v1.py │ │ │ ├── nasnet.py │ │ │ ├── nasnet_utils.py │ │ │ └── test_rnn.py │ │ │ └── vis_meta.py │ ├── keras │ │ ├── README.md │ │ ├── __init__.py │ │ ├── extra_layers.py │ │ ├── keras2_emitter.py │ │ ├── keras2_graph.py │ │ ├── keras2_parser.py │ │ └── saver.py │ ├── mxnet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── mxnet_emitter.py │ │ ├── mxnet_graph.py │ │ ├── mxnet_parser.py │ │ └── saver.py │ ├── onnx │ │ ├── DEVELOPMENT.md │ │ ├── README.md │ │ ├── __init__.py │ │ ├── elephant.jpg │ │ ├── onnx_emitter.py │ │ ├── onnx_graph.py │ │ ├── onnx_parser.py │ │ ├── saver.py │ │ └── shape_inference.py │ ├── paddle │ │ ├── README.md │ │ ├── __init__.py │ │ ├── paddle_graph.py │ │ └── paddle_parser.py │ ├── pytorch │ │ ├── README.md 
│ │ ├── __init__.py │ │ ├── pytorch_emitter.py │ │ ├── pytorch_graph.py │ │ ├── pytorch_parser.py │ │ ├── saver.py │ │ └── torch_to_np.py │ ├── rewriter │ │ ├── __init__.py │ │ ├── folder.py │ │ ├── graph_matcher.py │ │ ├── rewriter.py │ │ ├── rnn_utils.py │ │ └── utils.py │ ├── tensorflow │ │ ├── README.md │ │ ├── __init__.py │ │ ├── rewriter │ │ │ ├── __init__.py │ │ │ ├── gru_rewriter.py │ │ │ └── lstm_rewriter.py │ │ ├── saver.py │ │ ├── tensorflow_emitter.py │ │ ├── tensorflow_frozenparser.py │ │ ├── tensorflow_graph.py │ │ └── tensorflow_parser.py │ └── torch │ │ ├── __init__.py │ │ ├── torch_graph.py │ │ └── torch_parser.py ├── models │ ├── GenerateMdByDataset.py │ ├── GenerateMdFromJson.py │ ├── README.md │ └── modelmap2.json ├── vis_edit │ ├── package.json │ ├── public │ │ ├── favicon.ico │ │ ├── index.html │ │ └── manifest.json │ ├── src │ │ ├── actions │ │ │ └── index.tsx │ │ ├── components │ │ │ ├── App.css │ │ │ ├── App.tsx │ │ │ ├── Graph.css │ │ │ ├── Graph.tsx │ │ │ ├── MiniMap.tsx │ │ │ ├── SideBar.css │ │ │ └── SideBar.tsx │ │ ├── constants │ │ │ └── index.tsx │ │ ├── containers │ │ │ ├── Graph.tsx │ │ │ └── SideBar.tsx │ │ ├── helper │ │ │ └── index.tsx │ │ ├── index.css │ │ ├── index.tsx │ │ ├── logo.svg │ │ ├── reducers │ │ │ └── index.tsx │ │ ├── registerServiceWorker.ts │ │ └── types │ │ │ └── index.tsx │ ├── tsconfig.json │ ├── tsconfig.test.json │ └── tslint.json └── visualization │ ├── README.md │ ├── app.js │ ├── draw.js │ ├── index.html │ ├── package.json │ ├── public │ ├── draw.js │ └── style.css │ └── style.css ├── requirements ├── linux-py3.txt └── select_requirements.py ├── setup.cfg ├── setup.py ├── test.sh └── tests ├── conversion_imagenet.py ├── gen_test.py ├── test_caffe.py ├── test_caffe_2.py ├── test_caffe_3.py ├── test_caffe_4.py ├── test_cntk.py ├── test_cntk_2.py ├── test_coreml.py ├── test_coreml_2.py ├── test_darknet.py ├── test_keras.py ├── test_keras_2.py ├── test_keras_3.py ├── test_keras_4.py ├── test_keras_5.py ├── 
test_mxnet.py ├── test_mxnet_2.py ├── test_mxnet_3.py ├── test_mxnet_4.py ├── test_mxnet_5.py ├── test_paddle.py ├── test_pytorch.py ├── test_pytorch_2.py ├── test_pytorch_3.py ├── test_pytorch_4.py ├── test_pytorch_5.py ├── test_tensorflow.py ├── test_tensorflow_2.py ├── test_tensorflow_3.py ├── test_tensorflow_4.py ├── test_tensorflow_5.py ├── test_tensorflow_6.py ├── test_tensorflow_7.py ├── test_tensorflow_frozen.py └── utils.py /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sh text eol=lf 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | # visualization 104 | node_modules/ 105 | package-lock.json 106 | *.log 107 | 108 | # tests temporary files 109 | tests/cache/ 110 | tests/tmp/ 111 | 112 | # Visual Studio 113 | .vs/ 114 | .vscode/ 115 | 116 | # JetBrains 117 | .idea/ 118 | 119 | # macOS 120 | # Desktop Services Store files 121 | .DS_Store 122 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: bionic 3 | 4 | os: 5 | - linux 6 | 7 | language: python 8 | python: 9 | - "3.6" 10 | virtualenv: 11 | system_site_packages: true 12 | 13 | env: 14 | - TEST_PARSER=tensorflow 15 | - TEST_PARSER=tensorflow_2 16 | - TEST_PARSER=tensorflow_3 17 | - TEST_PARSER=tensorflow_4 18 | - TEST_PARSER=tensorflow_5 19 | - TEST_PARSER=tensorflow_6 20 | #- TEST_PARSER=tensorflow_7 21 | - 
TEST_PARSER=tensorflow_frozen 22 | - TEST_PARSER=pytorch 23 | - TEST_PARSER=pytorch_2 24 | - TEST_PARSER=pytorch_3 25 | - TEST_PARSER=pytorch_4 26 | - TEST_PARSER=pytorch_5 27 | - TEST_PARSER=keras 28 | - TEST_PARSER=keras_2 29 | - TEST_PARSER=keras_3 30 | - TEST_PARSER=keras_4 31 | - TEST_PARSER=keras_5 32 | - TEST_PARSER=cntk 33 | - TEST_PARSER=cntk_2 34 | - TEST_PARSER=caffe 35 | - TEST_PARSER=caffe_2 36 | - TEST_PARSER=caffe_3 37 | #- TEST_PARSER=caffe_4 38 | - TEST_PARSER=mxnet 39 | - TEST_PARSER=mxnet_2 40 | - TEST_PARSER=mxnet_3 41 | #- TEST_PARSER=mxnet_4 42 | - TEST_PARSER=mxnet_5 43 | - TEST_PARSER=paddle 44 | - TEST_PARSER=coreml 45 | - TEST_PARSER=coreml_2 46 | - TEST_PARSER=darknet 47 | 48 | cache: 49 | directories: 50 | - $HOME/.cache/pip 51 | 52 | addons: 53 | apt: 54 | update: false 55 | 56 | before_install: 57 | - python -m pip install --upgrade pip 58 | - sudo apt-get install -y --no-install-recommends caffe-cpu 59 | - sudo apt-get install -y --no-install-recommends openmpi-bin 60 | - sudo ln -s /usr/lib/x86_64-linux-gnu/libmpi_cxx.so.20 /usr/lib/x86_64-linux-gnu/libmpi_cxx.so.1 61 | - sudo ln -s /usr/lib/x86_64-linux-gnu/libmpi.so.20 /usr/lib/x86_64-linux-gnu/libmpi.so.12 62 | 63 | install: 64 | - python -m pip install -q -r $(python requirements/select_requirements.py) 65 | 66 | before_script: 67 | - export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/x86_64-linux-gnu 68 | 69 | after_failure: true 70 | 71 | after_success: true 72 | 73 | after_script: true 74 | 75 | script: bash test.sh $TEST_PARSER 76 | 77 | matrix: 78 | fast_finish: true 79 | 80 | allow_failures: 81 | - env: TEST_PARSER=paddle 82 | 83 | exclude: 84 | - python: "3.6" 85 | env: TEST_PARSER=paddle 86 | - os: linux 87 | env: TEST_PARSER=coreml 88 | - os: linux 89 | env: TEST_PARSER=coreml_2 90 | 91 | notifications: 92 | email: 93 | on_success: never 94 | on_failure: never 95 | -------------------------------------------------------------------------------- /ISSUE_TEMPLATE.md: 
-------------------------------------------------------------------------------- 1 | Platform (like ubuntu 16.04/win10): 2 | 3 | Python version: 4 | 5 | Source framework with version (like Tensorflow 1.4.1 with GPU): 6 | 7 | Destination framework with version (like CNTK 2.3 with GPU): 8 | 9 | Pre-trained model path (webpath or webdisk path): 10 | 11 | Running scripts: 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. All rights reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. 
Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-cayman -------------------------------------------------------------------------------- /docs/InstallDockerCE.md: -------------------------------------------------------------------------------- 1 | # Install Docker CE on Ubuntu 16.04 and newer (x86_64/amd64 architectures) 2 | 3 | ## Setup the Repository 4 | 5 | 1. Update the **apt** package index: 6 | ```bash 7 | $ sudo apt-get update 8 | ``` 9 | 2. 
'''
Example gRPC client that sends a JPEG image to a tensorflow_model_server
instance loaded with a GAN model.

Note: this script is built together with TensorFlow Serving (not against a
local installation) and is meant to be run inside the TensorFlow Docker
container.
'''

from __future__ import print_function

# gRPC transport to the TensorFlow serving endpoint.
from grpc.beta import implementations
import tensorflow as tf

# TensorFlow Serving request/stub message definitions.
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2


# CLI flags: where the PredictionService listens and which image to send.
tf.app.flags.DEFINE_string('server', 'localhost:9000',
                           'PredictionService host:port')
tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format')
FLAGS = tf.app.flags.FLAGS


def main(_):
    # Open an insecure gRPC channel to the configured host:port.
    server_host, server_port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(server_host, int(server_port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Build the request: raw JPEG bytes go under the 'image' input key of
    # the default serving signature.
    jpeg_bytes = tf.gfile.FastGFile(FLAGS.image, 'rb').read()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'tensorflow-serving'
    request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    request.inputs['image'].CopyFrom(tf.contrib.util.make_tensor_proto(jpeg_bytes))

    # Blocking call with a 10 second deadline; dump the raw PredictResponse.
    response = stub.Predict(request, 10.0)
    print(response)


if __name__ == '__main__':
    tf.app.run()
https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/docs/darkent_keras_yolov3_converted.jpg -------------------------------------------------------------------------------- /docs/keras2cntk.md: -------------------------------------------------------------------------------- 1 | # Keras "inception_v3" to CNTK conversion example 2 | 3 | Model: ["inception_v3" for ImageNet](https://github.com/fchollet/deep-learning-models) 4 | 5 | Source: Keras 2.1.3 6 | 7 | Destination: CNTK 2.4 8 | 9 | --- 10 | 11 | ## Framework Installation 12 | 13 | Install [Keras](https://keras.io/#installation) and [CNTK](https://docs.microsoft.com/en-us/cognitive-toolkit/Setup-CNTK-on-your-machine) in case 14 | 15 | ```bash 16 | $ pip install keras 17 | 18 | $ pip install https://cntk.ai/PythonWheel/CPU-Only/cntk-2.4-cp27-cp27mu-linux_x86_64.whl 19 | or 20 | $ pip install https://cntk.ai/PythonWheel/CPU-Only/cntk-2.4-cp35-cp35m-linux_x86_64.whl 21 | ``` 22 | 23 | --- 24 | 25 | ## Keras Model Preparation 26 | 27 | First, you'll need to prepare your pre-trained keras model. There is a pre-trained model extractor for frameworks to do this. Refer to it to extract your Keras model's structure and weights. 28 | 29 | ```bash 30 | $ mmdownload -f keras -n inception_v3 31 | 32 | Keras model inception_v3 is saved in [./imagenet_inception_v3.h5] 33 | ``` 34 | 35 | Then you got the Keras pre-trained inception_v3 model which is downloaded to the current working directory. 36 | 37 | --- 38 | 39 | ## Convert Keras Model to CNTK 40 | 41 | We provide two ways to convert models. 42 | 43 | ### **One-step Command** 44 | 45 | For versions above MMdnn@0.1.4, we provide one command to achieve converting a Keras Model to CNTK. 46 | 47 | ```bash 48 | $ mmconvert -sf keras -iw imagenet_inception_v3.h5 -df cntk -om keras_to_cntk_inception_v3.dnn 49 | . 50 | . 51 | . 
52 | CNTK model file is saved as [keras_to_cntk_inception_v3.dnn], generated by [2c33f7f278cb46be992f50226fcfdb5d.py] and [2c33f7f278cb46be992f50226fcfdb5d.npy]. 53 | ``` 54 | 55 | Now you've got the CNTK original model *keras_to_cntk_inception_v3.dnn*, converted from Keras in one step!. **2c33f7f278cb46be992f50226fcfdb5d.py** and **2c33f7f278cb46be992f50226fcfdb5d.npy** are temporal files which will be removed automatically. 56 | 57 | ### **Step-by-step Command (for debugging)** 58 | 59 | #### Convert the pre-trained model files to intermediate representation 60 | 61 | ```bash 62 | $ mmtoir -f keras -w imagenet_inception_v3.h5 -o converted 63 | 64 | Using TensorFlow backend. 65 | . 66 | . 67 | . 68 | IR network structure is saved as [converted.json]. 69 | IR network structure is saved as [converted.pb]. 70 | IR weights are saved as [converted.npy]. 71 | ``` 72 | 73 | Then you got the **intermediate representation** files *converted.json* for visualization, *converted.pb* and *converted.npy* for next steps. 74 | 75 | #### Convert the IR files to CNTK models 76 | 77 | ```bash 78 | $ mmtocode -f cntk -d converted_cntk.py -n converted.pb -w converted.npy 79 | 80 | Parse file [converted.pb] with binary format successfully. 81 | Target network code snippet is saved as [converted_cntk.py]. 82 | ``` 83 | 84 | And you will get a file named *converted_cntk.py*, which contains the **original CNTK** codes to build the *Inception V3* network. 85 | 86 | With these three steps, you have already converted the pre-trained Keras Inception_v3 models to CNTK network file *converted_cntk.py* and weight file *converted.npy*. You can use these two files to fine-tune training or inference. 87 | 88 | #### Dump the original CNTK model 89 | 90 | ```bash 91 | $ mmtomodel -f cntk -in converted_cntk -iw converted.npy -o cntk_inception_v3.dnn 92 | . 93 | . 94 | . 95 | CNTK model file is saved as [cntk_inception_v3.dnn], generated by [converted_cntk.py] and [converted.npy]. 
96 | ``` 97 | The file *cntk_inception_v3.dnn* can be loaded by CNTK directly. 98 | -------------------------------------------------------------------------------- /docs/supported.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/docs/supported.jpg -------------------------------------------------------------------------------- /docs/tensorboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/docs/tensorboard.png -------------------------------------------------------------------------------- /docs/vismmdnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/docs/vismmdnn.png -------------------------------------------------------------------------------- /mmdnn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/__init__.py -------------------------------------------------------------------------------- /mmdnn/conversion/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | -------------------------------------------------------------------------------- /mmdnn/conversion/_script/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function -------------------------------------------------------------------------------- 
/mmdnn/conversion/_script/dump_code.py: -------------------------------------------------------------------------------- 1 | import sys as _sys 2 | from six import text_type as _text_type 3 | import sys 4 | import imp 5 | import os.path 6 | 7 | 8 | def dump_code(framework, network_filepath, weight_filepath, dump_filepath, dump_tag): 9 | if network_filepath.endswith('.py'): 10 | network_filepath = network_filepath[:-3] 11 | sys.path.insert(0, os.path.dirname(os.path.abspath(network_filepath))) 12 | MainModel = imp.load_source('MainModel', network_filepath + '.py') 13 | if framework == 'caffe': 14 | from mmdnn.conversion.caffe.saver import save_model 15 | elif framework == 'cntk': 16 | from mmdnn.conversion.cntk.saver import save_model 17 | elif framework == 'keras': 18 | from mmdnn.conversion.keras.saver import save_model 19 | elif framework == 'mxnet': 20 | from mmdnn.conversion.mxnet.saver import save_model 21 | elif framework == 'pytorch': 22 | from mmdnn.conversion.pytorch.saver import save_model 23 | elif framework == 'tensorflow': 24 | from mmdnn.conversion.tensorflow.saver import save_model 25 | save_model(MainModel, network_filepath, weight_filepath, dump_filepath, dump_tag) 26 | return 0 27 | 28 | elif framework == 'onnx': 29 | from mmdnn.conversion.onnx.saver import save_model 30 | else: 31 | raise NotImplementedError("{} saver is not finished yet.".format(framework)) 32 | save_model(MainModel, network_filepath, weight_filepath, dump_filepath) 33 | 34 | return 0 35 | 36 | 37 | def _get_parser(): 38 | import argparse 39 | 40 | parser = argparse.ArgumentParser(description='Dump the model code into target model.') 41 | 42 | parser.add_argument( 43 | '-f', '--framework', type=_text_type, choices=["caffe", "cntk", "mxnet", "keras", "tensorflow", 'pytorch', 'onnx'], 44 | required=True, 45 | help='Format of model at srcModelPath (default is to auto-detect).' 
46 | ) 47 | 48 | parser.add_argument( 49 | '-in', '--inputNetwork', 50 | type=_text_type, 51 | required=True, 52 | help='Path to the model network architecture file.') 53 | 54 | parser.add_argument( 55 | '-iw', '--inputWeight', 56 | type=_text_type, 57 | required=True, 58 | help='Path to the model network weight file.') 59 | 60 | parser.add_argument( 61 | '-o', '-om', '--outputModel', 62 | type=_text_type, 63 | required=True, 64 | help='Path to save the target model') 65 | 66 | parser.add_argument( 67 | '--dump_tag', 68 | type=_text_type, 69 | default=None, 70 | help='Tensorflow model dump type', 71 | choices=['SERVING', 'TRAINING']) 72 | 73 | return parser 74 | 75 | 76 | def _main(): 77 | parser = _get_parser() 78 | args = parser.parse_args() 79 | ret = dump_code(args.framework, args.inputNetwork, args.inputWeight, args.outputModel, args.dump_tag) 80 | _sys.exit(int(ret)) 81 | 82 | 83 | if __name__ == '__main__': 84 | _main() 85 | -------------------------------------------------------------------------------- /mmdnn/conversion/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function -------------------------------------------------------------------------------- /mmdnn/conversion/caffe/errors.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | class ConversionError(Exception): 5 | ''' 6 | an abtract class 7 | ''' 8 | pass 9 | 10 | 11 | def print_stderr(msg): 12 | ''' 13 | a function to print information to the std 14 | ''' 15 | sys.stderr.write('%s\n' % msg) 16 | -------------------------------------------------------------------------------- /mmdnn/conversion/caffe/network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | DEFAULT_PADDING = 'SAME' 4 | 5 | 6 | def layer(op): 7 | 
import numpy as np

DEFAULT_PADDING = 'SAME'

# Single shared message for every abstract method below.
_SUBCLASS_MSG = 'Must be implemented by the subclass'


def layer(op):
    '''Decorator that turns a raw op method into a composable network layer.

    The wrapper resolves the layer's positional inputs, invokes the op, and
    registers the produced output both in the network's layer table and as
    the current ``self.output``.
    '''

    def layer_decorated(self, *args, **kwargs):
        # Auto-generate a unique layer name unless the caller supplied one.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        # A single positional argument is the layer input itself; several
        # positional arguments form an input list.
        assert len(args) >= 1
        inputs = args[0] if len(args) == 1 else list(args)
        result = op(self, inputs, **kwargs)
        # Record the output so later layers can reference it by name.
        self.layers[name] = result
        self.output = result
        return result

    return layer_decorated


class Network(object):
    '''Abstract base class for framework-specific network builders.

    Subclasses implement setup() to declare the graph and override the layer
    primitives below with their target framework's operations.
    '''

    def __init__(self, trainable=False):
        self.output = None     # output of the most recently added layer
        self.layers = {}       # layer name -> layer output lookup table
        self.trainable = trainable
        self.setup()

    def setup(self):
        '''Declare the network graph; invoked once from __init__.'''
        raise NotImplementedError(_SUBCLASS_MSG)

    def load(self, data_path, session, ignore_missing=False):
        '''Load pre-trained weights from *data_path* into the network.'''
        raise NotImplementedError(_SUBCLASS_MSG)

    def input(self, shape, name):
        '''Declare an input placeholder of the given shape.'''
        raise NotImplementedError(_SUBCLASS_MSG)

    def get_output(self):
        '''Return the network's final output.'''
        raise NotImplementedError(_SUBCLASS_MSG)

    def get_unique_name(self, prefix):
        '''Return a layer name, unique within this network, based on *prefix*.'''
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def conv(self, input, k_h, k_w, c_o, s_h, s_w, p_h, p_w, name, group=1, biased=True):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def deconv(self, input, c_o, k_h, k_w, s_h, s_w, p_h, p_w, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def relu(self, input, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def sigmoid(self, input, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def max_pool(self, input, k_h, k_w, s_h, s_w, p_h, p_w, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def max_unpool(self, input, k_h, k_w, s_h, s_w, p_h, p_w, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def avg_pool(self, input, k_h, k_w, s_h, s_w, p_h, p_w, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def lrn(self, input, local_size, alpha, beta, name, bias=1):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def concat(self, inputs, axis, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def add(self, inputs, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def fc(self, input, num_out, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def softmax(self, input, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def batch_normalization(self, input, name, epsilon=0.00001, scale_offset=True):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def dropout(self, input, keep_prob, name):
        raise NotImplementedError(_SUBCLASS_MSG)

    @layer
    def crop(self, inputs, offset, name):
        raise NotImplementedError(_SUBCLASS_MSG)
def get_caffe_resolver():
    """Return the process-wide CaffeResolver, creating it on first use."""
    global SHARED_CAFFE_RESOLVER
    if SHARED_CAFFE_RESOLVER is None:
        SHARED_CAFFE_RESOLVER = CaffeResolver()
    return SHARED_CAFFE_RESOLVER


def has_pycaffe():
    """True when the real pycaffe bindings were importable (vs. protobuf fallback)."""
    resolver = get_caffe_resolver()
    return resolver.has_pycaffe()
47 | ------------------------------------------------------------ 48 | 49 | ''' 50 | sys.stderr.write(msg) 51 | -------------------------------------------------------------------------------- /mmdnn/conversion/caffe/saver.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | 3 | 4 | def save_model(MainModel, network_filepath, weight_filepath, dump_filepath): 5 | dump_net = dump_filepath + '.prototxt' 6 | dump_weight = dump_filepath + '.caffemodel' 7 | dump_net = str(dump_net) 8 | dump_weight = str(dump_weight) 9 | MainModel.make_net(dump_net) 10 | MainModel.gen_weight(weight_filepath, dump_weight, dump_net) 11 | print('Caffe model files are saved as [{}] and [{}], generated by [{}.py] and [{}].'.format( 12 | dump_net, dump_weight, network_filepath, weight_filepath)) 13 | -------------------------------------------------------------------------------- /mmdnn/conversion/caffe/utils.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | #---------------------------------------------------------------------------------------------- 5 | 6 | import re 7 | 8 | def get_lower_case(text): 9 | ''' 10 | Convert PascalCase name to words concatenated by '_'. 
def get_upper_case(text):
    '''
    'pascal_case' -> 'PascalCase'
    '''
    return ''.join([item.title() for item in text.split('_')])


def get_real_name(text):
    '''
    Strip a trailing ':<index>' port suffix from a tensor/node name.
    'conv1:0' -> 'conv1'

    Bug fix: the original returned '' for names without a ':' (split
    yields one element, and joining all-but-last drops it entirely);
    such names are now returned unchanged (stripped).
    '''
    text = text.strip()
    if ':' not in text:
        return text
    return ''.join(text.split(':')[:-1])
class Emitter(object):
    """Base class for code emitters that turn an MMdnn IR graph into
    target-framework source code plus a weight file.

    Subclasses build `self.IR_graph` and implement `gen_code`.
    """

    def __init__(self):
        self.body_code = str()       # accumulated generated source text
        self.weights_dict = dict()   # layer name -> (weight name -> ndarray)
        self.used_layers = set()     # layer ops referenced, for helper emission
        self.weight_loaded = False   # set once _load_weights succeeds
        self.layers_codes = dict()   # per-layer code snippets

    def run(self, dstNetworkPath, dstWeightPath=None, phase='test'):
        """Emit code to `dstNetworkPath`.

        NOTE(review): `dstWeightPath` is accepted but unused here — subclasses
        that write weights override `run`; confirm before relying on it.
        """
        self.save_code(dstNetworkPath, phase)

    # share functions
    def add_body(self, indent, codes):
        """Append one line or a list of lines at the given indent level."""
        if isinstance(codes, _string_types):
            codes = [codes]
        for code in codes:
            self.body_code += (" " * indent) + code + '\n'

    def _load_weights(self, file_name=None):
        """Load the parser-produced .npy weight dict into `self.weights_dict`."""
        import numpy as np
        self.weight_loaded = True
        try:
            # allow_pickle made default False in response to CVE-2019-6446
            self.weights_dict = np.load(file_name, allow_pickle=True).item()
        except Exception:
            # Fallback for dumps pickled under python2 (bytes-keyed dicts).
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            self.weights_dict = np.load(file_name, encoding='bytes', allow_pickle=True).item()

    def parent_variable_name(self, IR_node, path_or_name=None):
        """Resolve the variable name of a parent of `IR_node`.

        `path_or_name` may be an input-edge name (str) or an index path
        (list).  Defaults to the first input edge; the default is built here
        to avoid a mutable default argument.
        """
        if path_or_name is None:
            path_or_name = [0]
        if isinstance(path_or_name, _string_types):
            path = [IR_node.in_edges.index(path_or_name)]
        elif isinstance(path_or_name, list):
            path = path_or_name
        else:
            raise ValueError
        return self.IR_graph.get_parent_variable_name(IR_node.name, path)

    def _build(self):
        self.IR_graph.build()

    def gen_code(self, phase):
        raise NotImplementedError("do not use base emitter class.")

    def save_code(self, filepath, phase):
        """Generate code for `phase` and write it to `filepath`."""
        code = self.gen_code(phase)
        with open(filepath, 'w') as fout:
            fout.write(code)
        print("Target network code snippet is saved as [{}].".format(filepath))

    @staticmethod
    def save_weights(weights, filename):
        """Persist the weight dict as a numpy .npy file."""
        import numpy as np
        with open(filename, 'wb') as of:
            np.save(of, weights)
        print("Target weights are saved as [{}].".format(filename))

    @staticmethod
    def _image_in_transpose_str(dim):
        # channel-last -> channel-first permutation, e.g. dim=2 -> "2,0,1"
        dims = [dim]
        dims.extend(range(dim))
        return ','.join('%s' % id for id in dims)

    @staticmethod
    def _image_out_transpose_str(dim):
        # channel-first -> channel-last permutation, e.g. dim=3 -> "1,2,3,0"
        dims = list(range(1, dim + 1))
        dims.append(0)
        return ','.join('%s' % id for id in dims)

    @staticmethod
    def _conv_kernel_transpose_str(dim):
        # IR kernel layout -> framework layout, e.g. dim=2 -> "3,2,0,1"
        dims = [dim + 1, dim]
        dims.extend(range(dim))
        return ','.join('%s' % id for id in dims)
class Parser(object):
    """Base class for framework parsers that emit the MMdnn IR.

    Subclasses populate `self.IR_graph` / `self.weights` in `gen_IR` and
    expose their native graph via `src_graph`.
    """

    def __init__(self):
        self.IR_graph = GraphDef()
        self.weight_loaded = False

        # name --> (weight_name --> ndarray)
        self.weights = dict()


    def run(self, dest_path):
        """Convert and write <dest>.json, <dest>.pb and <dest>.npy."""
        self.gen_IR()
        self.save_to_json(dest_path + ".json")
        self.save_to_proto(dest_path + ".pb")
        self.save_weights(dest_path + ".npy")


    @property
    def src_graph(self):
        raise NotImplementedError


    def get_son(self, name, path, set_flag=False):
        return self.src_graph.get_son(name, path, set_flag)


    def get_parent(self, name, path, set_flag=False):
        return self.src_graph.get_parent(name, path, set_flag)


    def set_weight(self, layer_name, weight_name, data):
        """Record one weight tensor under its layer."""
        if layer_name not in self.weights:   # was `not layer_name in ...`
            self.weights[layer_name] = dict()
        self.weights[layer_name][weight_name] = data


    def save_to_json(self, filename):
        """Write the IR graph as JSON (field names preserved verbatim)."""
        import google.protobuf.json_format as json_format
        json_str = json_format.MessageToJson(self.IR_graph, preserving_proto_field_name=True)

        with open(filename, "w") as of:
            of.write(json_str)

        print("IR network structure is saved as [{}].".format(filename))

        return json_str


    def save_to_proto(self, filename):
        """Write the IR graph as a binary protobuf."""
        proto_str = self.IR_graph.SerializeToString()
        with open(filename, 'wb') as of:
            of.write(proto_str)

        print("IR network structure is saved as [{}].".format(filename))

        return proto_str


    def save_weights(self, filename):
        """Write the collected weights as .npy, or warn if none were loaded."""
        if self.weight_loaded:
            with open(filename, 'wb') as of:
                np.save(of, self.weights)
            print("IR weights are saved as [{}].".format(filename))

        else:
            print("Warning: weights are not loaded.")


    def convert_inedge(self, source_node, IR_node, start_idx=0, end_idx=None):
        """Copy in-edges [start_idx, end_idx) of the source node onto the IR node."""
        if end_idx is None:   # was `== None`
            end_idx = len(source_node.in_edges)
        for idx in range(start_idx, end_idx):
            # leading '_' marks internal names; strip it for the IR reference
            IR_node.input.append(self.src_graph.get_node(source_node.in_edges[idx]).real_name.lstrip('_'))


    @staticmethod
    def channel_first_conv_kernel_to_IR(tensor):
        """(out, in, spatial...) kernel -> (spatial..., in, out) IR layout."""
        dim = tensor.ndim
        tensor = np.transpose(tensor, list(range(2, dim)) + [1, 0])
        return tensor


    @staticmethod
    def channel_first_shape_to_IR(shape):
        """(N, C, spatial...) -> (N, spatial..., C)."""
        return [shape[0]] + list(shape[2:]) + [shape[1]]

    @staticmethod
    def channel_first_axis_to_IR(index):
        """Map an NCHW axis index to its NHWC equivalent (channel -> -1)."""
        if index == 0:
            return 0
        elif index == 1:
            return -1
        else:
            return index - 1
/mmdnn/conversion/coreml/coreml_utils.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | #---------------------------------------------------------------------------------------------- 5 | 6 | from coremltools.models import datatypes 7 | 8 | def _infer_coreml_input_shape(IR_shape, if_convert=True): 9 | """Infer CoreML input shape from IR shape. 10 | """ 11 | if len(IR_shape) == 0: 12 | # the end of the tensorflow_resnet_v2_50's squeeze shape is [unknown_rank: true] with len 0 13 | # 1001 means the 1001 classes for tensorflow_resnet_v2_50 14 | # !Alert! TODO 15 | # Future implement can be changed to the last two layer 16 | shape = [1001,1,1] 17 | elif len(IR_shape) == 1: 18 | # TODO - remove style transfer 1D hack 19 | # Input is 1D but it goes to the width dimension: (1,1,W) 20 | shape = [1, 1, IR_shape[0]] #(C,H,W) 21 | elif len(IR_shape) == 2: 22 | # assume (Batch, Channels) - Batch dimension should be dropped 23 | shape = [IR_shape[1]] 24 | elif len(IR_shape) == 3: 25 | # assume (Batch, Sequence-Length, channels) 26 | shape = [IR_shape[2], 1, IR_shape[1]] 27 | elif len(IR_shape) == 4: #(B,H,W,C) --> (C,H,W) 28 | shape = [IR_shape[3], IR_shape[1], IR_shape[2]] #(C,H,W) 29 | else: 30 | raise ValueError('Unrecognized IR input shape {}'.format(shape)) 31 | if if_convert: 32 | shape = datatypes.Array(*shape) 33 | return shape 34 | -------------------------------------------------------------------------------- /mmdnn/conversion/darknet/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 
def parse_prototxt(protofile):
    """Parse a Caffe-style .prototxt file into OrderedDicts.

    Returns either the top-level property dict, or (when `layer` blocks
    exist) an OrderedDict with 'props' and 'layers' keys.  Repeated keys
    accumulate into lists; '#' starts a comment; quotes around values are
    stripped.
    """

    def line_type(line):
        # 0: "key: value", 1: "blockname {", -1: anything else
        if line.find(':') >= 0:
            return 0
        elif line.find('{') >= 0:
            return 1
        return -1

    def parse_block(fp):
        # Parse the body of a `{ ... }` block up to its closing brace.
        block = OrderedDict()
        line = fp.readline().strip()
        while line != '}':
            ltype = line_type(line)
            if ltype == 0:  # key: value
                line = line.split('#')[0]
                key, value = line.split(':')
                key = key.strip()
                value = value.strip().strip('"')
                if key in block:
                    # Repeated key: accumulate values into a list.
                    if isinstance(block[key], list):
                        block[key].append(value)
                    else:
                        block[key] = [block[key], value]
                else:
                    block[key] = value
            elif ltype == 1:  # blockname {
                key = line.split('{')[0].strip()
                sub_block = parse_block(fp)
                block[key] = sub_block
            line = fp.readline().strip()
            line = line.split('#')[0]
        return block

    # Bug fix: the original opened the file without ever closing it,
    # leaking the handle; `with` guarantees closure on every return path.
    with open(protofile, 'r') as fp:
        props = OrderedDict()
        layers = []
        line = fp.readline()
        while line != '':
            line = line.strip().split('#')[0]
            if line == '':
                line = fp.readline()
                continue
            ltype = line_type(line)
            if ltype == 0:  # key: value
                key, value = line.split(':')
                key = key.strip()
                value = value.strip().strip('"')
                if key in props:
                    if isinstance(props[key], list):
                        props[key].append(value)
                    else:
                        props[key] = [props[key], value]
                else:
                    props[key] = value
            elif ltype == 1:  # blockname {
                key = line.split('{')[0].strip()
                if key == 'layer':
                    layers.append(parse_block(fp))
                else:
                    props[key] = parse_block(fp)
            line = fp.readline()

    if len(layers) > 0:
        net_info = OrderedDict()
        net_info['props'] = props
        net_info['layers'] = layers
        return net_info
    else:
        return props


def is_number(s):
    """Return True if `s` parses as a float."""
    try:
        float(s)
        return True
    except ValueError:
        return False
class TestCaffe(TestKit):
    """Inference / dump harness for Caffe models generated by MMdnn.

    The generated `MainModel` module writes a prototxt + caffemodel pair,
    which is then loaded with pycaffe for inference.
    """

    def __init__(self):
        super(TestCaffe, self).__init__()

        # Reference top-5 (class id, probability) used by test_truth().
        self.truth['caffe']['alexnet'] = [(657, 0.41121054), (744, 0.20789708), (847, 0.086725503), (821, 0.05908291), (595, 0.058017164)]

        if self.args.dump:
            self.dump_net = self.args.dump + '.prototxt'
            self.dump_weight = self.args.dump + '.caffemodel'
        else:
            # No dump path given: write throwaway files (removed in inference()).
            self.dump_net = 'tmp.prototxt'
            self.dump_weight = 'tmp.caffemodel'

        # Generated code materializes the net and fills in weights from args.w.
        self.MainModel.make_net(self.dump_net)
        self.MainModel.gen_weight(self.args.w, self.dump_weight, self.dump_net)
        self.model = caffe.Net(self.dump_net, self.dump_weight, caffe.TEST)

    def preprocess(self, image_path):
        x = super(TestCaffe, self).preprocess(image_path)
        # caffe uses NCHW
        x = np.transpose(x, [2, 0, 1])
        self.data = np.expand_dims(x, 0)


    def print_result(self):
        self.model.blobs['input'].data[...] = self.data
        # Forward pass; take the first sample of the last layer's output blob.
        predict = self.model.forward()[self.model._layer_names[-1]][0]
        super(TestCaffe, self).print_result(predict)


    def print_intermediate_result(self, layer_name, if_transpose = False):
        intermediate_output = self.model.blobs[layer_name].data[0]
        super(TestCaffe, self).print_intermediate_result(intermediate_output, if_transpose)


    def inference(self, image_path):
        self.preprocess(image_path)

        self.print_result()

        # self.print_intermediate_result('pooling0', False)

        self.test_truth()

        # delete tmp model files
        if os.path.isfile(self.dump_net):
            os.remove(self.dump_net)
        if os.path.isfile(self.dump_weight):
            os.remove(self.dump_weight)


    def dump(self):
        # Files were already written in __init__; just report their locations.
        print ('Caffe model files are saved as [{}] and [{}], generated by [{}.py] and [{}].'.format(
            self.dump_net, self.dump_weight, self.args.n, self.args.w))


if __name__=='__main__':
    tester = TestCaffe()
    if tester.args.dump:
        tester.dump()
    else:
        tester.inference(tester.args.image)
4 | #---------------------------------------------------------------------------------------------- 5 | 6 | import argparse 7 | import os 8 | from six import text_type as _text_type 9 | import cntk as C 10 | from mmdnn.conversion.common.utils import download_file 11 | 12 | BASE_MODEL_URL = 'https://www.cntk.ai/Models/CNTK_Pretrained/' 13 | # pylint: disable=line-too-long 14 | MODEL_URL = { 15 | 'alexnet' : BASE_MODEL_URL + 'AlexNet_ImageNet_CNTK.model', 16 | 'inception_v3' : BASE_MODEL_URL + 'InceptionV3_ImageNet_CNTK.model', 17 | 'resnet18' : BASE_MODEL_URL + 'ResNet18_ImageNet_CNTK.model', 18 | 'resnet50' : BASE_MODEL_URL + 'ResNet50_ImageNet_CNTK.model', 19 | 'resnet101' : BASE_MODEL_URL + 'ResNet101_ImageNet_CNTK.model', 20 | 'resnet152' : BASE_MODEL_URL + 'ResNet152_ImageNet_CNTK.model', 21 | 'Fast-RCNN_grocery100' : 'https://www.cntk.ai/Models/FRCN_Grocery/Fast-RCNN_grocery100.model', 22 | 'Fast-RCNN_Pascal' : 'https://www.cntk.ai/Models/FRCN_Pascal/Fast-RCNN.model' 23 | } 24 | # pylint: enable=line-too-long 25 | 26 | 27 | def _main(): 28 | parser = argparse.ArgumentParser() 29 | 30 | parser.add_argument('-n', '--network', type=_text_type, help='Model Type', required=True, 31 | choices=MODEL_URL.keys()) 32 | 33 | parser.add_argument('-i', '--image', default=None, 34 | type=_text_type, help='Test Image Path') 35 | 36 | parser.add_argument('-o', '--output_dir', default='./', 37 | type=_text_type, help='CNTK Checkpoint file name') 38 | 39 | args = parser.parse_args() 40 | 41 | fn = download_file(MODEL_URL[args.network], directory=args.output_dir) 42 | if not fn: 43 | return -1 44 | 45 | model = C.Function.load(fn) 46 | 47 | if len(model.outputs) > 1: 48 | for idx, output in enumerate(model.outputs): 49 | if len(output.shape) > 0: 50 | eval_node = idx 51 | break 52 | 53 | model = C.as_composite(model[eval_node].owner) 54 | model.save(fn) 55 | 56 | print("Model {} is saved as {}.".format(args.network, fn)) 57 | 58 | if args.image: 59 | import numpy as np 60 | from 
class cntk_extractor(base_extractor):
    """Downloads pretrained CNTK models and runs sanity-check inference."""

    BASE_MODEL_URL = 'https://www.cntk.ai/Models/CNTK_Pretrained/'

    # architecture name -> download URL of the pretrained .model file
    architecture_map = {
        'alexnet'        : BASE_MODEL_URL + 'AlexNet_ImageNet_CNTK.model',
        'inception_v3'   : BASE_MODEL_URL + 'InceptionV3_ImageNet_CNTK.model',
        'resnet18'       : BASE_MODEL_URL + 'ResNet18_ImageNet_CNTK.model',
        'resnet50'       : BASE_MODEL_URL + 'ResNet50_ImageNet_CNTK.model',
        'resnet101'      : BASE_MODEL_URL + 'ResNet101_ImageNet_CNTK.model',
        'resnet152'      : BASE_MODEL_URL + 'ResNet152_ImageNet_CNTK.model',
        'Fast-RCNN_grocery100' : 'https://www.cntk.ai/Models/FRCN_Grocery/Fast-RCNN_grocery100.model',
        'Fast-RCNN_Pascal'     : 'https://www.cntk.ai/Models/FRCN_Pascal/Fast-RCNN.model'
    }


    @classmethod
    def download(cls, architecture, path="./"):
        """Download `architecture` into `path`; returns the local file or None."""
        if cls.sanity_check(architecture):
            architecture_file = download_file(cls.architecture_map[architecture], directory=path)
            model = C.Function.load(architecture_file)
            if len(model.outputs) > 1:
                # Multi-output model: keep the first output with a non-empty
                # shape and re-save the pruned composite in place.
                # NOTE(review): if no output has a non-empty shape, `eval_node`
                # stays unbound and the line below raises NameError — confirm
                # every mapped model has such an output.
                for idx, output in enumerate(model.outputs):
                    if len(output.shape) > 0:
                        eval_node = idx
                        break

                model = C.as_composite(model[eval_node].owner)
                model.save(architecture_file)
            print("Cntk Model {} saved as [{}].".format(architecture, architecture_file))
            return architecture_file

        else:
            return None


    @classmethod
    def inference(cls, architecture_name, architecture_path, image_path):
        """Run the downloaded model on one image; returns the raw prediction."""
        if cls.sanity_check(architecture_name):
            import numpy as np
            func = TestKit.preprocess_func['cntk'][architecture_name]
            img = func(image_path)
            img = np.transpose(img, (2, 0, 1))  # HWC -> CHW as CNTK expects
            model = C.Function.load(architecture_path)
            predict = model.eval({model.arguments[0]:[img]})
            predict = np.squeeze(predict)

            # NOTE(review): `top_indices` is computed but unused — the full
            # prediction vector is returned, not the top-5.
            top_indices = predict.argsort()[-5:][::-1]
            return predict

        else:
            return None
class TestCNTK(TestKit):
    """Inference / dump / detect harness for CNTK models emitted by MMdnn."""

    def __init__(self):
        super(TestCNTK, self).__init__()

        # Reference top-5 (class id, score) per source framework for test_truth().
        self.truth['mxnet']['inception_bn'] = [(21, 0.84820729), (144, 0.06263639), (677, 0.015408826), (973, 0.014532777), (562, 0.0053690737)]

        self.truth['keras']['resnet'] = [(144, 0.77398175), (23, 0.10650793), (21, 0.081077583), (146, 0.0092755388), (562, 0.0089645367)]
        self.truth['tensorflow']['resnet'] = [(22, 13.370872), (147, 8.8040094), (24, 5.6983061), (90, 5.6143088), (95, 4.8060427)]

        # Build the converted model from the generated KitModel code + weights.
        self.model = self.MainModel.KitModel(self.args.w)
        # self.model, self.testop = self.MainModel.KitModel(self.args.w)


    def preprocess(self, image_path):
        self.data = super(TestCNTK, self).preprocess(image_path)


    def print_result(self):
        predict = self.model.eval({self.model.arguments[0]:[self.data]})
        super(TestCNTK, self).print_result(predict)


    def print_intermediate_result(self, layer_name, if_transpose = False):
        # NOTE(review): relies on self.testop, which is only set by the
        # commented-out KitModel variant in __init__ — confirm before use.
        test_arr = self.testop.eval({self.testop.arguments[0]:[self.data]})
        super(TestCNTK, self).print_intermediate_result(test_arr, if_transpose)


    def inference(self, image_path):
        self.preprocess(image_path)

        # self.print_intermediate_result(None, False)

        self.print_result()

        self.test_truth()

    def dump(self, path = None):
        # Save the CNTK model itself (default target comes from --dump).
        if path is None: path = self.args.dump
        self.model.save(path)
        print ('CNTK model file is saved as [{}], generated by [{}.py] and [{}].'.format(
            path, self.args.n, self.args.w))

    def detect(self, image_path, path = None):
        """Evaluate every model output on one image and pickle each map to disk."""
        self.preprocess(image_path)
        print("Found {} outputs".format(len(self.model)))
        for output in self.model:
            # Input is rescaled to [0, 1]; ndarray.dump pickles the raw map.
            predict = output.eval({output.arguments[0]:[self.data/255.]})
            predict.dump("finalconv_{}.npy".format(str(predict.shape[1])))
            print ('The output of CNTK model file is saved as [finalconv_{}.npy].'.format(
                str(predict.shape[1])))

        print('generated by [{}.py], [{}] and [{}].'.format(self.args.n, self.args.w, image_path))

if __name__=='__main__':
    tester = TestCNTK()
    if tester.args.dump:
        tester.dump()
    elif tester.args.detect:
        tester.detect(tester.args.image, tester.args.detect)
    else:
        tester.inference(tester.args.image)
class coreml_extractor(base_extractor):
    """Downloads published CoreML models and runs sanity-check inference."""

    _base_model_url = "https://docs-assets.developer.apple.com/coreml/models/"

    # from collections import namedtuple
    # Batch = namedtuple('Batch', ['data'])

    # TODO
    # Apple has published some of their own models. They can be downloaded from https://developer.apple.com/machine-learning/.
    # Those published models are: SqueezeNet, Places205-GoogLeNet, ResNet50, Inception v3, VGG16
    # None entries are architectures without a known published .mlmodel.
    architecture_map = {
        'inception_v3'     : "https://docs-assets.developer.apple.com/coreml/models/Inceptionv3.mlmodel",
        'vgg16'            : "https://docs-assets.developer.apple.com/coreml/models/VGG16.mlmodel",
        'vgg19'            : None,
        'resnet50'         : "https://docs-assets.developer.apple.com/coreml/models/Resnet50.mlmodel", # resnet50
        'mobilenet'        : "https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel",
        'xception'         : None,
        'inception_resnet' : None,
        'densenet'         : None,
        'nasnet'           : None,
        'tinyyolo'         : "https://s3-us-west-2.amazonaws.com/coreml-models/TinyYOLO.mlmodel"

    }



    @classmethod
    def download(cls, architecture, path = './'):
        """Fetch the .mlmodel for `architecture` into `path`; None on failure."""
        if cls.sanity_check(architecture):
            architecture_file = download_file(cls.architecture_map[architecture], directory = path)
            if not architecture_file:
                return None


            print('Coreml model {} is saved in [{}]'.format(architecture, path))
            return architecture_file
        else:
            return None


    @classmethod
    def inference(cls, architecture, model_path, image_path):
        """Run the model on one image; returns the squeezed output array."""
        # TODO
        from PIL import Image
        import numpy as np
        from coremltools.models._infer_shapes_nn_mlmodel import infer_shapes
        if cls.sanity_check(architecture):
            func = TestKit.preprocess_func['coreml'][architecture]


            # HACK: recover the expected input size by parsing the source text
            # of the preprocess function (its second argument after 'path' is
            # the image size).  Fragile — breaks if the preprocess signature
            # formatting ever changes.
            import inspect
            funcstr = inspect.getsource(func)

            if len(funcstr.split(',')) == 3:
                size = int(funcstr.split('path,')[1].split(')')[0])
            else:
                size = int(funcstr.split('path,')[1].split(',')[0])



            img = Image.open(image_path)
            img = img.resize((size, size))

            # load model
            model = MLModel(model_path)
            spec = model.get_spec()

            # TODO: Multiple inputs
            input_name = spec.description.input[0].name

            # TODO: Multiple outputs
            output_name = spec.description.output[0].name

            # inference
            input_data = img
            coreml_input = {input_name: img}
            coreml_output = model.predict(coreml_input)


            prob = coreml_output[output_name]
            if isinstance(prob, dict):
                # Classifier output is a class-name -> probability mapping;
                # keep the probability values only.
                prob = list(coreml_output[output_name].values())
            prob = np.array(prob).squeeze()

            return prob

        else:
            return None
4 | #---------------------------------------------------------------------------------------------- 5 | 6 | import argparse 7 | import numpy as np 8 | import sys 9 | import os 10 | from mmdnn.conversion.examples.imagenet_test import TestKit 11 | import coremltools 12 | 13 | class TestCoreML(TestKit): 14 | 15 | def __init__(self): 16 | from six import text_type as _text_type 17 | parser = argparse.ArgumentParser() 18 | 19 | parser.add_argument('-p', '--preprocess', type=_text_type, help='Model Preprocess Type') 20 | 21 | parser.add_argument('--model', '-n', '-w', type=_text_type, 22 | required=True, help='CoreML Model path.') 23 | 24 | parser.add_argument('-s', type=_text_type, help='Source Framework Type', 25 | choices=self.truth.keys()) 26 | 27 | parser.add_argument('--image', '-i', 28 | type=_text_type, help='Test image path.', 29 | default="mmdnn/conversion/examples/data/seagull.jpg") 30 | 31 | parser.add_argument('-input', type=_text_type, 32 | required=True, help='CoreML Input Node') 33 | 34 | parser.add_argument('-output', type=_text_type, 35 | required=True, help='CoreML Output Node') 36 | 37 | parser.add_argument('-size', type=int, 38 | default=224, help='CoreML Input Image Size') 39 | 40 | 41 | self.args = parser.parse_args() 42 | 43 | print("Loading model [{}].".format(self.args.model)) 44 | 45 | self.model = coremltools.models.MLModel(self.args.model.encode()) 46 | 47 | print("Model loading success.") 48 | 49 | def preprocess(self, image_path): 50 | from PIL import Image as pil_image 51 | img = pil_image.open(image_path) 52 | img = img.resize((self.args.size, self.args.size)) 53 | self.data = img 54 | 55 | def print_result(self): 56 | coreml_inputs = {self.args.input: self.data} 57 | self.coreml_output = self.model.predict(coreml_inputs, useCPUOnly=False) 58 | predict = self.coreml_output[self.args.output] 59 | super(TestCoreML, self).print_result(predict) 60 | 61 | 62 | def print_intermediate_result(self, layer_name, if_transpose = False): 63 | 
super(TestCoreML, self).print_intermediate_result(self.coreml_output[layer_name], if_transpose) 64 | 65 | 66 | def inference(self, image_path): 67 | self.preprocess(image_path) 68 | 69 | self.print_result() 70 | 71 | # self.print_intermediate_result('conv1_7x7_s2_1', True) 72 | 73 | # self.test_truth() 74 | 75 | if __name__=='__main__': 76 | tester = TestCoreML() 77 | tester.inference(tester.args.image) 78 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/darknet/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/darknet/extractor.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | #---------------------------------------------------------------------------------------------- 5 | 6 | from __future__ import absolute_import 7 | from __future__ import print_function 8 | import os 9 | from mmdnn.conversion.examples.darknet import darknet as cdarknet 10 | from mmdnn.conversion.examples.imagenet_test import TestKit 11 | from mmdnn.conversion.examples.extractor import base_extractor 12 | from mmdnn.conversion.common.utils import download_file 13 | 14 | 15 | class darknet_extractor(base_extractor): 16 | 17 | _base_model_url = "https://raw.githubusercontent.com/pjreddie/darknet/master/" 18 | 19 | architecture_map = { 20 | 'yolov3' : { 21 | 'config' : _base_model_url + "cfg/yolov3.cfg", 22 | 'weights' : "https://pjreddie.com/media/files/yolov3.weights" 23 | }, 24 | 25 | 'yolov2' :{ 26 | 'config' : _base_model_url + "cfg/yolov2.cfg", 27 | 'weights' : "https://pjreddie.com/media/files/yolov2.weights" 28 | } 29 | 30 | } 31 | 32 | 33 | @classmethod 34 | def download(cls, architecture, path = './'): 35 | 36 | if cls.sanity_check(architecture): 37 | cfg_name = architecture + ".cfg" 38 | architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) 39 | if not architecture_file: 40 | return None 41 | 42 | weight_name = architecture + ".weights" 43 | weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) 44 | if not weight_file: 45 | return None 46 | 47 | print("Darknet Model {} saved as [{}] and [{}].".format(architecture, architecture_file, weight_file)) 48 | return (architecture_file, weight_file) 49 | 50 | else: 51 | return None 52 | 53 | 54 | @classmethod 55 | def inference(cls, architecture, files, model_path, image_path): 56 | import numpy as np 57 | 58 | if cls.sanity_check(architecture): 59 | download_file(cls._base_model_url + "cfg/coco.data", directory='./') 60 | download_file(cls._base_model_url + "data/coco.names", 
directory='./data/') 61 | 62 | print(files) 63 | net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) 64 | meta = cdarknet.load_meta("coco.data".encode()) 65 | 66 | 67 | r = cdarknet.detect(net, meta, image_path.encode()) 68 | # print(r) 69 | return r 70 | 71 | else: 72 | return None 73 | 74 | 75 | 76 | # d = darknet_extractor() 77 | # model_filename = d.download('yolov3') 78 | # print(model_filename) 79 | 80 | # image_path = "./mmdnn/conversion/examples/data/dog.jpg" 81 | # model_path = "./" 82 | # d = darknet_extractor() 83 | # result = d.inference('yolov3', model_filename, model_path, image_path = image_path) 84 | # print(result) 85 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/darknet/libdarknet.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/conversion/examples/darknet/libdarknet.a -------------------------------------------------------------------------------- /mmdnn/conversion/examples/darknet/libdarknet.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/conversion/examples/darknet/libdarknet.so -------------------------------------------------------------------------------- /mmdnn/conversion/examples/data/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/conversion/examples/data/dog.jpg -------------------------------------------------------------------------------- /mmdnn/conversion/examples/data/one_imdb.npy: -------------------------------------------------------------------------------- 
# ============================================================================
# Binary assets (raw download links in the dump, not code):
#   mmdnn/conversion/examples/data/one_imdb.npy
#   mmdnn/conversion/examples/data/seagull.jpg
# ============================================================================

# ============================================================================
# mmdnn/conversion/examples/extractor.py
# ============================================================================
#----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------

from __future__ import absolute_import


class base_extractor(object):
    """Common base for the per-framework pretrained-model extractors.

    Subclasses provide an ``architecture_map`` ({name: source}) class
    attribute and implement ``download`` / ``inference``.
    """

    def __init__(self):
        pass

    @classmethod
    def help(cls):
        """Print the architecture names this extractor knows about."""
        print('Supported models: {}'.format(list(cls.architecture_map.keys())))

    @classmethod
    def sanity_check(cls, architecture):
        """Validate *architecture* against ``cls.architecture_map``.

        Returns True when the name is known; prints help and returns False
        when *architecture* is None; raises ValueError for an unknown name.
        """
        if architecture is None:
            cls.help()
            return False
        elif architecture not in cls.architecture_map:
            cls.help()
            raise ValueError("Unknown pretrained model name [{}].".format(architecture))
        else:
            return True

    @classmethod
    def download(cls, architecture):
        # Subclasses must download the model files and return their path(s).
        raise NotImplementedError()

    @classmethod
    def inference(cls, image_path):
        # Subclasses must run the model on one image and return predictions.
        raise NotImplementedError()


# ============================================================================
# mmdnn/conversion/examples/keras/__init__.py
# ============================================================================
-------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/keras/extract_model.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | #---------------------------------------------------------------------------------------------- 5 | 6 | import argparse 7 | from six import text_type as _text_type 8 | import keras 9 | from mmdnn.conversion.examples.imagenet_test import TestKit 10 | 11 | networks_map = { 12 | 'inception_v3' : lambda : keras.applications.inception_v3.InceptionV3(input_shape=(299, 299, 3)), 13 | 'vgg16' : lambda : keras.applications.vgg16.VGG16(), 14 | 'vgg19' : lambda : keras.applications.vgg19.VGG19(), 15 | 'resnet' : lambda : keras.applications.resnet50.ResNet50(), 16 | 'mobilenet' : lambda : keras.applications.mobilenet.MobileNet(), 17 | 'xception' : lambda : keras.applications.xception.Xception(input_shape=(299, 299, 3)), 18 | 'inception_resnet' : lambda : keras.applications.inception_resnet_v2.InceptionResNetV2() 19 | } 20 | 21 | image_size = { 22 | 'inception_v3' : 299, 23 | 'vgg16' : 224, 24 | 'vgg19' : 224, 25 | 'resnet' : 224, 26 | 'mobilenet' : 224, 27 | 'xception' : 299, 28 | 'inception_resnet' : 299 29 | } 30 | 31 | def _main(): 32 | parser = argparse.ArgumentParser() 33 | 34 | parser.add_argument('-n', '--network', 35 | type=_text_type, help='Model Type', required=True, 36 | choices=networks_map.keys()) 37 | 38 | parser.add_argument('-i', '--image', 39 | type=_text_type, help='Test Image Path') 40 
| 41 | args = parser.parse_args() 42 | 43 | model = networks_map.get(args.network) 44 | if model is None: 45 | raise NotImplementedError("Unknown keras application [{}]".format(args.network)) 46 | 47 | model = model() 48 | # save network structure as JSON 49 | json_string = model.to_json() 50 | with open("imagenet_{}.json".format(args.network), "w") as of: 51 | of.write(json_string) 52 | 53 | print("Network structure is saved as [imagenet_{}.json].".format(args.network)) 54 | 55 | model.save_weights('imagenet_{}.h5'.format(args.network)) 56 | 57 | print("Network weights are saved as [imagenet_{}.h5].".format(args.network)) 58 | 59 | if args.image: 60 | import numpy as np 61 | func = TestKit.preprocess_func['keras'][args.network] 62 | img = func(args.image) 63 | img = np.expand_dims(img, axis=0) 64 | predict = model.predict(img) 65 | predict = np.squeeze(predict) 66 | top_indices = predict.argsort()[-5:][::-1] 67 | result = [(i, predict[i]) for i in top_indices] 68 | print(result) 69 | 70 | # layer_name = 'block2_pool' 71 | # intermediate_layer_model = keras.Model(inputs=model.input, 72 | # outputs=model.get_layer(layer_name).output) 73 | # intermediate_output = intermediate_layer_model.predict(img) 74 | # print (intermediate_output) 75 | # print (intermediate_output.shape) 76 | # print ("%.30f" % np.sum(intermediate_output)) 77 | 78 | 79 | if __name__ == '__main__': 80 | _main() 81 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/keras/extractor.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | #---------------------------------------------------------------------------------------------- 5 | 6 | from __future__ import absolute_import 7 | import os 8 | import keras 9 | from keras import backend as K 10 | from mmdnn.conversion.examples.imagenet_test import TestKit 11 | from mmdnn.conversion.examples.extractor import base_extractor 12 | from mmdnn.conversion.common.utils import download_file 13 | 14 | 15 | class keras_extractor(base_extractor): 16 | 17 | MMDNN_BASE_URL = 'http://mmdnn.eastasia.cloudapp.azure.com:89/models/' 18 | 19 | architecture_map = { 20 | 'inception_v3' : lambda : keras.applications.inception_v3.InceptionV3(input_shape=(299, 299, 3)), 21 | 'vgg16' : lambda : keras.applications.vgg16.VGG16(), 22 | 'vgg19' : lambda : keras.applications.vgg19.VGG19(), 23 | 'resnet50' : lambda : keras.applications.resnet50.ResNet50(), 24 | 'mobilenet' : lambda : keras.applications.mobilenet.MobileNet(), 25 | 'xception' : lambda : keras.applications.xception.Xception(input_shape=(299, 299, 3)), 26 | 'inception_resnet_v2' : lambda : keras.applications.inception_resnet_v2.InceptionResNetV2(input_shape=(299, 299, 3)), 27 | 'densenet' : lambda : keras.applications.densenet.DenseNet201(), 28 | 'nasnet' : lambda : keras.applications.nasnet.NASNetLarge(), 29 | } 30 | 31 | thirdparty_map = { 32 | 'yolo2' : MMDNN_BASE_URL + 'keras/yolo2.h5', 33 | } 34 | 35 | image_size = { 36 | 'inception_v3' : 299, 37 | 'vgg16' : 224, 38 | 'vgg19' : 224, 39 | 'resnet' : 224, 40 | 'mobilenet' : 224, 41 | 'xception' : 299, 42 | 'inception_resnet' : 299, 43 | 'densenet' : 224, 44 | 'nasnet' : 331, 45 | } 46 | 47 | @classmethod 48 | def help(cls): 49 | print('Supported models: {}'.format(set().union(cls.architecture_map.keys(), cls.thirdparty_map.keys()))) 50 | 51 | 52 | @classmethod 53 | def download(cls, architecture, path="./"): 54 | if architecture in cls.thirdparty_map: 55 | weight_file = download_file(cls.thirdparty_map[architecture], directory=path) 56 | return weight_file 57 
| 58 | elif cls.sanity_check(architecture): 59 | output_filename = path + 'imagenet_{}.h5'.format(architecture) 60 | if os.path.exists(output_filename) == False: 61 | model = cls.architecture_map[architecture]() 62 | model.save(output_filename) 63 | print("Keras model {} is saved in [{}]".format(architecture, output_filename)) 64 | K.clear_session() 65 | del model 66 | return output_filename 67 | 68 | else: 69 | print("File [{}] existed, skip download.".format(output_filename)) 70 | return output_filename 71 | 72 | else: 73 | return None 74 | 75 | 76 | @classmethod 77 | def inference(cls, architecture, files, path, image_path): 78 | if architecture in cls.thirdparty_map: 79 | model = keras.models.load_model(files) 80 | 81 | elif cls.sanity_check(architecture): 82 | model = cls.architecture_map[architecture]() 83 | 84 | else: 85 | model = None 86 | 87 | if model: 88 | import numpy as np 89 | func = TestKit.preprocess_func['keras'][architecture] 90 | img = func(image_path) 91 | img = np.expand_dims(img, axis=0) 92 | predict = model.predict(img) 93 | predict = np.squeeze(predict) 94 | K.clear_session() 95 | del model 96 | return predict 97 | 98 | else: 99 | return None 100 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/mxnet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/conversion/examples/mxnet/__init__.py -------------------------------------------------------------------------------- /mmdnn/conversion/examples/mxnet/imagenet_test.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. 
See License.txt in the project root for license information. 4 | #---------------------------------------------------------------------------------------------- 5 | 6 | from collections import namedtuple 7 | import numpy as np 8 | from mmdnn.conversion.examples.imagenet_test import TestKit 9 | import mxnet as mx 10 | 11 | Batch = namedtuple('Batch', ['data']) 12 | 13 | 14 | class TestMXNet(TestKit): 15 | 16 | def __init__(self): 17 | super(TestMXNet, self).__init__() 18 | 19 | self.truth['tensorflow']['inception_v3'] = [(22, 9.6691055), (24, 4.3524752), (25, 3.5957956), (132, 3.5657482), (23, 3.3462858)] 20 | self.truth['keras']['inception_v3'] = [(21, 0.93430501), (23, 0.0028834261), (131, 0.0014781745), (24, 0.0014518937), (22, 0.0014435325)] 21 | 22 | self.model = self.MainModel.RefactorModel() 23 | self.model = self.MainModel.deploy_weight(self.model, self.args.w) 24 | 25 | 26 | def preprocess(self, image_path): 27 | self.data = super(TestMXNet, self).preprocess(image_path) 28 | self.data = np.swapaxes(self.data, 0, 2) 29 | self.data = np.swapaxes(self.data, 1, 2) 30 | self.data = np.expand_dims(self.data, 0) 31 | 32 | 33 | def print_result(self): 34 | self.model.forward(Batch([mx.nd.array(self.data)])) 35 | prob = self.model.get_outputs()[0].asnumpy() 36 | super(TestMXNet, self).print_result(prob) 37 | 38 | 39 | def inference(self, image_path): 40 | self.preprocess(image_path) 41 | 42 | # self.print_intermediate_result('pooling0', False) 43 | 44 | self.print_result() 45 | 46 | self.test_truth() 47 | 48 | 49 | def print_intermediate_result(self, layer_name, if_transpose = False): 50 | internals = self.model.symbol.get_internals() 51 | intermediate_output = internals[layer_name + "_output"] 52 | test_model = mx.mod.Module(symbol=intermediate_output, context=mx.cpu(), data_names=['data']) 53 | if self.args.preprocess == 'vgg19' or self.args.preprocess == 'inception_v1': 54 | test_model.bind(for_training=False, data_shapes = [('data', (1, 3, 224, 224))]) 55 | elif 
'resnet' in self.args.preprocess or self.args.preprocess == 'inception_v3': 56 | test_model.bind(for_training=False, data_shapes = [('data', (1, 3, 299, 299))]) 57 | else: 58 | assert False 59 | 60 | arg_params, aux_params = self.model.get_params() 61 | 62 | test_model.set_params(arg_params = arg_params, aux_params = aux_params, allow_missing = True, allow_extra = True) 63 | test_model.forward(Batch([mx.nd.array(self.data)])) 64 | intermediate_output = test_model.get_outputs()[0].asnumpy() 65 | 66 | super(TestMXNet, self).print_intermediate_result(intermediate_output, if_transpose) 67 | 68 | 69 | def dump(self, path = None): 70 | if path is None: path = self.args.dump 71 | self.model.save_checkpoint(path, 0) 72 | print ('MXNet checkpoint file is saved with prefix [{}] and iteration 0, generated by [{}.py] and [{}].'.format( 73 | path, self.args.n, self.args.w)) 74 | 75 | 76 | if __name__ == '__main__': 77 | tester = TestMXNet() 78 | if tester.args.dump: 79 | tester.dump() 80 | else: 81 | tester.inference(tester.args.image) 82 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/onnx/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/onnx/imagenet_test.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | #---------------------------------------------------------------------------------------------- 5 | 6 | import argparse 7 | import numpy as np 8 | import sys 9 | import os 10 | import tensorflow as tf 11 | from onnx_tf.backend import prepare 12 | from mmdnn.conversion.examples.imagenet_test import TestKit 13 | 14 | class TestONNX(TestKit): 15 | 16 | def __init__(self): 17 | super(TestONNX, self).__init__() 18 | self.model = prepare(self.MainModel.KitModel(self.args.w)) 19 | # self.input, self.model, self.testop = self.MainModel.KitModel(self.args.w) 20 | 21 | 22 | def preprocess(self, image_path): 23 | x = super(TestONNX, self).preprocess(image_path) 24 | self.data = np.expand_dims(x, 0) 25 | 26 | 27 | def print_result(self): 28 | predict = self.model.run(self.data)[0] 29 | super(TestONNX, self).print_result(predict) 30 | 31 | 32 | def print_intermediate_result(self, layer_name, if_transpose = False): 33 | # testop = tf.get_default_graph().get_operation_by_name(layer_name) 34 | testop = self.testop 35 | with tf.Session() as sess: 36 | init = tf.global_variables_initializer() 37 | sess.run(init) 38 | intermediate_output = sess.run(testop, feed_dict = {self.input : self.data}) 39 | 40 | super(TestONNX, self).print_intermediate_result(intermediate_output, if_transpose) 41 | 42 | 43 | def inference(self, image_path): 44 | self.preprocess(image_path) 45 | 46 | # self.print_intermediate_result('conv1_7x7_s2_1', True) 47 | 48 | self.print_result() 49 | 50 | self.test_truth() 51 | 52 | if __name__=='__main__': 53 | tester = TestONNX() 54 | if tester.args.dump: 55 | tester.dump() 56 | else: 57 | tester.inference(tester.args.image) 58 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/paddle/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 
-------------------------------------------------------------------------------- /mmdnn/conversion/examples/paddle/imagenet_test.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | #---------------------------------------------------------------------------------------------- 5 | 6 | import argparse 7 | import numpy as np 8 | import sys 9 | import os 10 | from mmdnn.conversion.examples.imagenet_test import TestKit 11 | import paddle.v2 as paddle 12 | import gzip 13 | from paddle.trainer_config_helpers.config_parser_utils import \ 14 | reset_parser 15 | 16 | 17 | class TestPaddle(TestKit): 18 | 19 | def __init__(self): 20 | from six import text_type as _text_type 21 | parser = argparse.ArgumentParser() 22 | 23 | parser.add_argument('-p', '--preprocess', type=_text_type, help='Model Preprocess Type') 24 | 25 | parser.add_argument('--model', '-n', '-w', type=_text_type, 26 | required=True, help='Paddle Model path.') 27 | 28 | parser.add_argument('-s', type=_text_type, help='Source Framework Type', 29 | choices=self.truth.keys()) 30 | 31 | parser.add_argument('--image', '-i', 32 | type=_text_type, help='Test image path.', 33 | default="mmdnn/conversion/examples/data/seagull.jpg") 34 | 35 | parser.add_argument('-input', type=_text_type, 36 | required=True, help='Paddle Input Node') 37 | 38 | parser.add_argument('-output', type=_text_type, 39 | required=True, help='Paddle Output Node') 40 | 41 | parser.add_argument('-size', type=int, 42 | default=224, help='Paddle Input Image Size') 43 | 44 | 45 | 46 | 47 | self.args = parser.parse_args() 48 | 49 | print("Loading model [{}].".format(self.args.model)) 50 | 51 | # import self.model 52 | # self.model 53 | 54 | # how the model can 
not load from `***.bin` 55 | 56 | print("Model loading success.") 57 | 58 | def preprocess(self, image_path): 59 | from PIL import Image as pil_image 60 | img = pil_image.open(image_path) 61 | img = img.resize((self.args.size, self.args.size)) 62 | self.data = img 63 | 64 | def print_result(self): 65 | reset_parser() 66 | img = np.transpose(self.data, (2, 0, 1)) 67 | test_data = [(img.flatten(),)] 68 | 69 | parameters_file = self.args.w 70 | with gzip.open(parameters_file, 'r') as f: 71 | parameters = paddle.parameters.Parameters.from_tar(f) 72 | 73 | 74 | predict = paddle.infer(output_layer = self.model, parameters=parameters, input=test_data) 75 | predict = np.squeeze(predict) 76 | 77 | super(TestPaddle, self).print_result(predict) 78 | 79 | 80 | def print_intermediate_result(self, layer_name, if_transpose = False): 81 | super(TestPaddle, self).print_intermediate_result(self.model.name, if_transpose) 82 | 83 | 84 | def inference(self, image_path): 85 | self.preprocess(image_path) 86 | self.print_result() 87 | 88 | 89 | if __name__=='__main__': 90 | tester = TestPaddle() 91 | tester.inference(tester.args.image) 92 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/paddle/models/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/paddle/models/alexnet.py: -------------------------------------------------------------------------------- 1 | import paddle.v2 as paddle 2 | 3 | __all__ = ['alexnet'] 4 | 5 | 6 | def alexnet(input, class_dim): 7 | conv1 = paddle.layer.img_conv( 8 | input=input, 9 | filter_size=11, 10 | num_channels=3, 11 | num_filters=96, 12 | stride=4, 13 | padding=1) 14 | cmrnorm1 = paddle.layer.img_cmrnorm( 15 | input=conv1, 
size=5, scale=0.0001, power=0.75) 16 | pool1 = paddle.layer.img_pool(input=cmrnorm1, pool_size=3, stride=2) 17 | 18 | conv2 = paddle.layer.img_conv( 19 | input=pool1, 20 | filter_size=5, 21 | num_filters=256, 22 | stride=1, 23 | padding=2, 24 | groups=1) 25 | cmrnorm2 = paddle.layer.img_cmrnorm( 26 | input=conv2, size=5, scale=0.0001, power=0.75) 27 | pool2 = paddle.layer.img_pool(input=cmrnorm2, pool_size=3, stride=2) 28 | 29 | pool3 = paddle.networks.img_conv_group( 30 | input=pool2, 31 | pool_size=3, 32 | pool_stride=2, 33 | conv_num_filter=[384, 384, 256], 34 | conv_filter_size=3, 35 | pool_type=paddle.pooling.Max()) 36 | 37 | fc1 = paddle.layer.fc(input=pool3, 38 | size=4096, 39 | act=paddle.activation.Relu(), 40 | layer_attr=paddle.attr.Extra(drop_rate=0.5)) 41 | fc2 = paddle.layer.fc(input=fc1, 42 | size=4096, 43 | act=paddle.activation.Relu(), 44 | layer_attr=paddle.attr.Extra(drop_rate=0.5)) 45 | 46 | out = paddle.layer.fc(input=fc2, 47 | size=class_dim, 48 | act=paddle.activation.Softmax()) 49 | return out 50 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/paddle/models/resnet.py: -------------------------------------------------------------------------------- 1 | import paddle.v2 as paddle 2 | 3 | __all__ = ['resnet_imagenet', 'resnet_cifar10'] 4 | 5 | 6 | def conv_bn_layer(input, 7 | ch_out, 8 | filter_size, 9 | stride, 10 | padding, 11 | active_type=paddle.activation.Relu(), 12 | ch_in=None): 13 | tmp = paddle.layer.img_conv( 14 | input=input, 15 | filter_size=filter_size, 16 | num_channels=ch_in, 17 | num_filters=ch_out, 18 | stride=stride, 19 | padding=padding, 20 | act=paddle.activation.Linear(), 21 | bias_attr=False) 22 | return paddle.layer.batch_norm(input=tmp, act=active_type) 23 | 24 | 25 | def shortcut(input, ch_out, stride): 26 | if input.num_filters != ch_out: 27 | return conv_bn_layer(input, ch_out, 1, stride, 0, 28 | paddle.activation.Linear()) 29 | else: 30 | return input 31 | 
32 | 33 | def basicblock(input, ch_out, stride): 34 | short = shortcut(input, ch_out, stride) 35 | conv1 = conv_bn_layer(input, ch_out, 3, stride, 1) 36 | conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear()) 37 | return paddle.layer.addto( 38 | input=[short, conv2], act=paddle.activation.Relu()) 39 | 40 | 41 | def bottleneck(input, ch_out, stride): 42 | short = shortcut(input, ch_out * 4, stride) 43 | conv1 = conv_bn_layer(input, ch_out, 1, stride, 0) 44 | conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1) 45 | conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, 46 | paddle.activation.Linear()) 47 | return paddle.layer.addto( 48 | input=[short, conv3], act=paddle.activation.Relu()) 49 | 50 | 51 | def layer_warp(block_func, input, ch_out, count, stride): 52 | conv = block_func(input, ch_out, stride) 53 | for i in range(1, count): 54 | conv = block_func(conv, ch_out, 1) 55 | return conv 56 | 57 | 58 | def resnet_imagenet(input, class_dim, depth=50): 59 | cfg = { 60 | 18: ([2, 2, 2, 1], basicblock), 61 | 34: ([3, 4, 6, 3], basicblock), 62 | 50: ([3, 4, 6, 3], bottleneck), 63 | 101: ([3, 4, 23, 3], bottleneck), 64 | 152: ([3, 8, 36, 3], bottleneck) 65 | } 66 | stages, block_func = cfg[depth] 67 | conv1 = conv_bn_layer( 68 | input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3) 69 | pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2) 70 | res1 = layer_warp(block_func, pool1, 64, stages[0], 1) 71 | res2 = layer_warp(block_func, res1, 128, stages[1], 2) 72 | res3 = layer_warp(block_func, res2, 256, stages[2], 2) 73 | res4 = layer_warp(block_func, res3, 512, stages[3], 2) 74 | pool2 = paddle.layer.img_pool( 75 | input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg()) 76 | out = paddle.layer.fc(input=pool2, 77 | size=class_dim, 78 | act=paddle.activation.Softmax()) 79 | return out 80 | 81 | 82 | def resnet_cifar10(input, class_dim, depth=32): 83 | # depth should be one of 20, 32, 44, 56, 110, 1202 84 | assert (depth - 2) % 6 
== 0 85 | n = (depth - 2) / 6 86 | nStages = {16, 64, 128} 87 | conv1 = conv_bn_layer( 88 | input, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1) 89 | res1 = layer_warp(basicblock, conv1, 16, n, 1) 90 | res2 = layer_warp(basicblock, res1, 32, n, 2) 91 | res3 = layer_warp(basicblock, res2, 64, n, 2) 92 | pool = paddle.layer.img_pool( 93 | input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg()) 94 | out = paddle.layer.fc(input=pool, 95 | size=class_dim, 96 | act=paddle.activation.Softmax()) 97 | return out 98 | -------------------------------------------------------------------------------- /mmdnn/conversion/examples/paddle/models/vgg.py: -------------------------------------------------------------------------------- 1 | import paddle.v2 as paddle 2 | 3 | __all__ = ['vgg13', 'vgg16', 'vgg19'] 4 | 5 | 6 | def vgg(input, nums, class_dim): 7 | def conv_block(input, num_filter, groups, num_channels=None): 8 | return paddle.networks.img_conv_group( 9 | input=input, 10 | num_channels=num_channels, 11 | pool_size=2, 12 | pool_stride=2, 13 | conv_num_filter=[num_filter] * groups, 14 | conv_filter_size=3, 15 | conv_act=paddle.activation.Relu(), 16 | pool_type=paddle.pooling.Max()) 17 | 18 | assert len(nums) == 5 19 | # the channel of input feature is 3 20 | conv1 = conv_block(input, 64, nums[0], 3) 21 | conv2 = conv_block(conv1, 128, nums[1]) 22 | conv3 = conv_block(conv2, 256, nums[2]) 23 | conv4 = conv_block(conv3, 512, nums[3]) 24 | conv5 = conv_block(conv4, 512, nums[4]) 25 | 26 | fc_dim = 4096 27 | fc1 = paddle.layer.fc(input=conv5, 28 | size=fc_dim, 29 | act=paddle.activation.Relu(), 30 | layer_attr=paddle.attr.Extra(drop_rate=0.5)) 31 | fc2 = paddle.layer.fc(input=fc1, 32 | size=fc_dim, 33 | act=paddle.activation.Relu(), 34 | layer_attr=paddle.attr.Extra(drop_rate=0.5)) 35 | out = paddle.layer.fc(input=fc2, 36 | size=class_dim, 37 | act=paddle.activation.Softmax()) 38 | return out 39 | 40 | 41 | def vgg13(input, class_dim): 42 | nums = [2, 2, 2, 2, 
# Factories for the supported pretrained torchvision models; each entry is
# lazy so nothing is downloaded until the chosen model is actually requested.
NETWORKS_MAP = {
    'inception_v3' : lambda : models.inception_v3(pretrained=True),
    'vgg16' : lambda : models.vgg16(pretrained=True),
    'vgg19' : lambda : models.vgg19(pretrained=True),
    'resnet152' : lambda : models.resnet152(pretrained=True),
    'densenet' : lambda : models.densenet201(pretrained=True),
    'squeezenet' : lambda : models.squeezenet1_1(pretrained=True)
}


def _main():
    """Download (or reuse) a pretrained PyTorch model and optionally
    run a top-5 prediction on a test image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--network',
                        type=_text_type, help='Model Type', required=True,
                        choices=NETWORKS_MAP.keys())
    parser.add_argument('-i', '--image', type=_text_type, help='Test Image Path')
    args = parser.parse_args()

    file_name = "imagenet_{}.pth".format(args.network)
    if os.path.exists(file_name):
        # Reuse a previously downloaded snapshot.
        print("File [{}] existed!".format(file_name))
        model = torch.load(file_name)
    else:
        factory = NETWORKS_MAP.get(args.network)
        model = factory()
        torch.save(model, file_name)
        print("PyTorch pretrained model is saved as [{}].".format(file_name))

    if args.image:
        import numpy as np
        # Preprocess to CHW with a leading batch dimension, as PyTorch expects.
        preprocess = TestKit.preprocess_func['pytorch'][args.network]
        img = preprocess(args.image)
        img = np.transpose(img, (2, 0, 1))
        img = np.expand_dims(img, 0).copy()
        data = torch.autograd.Variable(torch.from_numpy(img), requires_grad=False)

        model.eval()
        predict = np.squeeze(model(data).data.numpy())
        top_indices = predict.argsort()[-5:][::-1]
        result = [(i, predict[i]) for i in top_indices]
        print(result)


if __name__ == '__main__':
    _main()
class pytorch_extractor(base_extractor):
    """Downloads pretrained torchvision models and runs test inference on them."""

    # Every lower-case public callable exported by torchvision.models.
    architecture_map = sorted(name for name in models.__dict__
                              if name.islower() and not name.startswith("__")
                              and callable(models.__dict__[name]))


    @classmethod
    def help(cls):
        """Print the list of supported architectures."""
        print('Supported models: {}'.format(cls.architecture_map))


    @classmethod
    def download(cls, architecture, path="./"):
        """Save the pretrained `architecture` under `path` and return the file path.

        Returns None when `architecture` fails the sanity check.
        """
        if cls.sanity_check(architecture):
            # os.path.join handles `path` with or without a trailing separator;
            # the previous string concatenation broke for e.g. path="models".
            architecture_file = os.path.join(path, "imagenet_{}.pth".format(architecture))
            if not os.path.exists(architecture_file):
                kwargs = {}
                if architecture == 'inception_v3':
                    # Keep raw inputs: the converter does its own preprocessing.
                    kwargs['transform_input'] = False
                model = models.__dict__[architecture](pretrained=True, **kwargs)
                torch.save(model, architecture_file)
                print("PyTorch pretrained model is saved as [{}].".format(architecture_file))
            else:
                print("File [{}] existed!".format(architecture_file))

            return architecture_file

        else:
            return None


    @classmethod
    def inference(cls, architecture, path, image_path):
        """Load the saved model from `path` and return its squeezed prediction
        vector for the image at `image_path`."""
        model = torch.load(path)
        model.eval()

        import numpy as np
        func = TestKit.preprocess_func['pytorch'][architecture]
        img = func(image_path)
        # HWC -> CHW plus a batch dimension, as PyTorch expects.
        img = np.transpose(img, (2, 0, 1))
        img = np.expand_dims(img, 0).copy()

        data = torch.from_numpy(img)
        data = torch.autograd.Variable(data, requires_grad=False)

        predict = model(data).data.numpy()
        predict = np.squeeze(predict)

        return predict
class TestTorch(TestKit):
    """Inference/dump harness for converted PyTorch models (see TestKit)."""

    def __init__(self):
        super(TestTorch, self).__init__()

        # Reference top-5 predictions used by test_truth() for regression checks.
        self.truth['tensorflow']['inception_v3'] = [(22, 9.6691055), (24, 4.3524747), (25, 3.5957973), (132, 3.5657473), (23, 3.346283)]
        self.truth['keras']['inception_v3'] = [(21, 0.93430489), (23, 0.002883445), (131, 0.0014781791), (24, 0.0014518998), (22, 0.0014435351)]

        self.model = self.MainModel.KitModel(self.args.w)
        self.model.eval()


    def preprocess(self, image_path):
        # HWC -> CHW, add a batch dimension, and wrap as an inference-only Variable.
        arr = super(TestTorch, self).preprocess(image_path)
        arr = np.expand_dims(np.transpose(arr, (2, 0, 1)), 0).copy()
        self.data = torch.autograd.Variable(torch.from_numpy(arr), requires_grad=False)


    def print_result(self):
        output = self.model(self.data).data.numpy()
        super(TestTorch, self).print_result(output)


    def print_intermediate_result(self, layer_name, if_transpose=False):
        intermediate = self.model.test.data.numpy()
        super(TestTorch, self).print_intermediate_result(intermediate, if_transpose)


    def inference(self, image_path):
        self.preprocess(image_path)
        self.print_result()
        # self.print_intermediate_result(None, False)
        self.test_truth()


    def dump(self, path=None):
        if path is None:
            path = self.args.dump
        torch.save(self.model, path)
        print('PyTorch model file is saved as [{}], generated by [{}.py] and [{}].'.format(
            path, self.args.n, self.args.w))
class TestTF(TestKit):
    """Inference/dump harness for converted TensorFlow models (see TestKit)."""

    def __init__(self):
        super(TestTF, self).__init__()

        # Reference top-5 predictions used by test_truth() for regression checks.
        self.truth['mxnet']['resnet152-11k'] = [(1278, 0.49070787), (1277, 0.21392652), (282, 0.12979421), (1282, 0.066355646), (1224, 0.022040566)]

        self.input, self.model = self.MainModel.KitModel(self.args.w)
        # self.input, self.model, self.testop = self.MainModel.KitModel(self.args.w)


    def preprocess(self, image_path):
        # TF expects NHWC; only a batch dimension needs to be added.
        processed = super(TestTF, self).preprocess(image_path)
        self.data = np.expand_dims(processed, 0)


    def print_result(self):
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            predict = sess.run(self.model, feed_dict={self.input: self.data})

        super(TestTF, self).print_result(predict)


    def print_intermediate_result(self, layer_name, if_transpose=False):
        # testop = tf.get_default_graph().get_operation_by_name(layer_name)
        testop = self.testop
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            intermediate_output = sess.run(testop, feed_dict={self.input: self.data})

        super(TestTF, self).print_intermediate_result(intermediate_output, if_transpose)


    def inference(self, image_path):
        self.preprocess(image_path)
        # self.print_intermediate_result('conv1_7x7_s2_1', True)
        self.print_result()
        self.test_truth()


    def dump(self, path=None):
        """Export the model as a TF SavedModel tagged per --dump_tag."""
        if self.args.dump_tag == 'SERVING':
            tag_list = [tf.saved_model.tag_constants.SERVING]
        else:
            tag_list = [tf.saved_model.tag_constants.TRAINING]

        if path is None:
            path = self.args.dump
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            builder = tf.saved_model.builder.SavedModelBuilder(path)

            prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'input': tf.saved_model.utils.build_tensor_info(self.input)},
                outputs={'output': tf.saved_model.utils.build_tensor_info(self.model)},
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)

            builder.add_meta_graph_and_variables(
                sess,
                tag_list,
                signature_def_map={
                    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature
                })

            save_path = builder.save()

        print('Tensorflow file is saved as [{}], generated by [{}.py] and [{}].'.format(
            save_path, self.args.n, self.args.w))


if __name__ == '__main__':
    tester = TestTF()
    if tester.args.dump:
        if tester.args.dump_tag:
            tester.dump()
        else:
            raise ValueError("Need to provide the model type of Tensorflow model.")
    else:
        tester.inference(tester.args.image)
def _get_parser():
    """Build the command-line parser for this visualization tool."""
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--ckpt',
        required=True,
        help='Path to the checkpoint meta file (.ckpt.meta).'
    )
    parser.add_argument(
        '--logdir',
        required=True,
        help='Path to the log directory for writing the graph summary for visualization.'
    )

    return parser


def visualize(ckpt, logdir):
    """Import the checkpoint meta graph and write a TensorBoard graph summary.

    Args:
        ckpt: path to a .ckpt.meta file.
        logdir: directory that receives the TensorBoard event file.
    """
    with tf.Session() as sess:
        tf.train.import_meta_graph(ckpt)
        train_writer = tf.summary.FileWriter(logdir)
        train_writer.add_graph(sess.graph)
        train_writer.close()


def _main():
    """
    Visualize the frozen TF graph using tensorboard.

    Arguments
    ----------
    --ckpt: path to the checkpoint meta file (.ckpt.meta)
    --logdir: path to the log directory for writing graph summary for visualization

    Usage
    ----------
    python vis_meta.py --ckpt=model.ckpt.meta --logdir=/tmp/pb

    Unknown arguments are forwarded verbatim to the tensorboard command.
    """
    parser = _get_parser()
    args, unknown_args = parser.parse_known_args()

    # sys.exit instead of the site-supplied `exit` builtin, which is intended
    # for interactive use only and may be absent (e.g. under python -S).
    if not os.path.isfile(args.ckpt):
        print('The checkpoint meta file does not exist.')
        sys.exit(1)

    if not os.path.isdir(args.logdir):
        print('The log directory does not exist.')
        sys.exit(1)

    # Load file
    visualize(args.ckpt, args.logdir)

    # Run TensorBoard
    cmd = 'tensorboard --logdir={} {}'.format(
        args.logdir,
        ' '.join(unknown_args)
    )
    os.system(cmd)


if __name__ == '__main__':
    _main()
class Keras2GraphNode(GraphNode):
    """Graph node wrapping a single Keras layer."""

    def __init__(self, layer):
        super(Keras2GraphNode, self).__init__(layer)


    @property
    def name(self):
        return self.layer.name


    @property
    def type(self):
        return self.layer.__class__.__name__


    @property
    def keras_layer(self):
        return self.layer



class Keras2Graph(Graph):
    """Connectivity graph built from a Keras Sequential or functional Model."""

    def __init__(self, model):
        # Sanity check: exact-type check kept deliberately (subclasses such as
        # wrapped/custom model types are not supported by the converter).
        if type(model) not in (_keras.models.Sequential, _keras.models.Model):
            raise TypeError("Keras layer of type %s is not supported." % type(model))
        super(Keras2Graph, self).__init__(model)
        self.model = model


    def build(self):
        """Register every layer as a node and connect it to its predecessors."""
        self.input_layers = list()
        for layer in self.model.layers:
            self.layer_map[layer.name] = Keras2GraphNode(layer)
            self.layer_name_map[layer.name] = layer.name
            for inbound in layer._inbound_nodes:
                for pred in inbound.inbound_layers:
                    # Predecessors outside model.layers (e.g. InputLayer) are
                    # registered on first sight.
                    if pred.name not in self.layer_map:
                        self.layer_map[pred.name] = Keras2GraphNode(pred)
                        self.layer_name_map[pred.name] = pred.name
                    self._make_connection(pred.name, layer.name)

        # Kit: TODO
        # Duplicate models for weight sharing
        # Expand the sub-models
        super(Keras2Graph, self).build()
class MXNetGraphNode(GraphNode):
    """Graph node wrapping a single layer dict from the MXNet symbol JSON."""

    def __init__(self, layer):
        super(MXNetGraphNode, self).__init__(layer)

        # MXNet has renamed the attribute field across versions; probe the
        # known spellings in historical order.
        for key in ("attr", "param", "attrs"):
            if key in layer:
                self.attr = layer[key]
                break
        else:
            self.attr = None


    @property
    def name(self):
        return self.layer["name"]


    @property
    def type(self):
        return self.layer["op"]


    @property
    def mx_layer(self):
        return self.layer


    def get_attr(self, name, default_value=None):
        """Return attribute `name`, or `default_value` when absent."""
        if not self.attr:
            return default_value
        return self.attr.get(name, default_value)


class MXNetGraph(Graph):
    """Connectivity graph built from an MXNet symbol JSON node list."""

    def __init__(self, model):
        # sanity check non-sense always input module.Module
        # if not (type(model) == mx.module.Module
        #     or type(model) == mx.module.SequentialModule
        #     or type(model) == mx.model)
        #     raise TypeError("MXNet layer of type %s is not supported." % type(model))
        super(MXNetGraph, self).__init__(model)


    def build(self, json_data):
        """Populate the graph from `json_data` (the symbol JSON "nodes" list)."""
        import re

        self.input_layers = list()
        # Maps node index in json_data -> layer name, for resolving inputs.
        input_dict = dict()
        # "null" nodes that are parameters/labels are skipped entirely.
        param_pattern = re.compile("_(weight|bias|var|mean|gamma|beta|label)")

        for layer_num, layer in enumerate(json_data):
            layer_name = layer["name"]
            if param_pattern.search(layer_name) and layer["op"] == "null":
                continue

            input_dict[layer_num] = layer_name
            self.layer_map[layer_name] = MXNetGraphNode(layer)
            self.layer_name_map[layer_name] = layer_name

            for input_layer in layer["inputs"]:
                assert isinstance(input_layer, list)
                if input_layer[0] in input_dict:
                    pred = input_dict.get(input_layer[0])

                    # Forward references get a synthetic NoOp placeholder node.
                    if pred not in self.layer_map:
                        placeholder = dict({'op': 'NoOp', 'name': pred, 'inputs': list()})
                        self.layer_map[pred] = MXNetGraphNode(placeholder)
                        self.layer_name_map[pred] = pred

                    self._make_connection(pred, layer_name)

        super(MXNetGraph, self).build()

        # raise NotImplementedError("Cannot support multi-input")
-------------------------------------------------------------------------------- /mmdnn/conversion/onnx/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | -------------------------------------------------------------------------------- /mmdnn/conversion/onnx/elephant.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/conversion/onnx/elephant.jpg -------------------------------------------------------------------------------- /mmdnn/conversion/onnx/onnx_graph.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
class ONNXGraphNode(GraphNode):
    """Graph node wrapping one ONNX NodeProto."""

    def __init__(self, layer):
        super(ONNXGraphNode, self).__init__(layer)
        self.weights = list()   # names of initializers consumed by this node
        self.inputs = list()    # names of graph-level inputs it consumes
        self.outputs = list()   # names of graph-level outputs it produces

    @property
    def name(self):
        return self.layer.name

    @property
    def type(self):
        return self.layer.op_type

    @property
    def onnx_layer(self):
        return self.layer


class ONNXGraph(Graph):
    """Connectivity graph built from an ONNX ModelProto's GraphProto."""

    @staticmethod
    def _generate_name(layer):
        return ""

    def __init__(self, model):
        super(ONNXGraph, self).__init__(model)
        self._graph = model.graph
        # tensor (edge) name -> producing / consuming node name
        self._edge_src = dict()
        self._edge_dst = dict()
        # initializer / graph input / graph output name -> proto
        self._weights = dict()
        self._inputs = dict()
        self._outputs = dict()

    def build(self):
        """Index initializers and graph I/O, then wire node-to-node edges."""
        for w in self._graph.initializer:
            self._weights[w.name] = w
        for s in self._graph.input:
            self._inputs[s.name] = s
        for s in self._graph.output:
            self._outputs[s.name] = s

        for idx, layer in enumerate(self._graph.node):
            if not layer.name:
                # Synthesize a unique name for anonymous nodes.
                layer.name = '{0}_{1}'.format(layer.op_type, idx)
            node_name = layer.name
            node = ONNXGraphNode(layer)
            self.layer_map[node_name] = node
            self.layer_name_map[node_name] = node_name

            for tensor in layer.input:
                if tensor in self._weights:
                    # tensor is constant weight data
                    node.weights.append(tensor)
                if tensor in self._inputs:
                    node.inputs.append(tensor)
                else:
                    # tensor is an edge; connect when its producer is known.
                    self._edge_dst[tensor] = node_name
                    if tensor in self._edge_src:
                        self._make_connection(self._edge_src[tensor], node_name)

            for tensor in layer.output:
                if tensor in self._outputs:
                    node.outputs.append(tensor)
                else:
                    self._edge_src[tensor] = node_name
                    if tensor in self._edge_dst:
                        self._make_connection(node_name, self._edge_dst[tensor])

        super(ONNXGraph, self).build()
def save_model(MainModel, network_filepath, weight_filepath, dump_filepath):
    """Materialize the converted network with its weights and serialize it as ONNX."""
    kit_model = MainModel.KitModel(weight_filepath)
    onnx.save(kit_model, dump_filepath)
    message = 'ONNX model file is saved as [{}], generated by [{}.py] and [{}].'
    print(message.format(dump_filepath, network_filepath, weight_filepath))
class PaddleGraphNode(GraphNode):
    """Graph node wrapping a single PaddlePaddle layer config."""

    def __init__(self, layer):
        super(PaddleGraphNode, self).__init__(layer)


    @property
    def name(self):
        return self.layer.name


    @property
    def type(self):
        return self.layer.type


    @property
    def paddle_layer(self):
        return self.layer



class PaddleGraph(Graph):
    """Connectivity graph built from a PaddlePaddle ModelConfig proto."""

    def __init__(self, model):
        from paddle.proto import ModelConfig_pb2
        # sanity check.
        if not isinstance(model, ModelConfig_pb2.ModelConfig):
            raise TypeError("PaddlePaddle layer of type %s is not supported." % type(model))
        super(PaddleGraph, self).__init__(model)
        self.model = model


    def build(self):
        """Register every configured layer and connect it to its inputs."""
        self.input_layers = list()
        for layer in self.model.layers:
            node_name = layer.name
            self.layer_map[node_name] = PaddleGraphNode(layer)
            self.layer_name_map[node_name] = node_name

            for input_layer in layer.inputs:
                self._make_connection(input_layer.input_layer_name, node_name)

        super(PaddleGraph, self).build()
Notice that you may need [{}.py] to load the model back.'.format( 9 | dump_filepath, network_filepath, weight_filepath, network_filepath)) 10 | -------------------------------------------------------------------------------- /mmdnn/conversion/pytorch/torch_to_np.py: -------------------------------------------------------------------------------- 1 | import torchfile 2 | import numpy as np 3 | 4 | model = torchfile.load('kit.model') 5 | 6 | weights = dict() 7 | 8 | params = ['weight', 'bias', 'running_mean', 'running_var'] 9 | 10 | recursive = ['conv_nets'] 11 | 12 | def save_weight(name, node, level): 13 | weights[name] = dict() 14 | current_layer = weights[name] 15 | for p in params: 16 | if hasattr(node, p): 17 | func = getattr(node, p) 18 | arr = np.array(func) 19 | if arr.ndim >= 1: 20 | current_layer[p] = arr 21 | print (" " * level + "{}.{} shape {} {}".format(name, p, current_layer[p].shape, current_layer[p].dtype)) 22 | 23 | for p in recursive: 24 | if hasattr(node, p): 25 | func = getattr(node, p) 26 | if func != None: 27 | for idx, subnode in enumerate(func): 28 | new_name = name + ":{}:{}".format(p, idx) 29 | save_weight(new_name, subnode, level + 1) 30 | 31 | 32 | for idx, data in enumerate(model.modules): 33 | if data != None: 34 | print ("Find layer #{} : {}".format(idx, data._typename)) 35 | if hasattr(data, 'search_flag'): 36 | print (" name = {}".format(data.search_flag)) 37 | if data.modules != None: 38 | print (" submodule = {}#".format(len(data.modules))) 39 | for idx_j, sub in enumerate(data.modules): 40 | print (" layer [{}]".format(sub._typename)) 41 | name = data.search_flag + ":" + str(idx_j) 42 | save_weight(name, sub, 2) 43 | print ("\n") 44 | else: 45 | pass 46 | #print (dir(data)) 47 | 48 | print ("\n") 49 | 50 | with open("stylebank.npy", 'wb') as of: 51 | np.save(of, weights) 52 | 53 | print ("-------------------------------------------------") 54 | 55 | load_weight = np.load('stylebank.npy').item() 56 | for i in load_weight: 57 | 
from mmdnn.conversion.rewriter.rewriter import UnitRewriterBase
from mmdnn.conversion.tensorflow.rewriter.gru_rewriter import GRURewriter
from mmdnn.conversion.tensorflow.rewriter.lstm_rewriter import LSTMRewriter


def process_graph(graph, weights):
    """Run every registered RNN rewriter over *graph* in place.

    Each rewriter is constructed with the graph and its weights dict and
    executed immediately; GRU patterns are handled before LSTM patterns.
    """
    for rewriter_cls in (GRURewriter, LSTMRewriter):
        rewriter_cls(graph, weights).run()
class GRURewriter(UnitRewriterBase):
    """Rewrites matched GRU-cell subgraph patterns in a TensorFlow graph.

    Two patterns are handled: 'gru_cell' (the cell computation itself) and
    'h_zero' (the zero initial state).  The hyper-parameters recovered from
    the weights are attached to the matched top node's ``kwargs``.
    """

    def __init__(self, graph, weights_dict):
        # fix: __init__ must not `return` the super call's result
        super(GRURewriter, self).__init__(graph, weights_dict)

    def process_gru_cell(self, match_result):
        """Derive num_units / input_size from the matched cell's gate kernel."""
        if 'gru_cell' not in match_result._pattern_to_op.keys():
            return
        kwargs = dict()
        top_node = match_result._pattern_to_op[match_result._name_to_pattern['gru_cell']]

        w_e = match_result.get_op("cell_kernel")
        # strip the '/read' suffix to map the op back to its stored weight
        w = self._weights_dict[w_e.name.replace('/read', '')]

        # gate kernel shape is [input_size + num_units, 2 * num_units]
        num_units = w.shape[1] // 2
        input_size = w.shape[0] - num_units

        kwargs['num_units'] = num_units
        kwargs['input_size'] = input_size

        if hasattr(top_node, 'kwargs'):
            top_node.kwargs.update(kwargs)
        else:
            top_node.kwargs = kwargs

    def process_rnn_h_zero(self, match_result):
        """Record the fill size/value of a matched zero-state Fill op."""
        if 'h_zero' not in match_result._name_to_pattern.keys():
            return
        kwargs = dict()
        top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']]

        fill_size = match_result.get_op('fill_size')
        fill_value = match_result.get_op('fill_value')

        kwargs['fill_size'] = fill_size.get_attr('value').int_val[0]
        kwargs['fill_value'] = fill_value.get_attr('value').float_val[0]

        if hasattr(top_node, 'kwargs'):
            top_node.kwargs.update(kwargs)
        else:
            top_node.kwargs = kwargs

    def process_match_result(self, match_result, pattern_name):
        """Dispatch a single match to the appropriate handler."""
        if pattern_name == 'gru_cell':
            self.process_gru_cell(match_result)
        elif pattern_name == 'h_zero':
            # 'h_zero' is short enough to match non-GRU state; verify its scope
            if self.check_match_scope(match_result, 'GRUCellZeroState'):
                self.process_rnn_h_zero(match_result)

    def check_match_scope(self, match_result, scope_name):
        """For some short patterns, avoid false matches by checking the scope.

        Accepts the match only if every op's parent (or grandparent) scope,
        with any trailing ``_<n>`` counters stripped, equals *scope_name*.
        """
        ops = match_result._pattern_to_op.values()

        for op in ops:
            op_name_splits = op.name.split('/')
            if len(op_name_splits) < 2:
                return False
            if re.sub(r'(_\d+)*$', '', op_name_splits[-2]) != scope_name:
                if len(op_name_splits) > 2:
                    if re.sub(r'(_\d+)*$', '', op_name_splits[-3]) != scope_name:
                        return False
                else:
                    return False
        return True

    def run(self):
        return super(GRURewriter, self).run(['gru_cell', 'h_zero'], 'tensorflow')
import tensorflow as tf


def save_model(MainModel, network_filepath, weight_filepath, dump_filepath, dump_tag='SERVING'):
    """Export a converted model as a TensorFlow SavedModel.

    Args:
        MainModel: generated module whose ``KitModel(weights)`` returns
            ``(input_tensor, output_tensor, ...)``.
        network_filepath: path of the generated network code (for logging).
        weight_filepath: path of the converted weight file.
        dump_filepath: directory the SavedModel is written to.
        dump_tag: 'SERVING' selects the serving tag; anything else selects
            the training tag (kept for backward compatibility).
    """
    if dump_tag == 'SERVING':
        tag_list = [tf.saved_model.tag_constants.SERVING]
    else:
        tag_list = [tf.saved_model.tag_constants.TRAINING]
    res = MainModel.KitModel(weight_filepath)
    # fix: do not shadow the `input` builtin
    input_tensor = res[0]
    output_tensors = res[1:]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        builder = tf.saved_model.builder.SavedModelBuilder(dump_filepath)

        tensor_info_input = tf.saved_model.utils.build_tensor_info(input_tensor)
        # one named output per returned tensor: output0, output1, ...
        outputs = {'output{}'.format(i): tf.saved_model.utils.build_tensor_info(t)
                   for i, t in enumerate(output_tensors)}
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'input': tensor_info_input},
                outputs=outputs,
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
            )
        )

        builder.add_meta_graph_and_variables(
            sess,
            tag_list,
            signature_def_map={
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature
            }
        )

        save_path = builder.save()

    print('Tensorflow file is saved as [{}], generated by [{}.py] and [{}].'.format(
        save_path, network_filepath, weight_filepath))
class TensorflowGraphNode(GraphNode):
    """Graph-node wrapper around a single TensorFlow ``NodeDef``.

    Tracks predecessor/successor nodes and exposes convenient access to the
    underlying NodeDef's name, op type and attributes.
    """

    def __init__(self, layer):
        super(TensorflowGraphNode, self).__init__(layer)
        # direct predecessors / successors, filled while building the graph
        self.in_nodes = []
        self.out_nodes = []
        self._scope = ''

    @property
    def scope(self):
        return self._scope

    @scope.setter
    def scope(self, scope):
        self._scope = scope

    @property
    def name(self):
        return self.layer.name

    @property
    def type(self):
        # the NodeDef's op string, e.g. 'Conv2D'
        return self.layer.op

    @property
    def tf_layer(self):
        return self.layer

    def get_attr(self, name, default_value=None):
        """Return attribute *name* of the NodeDef, or *default_value* if absent.

        List-valued attributes come back as plain Python lists and byte
        strings are decoded to str.
        """
        if name not in self.layer.attr:
            return default_value
        attr = self.layer.attr[name]
        field = attr.WhichOneof('value')
        val = getattr(attr, field) if field else default_value
        if isinstance(val, attr_value_pb2.AttrValue.ListValue):
            return list(val.ListFields()[0][1])
        if isinstance(val, bytes):
            return val.decode('utf-8')
        return val
68 | pass 69 | 70 | super(TensorflowGraph, self).__init__(model) 71 | self.model = model 72 | 73 | 74 | def build(self): 75 | for i, layer in enumerate(self.model.node): 76 | self.layer_map[layer.name] = TensorflowGraphNode(layer) 77 | self.layer_name_map[layer.name] = layer.name 78 | for pred in layer.input: 79 | if pred not in self.layer_map: 80 | if not pred.split(':')[0] in self.layer_map: #test 81 | new_node = NodeDef() 82 | new_node.name = pred 83 | new_node.op = "NoOp" 84 | self.layer_map[pred] = TensorflowGraphNode(new_node) 85 | self.layer_name_map[pred] = pred 86 | 87 | self.tf_make_connection(pred, layer.name) 88 | 89 | super(TensorflowGraph, self).build() 90 | 91 | 92 | def tf_make_connection(self, src, dst): 93 | 94 | if ':' not in src and self.get_node(src).type in self.multi_tensor_type: 95 | src += ':0' 96 | 97 | self._make_connection(src, dst) 98 | src_node = self.get_node(src.split(':')[0]) 99 | dst_node = self.get_node(dst.split(':')[0]) 100 | 101 | if not src_node in self.layer_map[dst.split(':')[0]].in_nodes: 102 | self.layer_map[dst.split(':')[0]].in_nodes.append(src_node) 103 | if not dst_node in self.layer_map[src.split(':')[0]].out_nodes: 104 | self.layer_map[src.split(':')[0]].out_nodes.append(dst_node) 105 | -------------------------------------------------------------------------------- /mmdnn/conversion/torch/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "vis_edit", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "@types/antd": "^1.0.0", 7 | "@types/dagre": "^0.7.38", 8 | "@types/react-redux": "^5.0.13", 9 | "antd": "^2.13.10", 10 | 
"dagre": "^0.7.4", 11 | "react": "^16.1.1", 12 | "react-dom": "^16.1.1", 13 | "react-redux": "^5.0.6", 14 | "react-scripts-ts": "2.8.0", 15 | "redux": "^3.7.2" 16 | }, 17 | "scripts": { 18 | "start": "react-scripts-ts start", 19 | "build": "react-scripts-ts build", 20 | "test": "react-scripts-ts test --env=jsdom --setupTestFrameworkScriptFile=raf/polyfill", 21 | "eject": "react-scripts-ts eject" 22 | }, 23 | "devDependencies": { 24 | "@types/enzyme-adapter-react-16": "^1.0.1", 25 | "@types/jest": "^21.1.6", 26 | "@types/node": "^8.0.53", 27 | "@types/react": "^16.0.23", 28 | "@types/react-dom": "^16.0.3", 29 | "enzyme": "^3.2.0", 30 | "enzyme-adapter-react-16": "^1.1.0" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/vis_edit/public/favicon.ico -------------------------------------------------------------------------------- /mmdnn/vis_edit/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 11 | 12 | 13 | 22 | VisualDNN 23 | 24 | 25 | 28 |
29 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "192x192", 8 | "type": "image/png" 9 | } 10 | ], 11 | "start_url": "./index.html", 12 | "display": "standalone", 13 | "theme_color": "#000000", 14 | "background_color": "#ffffff" 15 | } 16 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/actions/index.tsx: -------------------------------------------------------------------------------- 1 | import * as constants from '../constants' 2 | import {Model} from "../types" 3 | 4 | // export interface IncrementEnthusiasmAction { 5 | // type: constants.INCREMENT_ENTHUSIASM; 6 | // } 7 | 8 | // export interface DecrementEnthusiasmAction { 9 | // type: constants.DECREMENT_ENTHUSIASM; 10 | // } 11 | 12 | 13 | // export function incrementEnthusiasm(): IncrementEnthusiasmAction { 14 | // return { 15 | // type: constants.INCREMENT_ENTHUSIASM 16 | // } 17 | // } 18 | 19 | // export function decrementEnthusiasm(): DecrementEnthusiasmAction { 20 | // return { 21 | // type: constants.DECREMENT_ENTHUSIASM 22 | // } 23 | // } 24 | 25 | 26 | // 27 | export interface ImportModelAction { 28 | type: constants.IMPORT_MODEL, 29 | model:Model 30 | 31 | } 32 | export function importModel(json:Model):ImportModelAction{ 33 | return { 34 | type:constants.IMPORT_MODEL, 35 | model:json 36 | } 37 | } 38 | export interface SelectLayerAction { 39 | type: constants.SELECT_LAYER, 40 | name:string 41 | } 42 | export function selectLayer(name: string):SelectLayerAction{ 43 | return { 44 | type:constants.SELECT_LAYER, 45 | name 46 | } 47 | } 48 | 49 | // export type EnthusiasmAction = IncrementEnthusiasmAction | DecrementEnthusiasmAction 50 | export type 
AllActions = ImportModelAction|SelectLayerAction 51 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/components/App.css: -------------------------------------------------------------------------------- 1 | 2 | 3 | .resizer-col{ 4 | cursor: col-resize; 5 | width:40px; 6 | height: calc(100vh - 70px); 7 | position: absolute; 8 | top:70px; 9 | 10 | } 11 | 12 | .header{ 13 | padding: 10px 20px; 14 | font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif; 15 | font-size: 40px; 16 | /* width:100vw; */ 17 | height: 70px; 18 | background-color: #49a9ee; 19 | box-shadow: 0 4px 2px 0 rgba(0,0,0,0.16); 20 | color:#ffffff; 21 | position: relative; 22 | z-index: 10; 23 | } -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/components/App.tsx: -------------------------------------------------------------------------------- 1 | 2 | import * as React from 'react'; 3 | import "./App.css"; 4 | import SiderBar from "../containers/SideBar"; 5 | import Graph from "../containers/Graph"; 6 | 7 | // import { Row, Col} from 'antd'; 8 | 9 | 10 | export interface State { 11 | width: number 12 | } 13 | class App extends React.Component{ 14 | public moveStart: number; 15 | constructor(pros: any) { 16 | super(pros) 17 | this.mouseDown = this.mouseDown.bind(this) 18 | this.mouseUp = this.mouseUp.bind(this) 19 | this.resize = this.resize.bind(this) 20 | this.state = { width: window.innerWidth * 0.15 } 21 | } 22 | mouseDown(e: any) { 23 | e.stopPropagation() 24 | e.preventDefault() 25 | document.addEventListener("mousemove", this.resize) 26 | this.moveStart = e.clientX 27 | } 28 | mouseUp() { 29 | document.removeEventListener("mousemove", this.resize) 30 | } 31 | resize(e: any) { 32 | // console.info(e) 33 | e.stopPropagation() 34 | e.preventDefault() 35 | let { width } = this.state 36 | this.setState({ width: width + e.clientX - this.moveStart }) 37 | this.moveStart = e.clientX 
38 | } 39 | render() { 40 | let { width } = this.state 41 | return ( 42 |
43 |
Visual DNN
44 | 45 | 50 | 51 |
52 | ); 53 | } 54 | } 55 | 56 | export default App; 57 | 58 | // helpers 59 | 60 | // function getExclamationMarks(numChars: number) { 61 | // return Array(numChars + 1).join('!'); 62 | // } -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/components/Graph.css: -------------------------------------------------------------------------------- 1 | .graphWindow{ 2 | /* background-color: #595959; */ 3 | position: absolute; 4 | top:70px; 5 | left:20vw; 6 | /* width:85vw; */ 7 | height:calc(100vh - 70px); 8 | overflow: hidden; 9 | /* z-index: -1; */ 10 | /* overflow-y: scroll; */ 11 | } -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/components/MiniMap.tsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/MMdnn/5cf01b2b4a8ce3a6e2b9c74b9e4cc55798429bef/mmdnn/vis_edit/src/components/MiniMap.tsx -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/components/SideBar.css: -------------------------------------------------------------------------------- 1 | .sideBar{ 2 | height:calc(100vh - 70px); 3 | border-right: 1px solid #e9e9e9; 4 | box-shadow: 2px 0 2px 0 rgba(0,0,0,0.16), 0 0 0 1px rgba(0,0,0,0.08); 5 | /* resize: horizontal; */ 6 | overflow: hidden; 7 | /* background: #333; */ 8 | } 9 | 10 | .ant-menu-inline{ 11 | border-right: none !important; 12 | 13 | } 14 | li.allLayers>ul.ant-menu-sub{ 15 | max-height: 400px; 16 | overflow-y: auto; 17 | overflow-x: hidden; 18 | } 19 | .menuItem{ 20 | font-size: 15px; 21 | } 22 | .layerClass{ 23 | font-size:14px; 24 | } 25 | .layerName{ 26 | font-size:12px; 27 | } 28 | li.search{ 29 | background-color: white !important; 30 | line-height:30px !important; 31 | height: 30px !important; 32 | position: relative !important; 33 | left: -25px !important; 34 | } 35 | /* stype the input button */ 36 | div.fileinputs { 
37 | position: relative; 38 | } 39 | 40 | div.fakefile { 41 | position: absolute; 42 | top: 0px; 43 | left: 0px; 44 | z-index: 1; 45 | } 46 | 47 | div.fakefile input[type=button] { 48 | /* enough width to completely overlap the real hidden file control */ 49 | cursor: pointer; 50 | width: 148px; 51 | } 52 | 53 | div.fileinputs input.file { 54 | position: relative; 55 | text-align: right; 56 | -moz-opacity:0 ; 57 | filter:alpha(opacity:0); 58 | opacity: 0; 59 | z-index: 2; 60 | } 61 | 62 | .inputButton{ 63 | padding: 0px !important; 64 | /* background-color: #333 !important; */ 65 | /* color:white !important; */ 66 | border:none !important; 67 | } 68 | 69 | [hidden] { 70 | display: none !important; 71 | } -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/constants/index.tsx: -------------------------------------------------------------------------------- 1 | export const INCREMENT_ENTHUSIASM = 'INCREMENT_ENTHUSIASM'; 2 | export type INCREMENT_ENTHUSIASM = typeof INCREMENT_ENTHUSIASM; 3 | 4 | 5 | export const DECREMENT_ENTHUSIASM = 'DECREMENT_ENTHUSIASM'; 6 | export type DECREMENT_ENTHUSIASM = typeof DECREMENT_ENTHUSIASM; 7 | 8 | export const IMPORT_MODEL = "IMPORT_MODEL" 9 | export type IMPORT_MODEL = typeof IMPORT_MODEL 10 | 11 | export const SELECT_LAYER = "SELECT_LAYER" 12 | export type SELECT_LAYER = typeof SELECT_LAYER 13 | 14 | 15 | 16 | // style config 17 | export const BG_COLOR="#333"; 18 | export const FG_COLOR=["#49a9ee", "#F29C30", "#75C277", "#DA5246"] -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/containers/Graph.tsx: -------------------------------------------------------------------------------- 1 | import Graph from '../components/Graph'; 2 | import * as actions from '../actions/'; 3 | import { StoreState } from '../types/index'; 4 | import { connect, Dispatch } from 'react-redux'; 5 | 6 | export function mapStateToProps(state:StoreState, 
ownProps:{width:number}) { 7 | return { 8 | nodes: state.model.node, 9 | selectedLayer: state.selectedLayer, 10 | width:ownProps.width 11 | }; 12 | } 13 | 14 | export function mapDispatchToProps(dispatch: Dispatch) { 15 | return { 16 | 17 | } 18 | } 19 | 20 | export default connect(mapStateToProps, mapDispatchToProps)(Graph); -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/containers/SideBar.tsx: -------------------------------------------------------------------------------- 1 | import SideBar from '../components/SideBar'; 2 | import * as actions from '../actions/'; 3 | import { StoreState } from '../types/index'; 4 | import { connect, Dispatch } from 'react-redux'; 5 | 6 | export function mapStateToProps(state:StoreState, ownProps:{width:number}) { 7 | return { 8 | nodes:state.model.node, 9 | width:ownProps.width 10 | }; 11 | } 12 | 13 | export function mapDispatchToProps(dispatch: Dispatch) { 14 | return { 15 | onImportModel: (json:any) => {dispatch(actions.importModel(json))}, 16 | onSelectLayer: (name:string) => {dispatch(actions.selectLayer(name))} 17 | } 18 | } 19 | 20 | export default connect(mapStateToProps, mapDispatchToProps)(SideBar); -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/helper/index.tsx: -------------------------------------------------------------------------------- 1 | 2 | let names:string[] = [] 3 | export function getColor(name:string):string{ 4 | const COLORS: string[]=[ 5 | ' #9e0142', 6 | ' #fdae61', 7 | ' #d53e4f', 8 | ' #e6f598', 9 | ' #f46d43', 10 | ' #ffffbf', 11 | ' #66c2a5', 12 | ' #fee08b', 13 | ' #3288bd', 14 | ' #5e4fa2'] 15 | let idx:number = names.indexOf(name) 16 | let numColor = COLORS.length 17 | if(idx === -1){ 18 | names.push(name) 19 | return COLORS[(names.length-1) % numColor] 20 | }else{ 21 | return COLORS[idx % numColor] 22 | } 23 | } 
-------------------------------------------------------------------------------- /mmdnn/vis_edit/src/index.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --bk-color: #ffffff; 3 | --fg-color: #00bcd4; 4 | --scroll-bar: #7dc1f2; 5 | } 6 | 7 | body { 8 | margin: 0; 9 | padding: 0; 10 | font-family: sans-serif; 11 | overflow: hidden; 12 | } 13 | 14 | /*scroll bar*/ 15 | 16 | ::-webkit-scrollbar { 17 | width: 10px; 18 | height: 10px; 19 | } 20 | 21 | /* Track */ 22 | 23 | ::-webkit-scrollbar-track { 24 | -webkit-box-shadow: inset 0 0 3px rgba(0, 0, 0, 0.3); 25 | -webkit-border-radius: 6px; 26 | border-radius: 3px; 27 | } 28 | 29 | /* Handle */ 30 | 31 | ::-webkit-scrollbar-thumb { 32 | -webkit-border-radius: 6px; 33 | border-radius: 3px; 34 | background: var(--scroll-bar); 35 | opacity: 0.5; 36 | -webkit-box-shadow: inset 0 0 3px rgba(0, 0, 0, 0.25); 37 | } 38 | 39 | /* ::-webkit-scrollbar-thumb:window-inactive { 40 | background:var(--scroll-bar); 41 | } */ -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/index.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import * as ReactDOM from 'react-dom'; 3 | import App from './components/App'; 4 | import { createStore } from 'redux'; 5 | import { reducer } from './reducers/index'; 6 | import { StoreState } from './types/index'; 7 | import { Provider } from 'react-redux'; 8 | import registerServiceWorker from './registerServiceWorker'; 9 | import './index.css'; 10 | 11 | import 'antd/dist/antd.css'; 12 | 13 | 14 | let initState:StoreState = {model:{node:[]}, selectedLayer:undefined} 15 | const store = createStore(reducer, initState ); 16 | 17 | ReactDOM.render( 18 | 19 | 20 | , 21 | document.getElementById('root') as HTMLElement 22 | ); 23 | registerServiceWorker(); 24 | 
-------------------------------------------------------------------------------- /mmdnn/vis_edit/src/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/reducers/index.tsx: -------------------------------------------------------------------------------- 1 | // src/reducers/index.tsx 2 | 3 | import { AllActions } from '../actions'; 4 | import { StoreState } from '../types'; 5 | import { IMPORT_MODEL, SELECT_LAYER } from '../constants'; 6 | 7 | export function reducer(state: StoreState, action: AllActions): StoreState { 8 | switch (action.type) { 9 | // case INCREMENT_ENTHUSIASM: 10 | 11 | // return { ...state, enthusiasmLevel:state.enthusiasmLevel+1 }; 12 | // case DECREMENT_ENTHUSIASM: 13 | // return { ...state, enthusiasmLevel:state.enthusiasmLevel-1 }; 14 | case IMPORT_MODEL: 15 | return { ...state, model:action.model} 16 | case SELECT_LAYER: 17 | return { ...state, selectedLayer: action.name} 18 | default: 19 | return state; 20 | } 21 | } -------------------------------------------------------------------------------- /mmdnn/vis_edit/src/types/index.tsx: -------------------------------------------------------------------------------- 1 | export interface StoreState { 2 | model:Model; 3 | selectedLayer: string|undefined; 4 | } 5 | 6 | export interface Model { 7 | node:Array 8 | } 9 | 10 | export interface IRNode { 11 | [key:string]:any 12 | } 13 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "outDir": "build/dist", 4 | "module": "esnext", 5 | "target": "es5", 6 | "lib": ["es6", "dom", "es2017"], 7 | "sourceMap": true, 8 | "allowJs": true, 9 | "jsx": "react", 10 | "moduleResolution": "node", 11 | "rootDir": "src", 
12 | "forceConsistentCasingInFileNames": true, 13 | "noImplicitReturns": true, 14 | "noImplicitThis": true, 15 | "noImplicitAny": true, 16 | "strictNullChecks": true, 17 | "suppressImplicitAnyIndexErrors": true, 18 | "noUnusedLocals": true, 19 | "allowSyntheticDefaultImports": true 20 | }, 21 | "exclude": [ 22 | "node_modules", 23 | "build", 24 | "scripts", 25 | "acceptance-tests", 26 | "webpack", 27 | "jest", 28 | "src/setupTests.ts" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /mmdnn/vis_edit/tsconfig.test.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "compilerOptions": { 4 | "module": "commonjs" 5 | } 6 | } -------------------------------------------------------------------------------- /mmdnn/vis_edit/tslint.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["tslint-react"], 3 | "rules": { 4 | "align": [ 5 | true, 6 | "parameters", 7 | "arguments", 8 | "statements" 9 | ], 10 | "ban": false, 11 | "class-name": true, 12 | "comment-format": [ 13 | true, 14 | "check-space" 15 | ], 16 | "curly": true, 17 | "eofline": false, 18 | "forin": true, 19 | "indent": [ true, "spaces" ], 20 | "interface-name": [true, "never-prefix"], 21 | "jsdoc-format": true, 22 | "jsx-no-lambda": false, 23 | "jsx-no-multiline-js": false, 24 | "label-position": true, 25 | "max-line-length": [ true, 120 ], 26 | "member-ordering": [ 27 | true, 28 | "public-before-private", 29 | "static-before-instance", 30 | "variables-before-functions" 31 | ], 32 | "no-any": true, 33 | "no-arg": true, 34 | "no-bitwise": true, 35 | "no-console": [ 36 | true, 37 | "log", 38 | "error", 39 | "debug", 40 | "info", 41 | "time", 42 | "timeEnd", 43 | "trace" 44 | ], 45 | "no-consecutive-blank-lines": true, 46 | "no-construct": true, 47 | "no-debugger": true, 48 | "no-duplicate-variable": true, 49 | "no-empty": true, 50 | 
"no-eval": true, 51 | "no-shadowed-variable": true, 52 | "no-string-literal": true, 53 | "no-switch-case-fall-through": true, 54 | "no-trailing-whitespace": false, 55 | "no-unused-expression": true, 56 | "no-use-before-declare": true, 57 | "one-line": [ 58 | true, 59 | "check-catch", 60 | "check-else", 61 | "check-open-brace" 62 | // "check-whitespace" 63 | ], 64 | "quotemark": [true, "single", "jsx-double"], 65 | "radix": true, 66 | "semicolon": [false, "always"], 67 | "switch-default": true, 68 | 69 | "trailing-comma": [false], 70 | 71 | "triple-equals": [ true, "allow-null-check" ], 72 | "typedef": [ 73 | true, 74 | "parameter", 75 | "property-declaration" 76 | ], 77 | "typedef-whitespace": [ 78 | true, 79 | { 80 | "call-signature": "nospace", 81 | "index-signature": "nospace", 82 | "parameter": "nospace", 83 | "property-declaration": "nospace", 84 | "variable-declaration": "nospace" 85 | } 86 | ], 87 | "variable-name": [true, "ban-keywords", "check-format", "allow-leading-underscore", "allow-pascal-case"], 88 | "whitespace": [ 89 | false, 90 | "check-branch", 91 | "check-decl", 92 | "check-module", 93 | "check-operator", 94 | "check-separator", 95 | "check-type", 96 | "check-typecast" 97 | ] 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /mmdnn/visualization/README.md: -------------------------------------------------------------------------------- 1 | # Visualize Neural Networks 2 | 3 | ## Prepare 4 | 5 | npm install 6 | 7 | ## Run 8 | 9 | node app.js -------------------------------------------------------------------------------- /mmdnn/visualization/app.js: -------------------------------------------------------------------------------- 1 | const express = require('express') 2 | const app = express() 3 | app.use(express.static('public')) 4 | app.get('/', (req, res) => res.sendFile('index.html', {"root": __dirname})) 5 | 6 | app.listen(8080, () => console.log('vis is running at http://localhost:8080')) 
-------------------------------------------------------------------------------- /mmdnn/visualization/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | MMdnn Visualizer 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 |
 MMdnn Visualizer
17 |
18 | 19 | 20 | 21 | 22 |
23 |

Choose Your Model 24 | 25 | 26 | 27 |

28 | 29 | 33 | 34 | 35 |
36 |
37 |
Layer Config
38 |
39 |
40 | 41 |
42 |

Tips

43 |
    44 |
  • 45 |
    46 | 47 |
    48 |
    49 | 52 |
    53 | 54 |
  • 55 |
  • wheel for scrolling
  • 56 |
  • shift+wheel for zooming
  • 57 |
  • mouse for panning
  • 58 | 59 | 60 |
61 |
62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /mmdnn/visualization/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "visualization", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "app.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "wqw", 10 | "license": "ISC", 11 | "dependencies": { 12 | "express": "^4.16.2", 13 | "pm2": "^2.9.1" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /requirements/linux-py3.txt: -------------------------------------------------------------------------------- 1 | six==1.14.0 2 | setuptools==46.1.3 3 | wget==3.2 4 | numpy==1.18.2 5 | scipy==1.4.1 6 | scikit-image==0.16.2 7 | protobuf==3.11.3 8 | pillow==6.2.2 9 | tensorflow==1.15.2 10 | keras==2.2.4 11 | coremltools==2.1.0 12 | mxnet==1.2.0 13 | cntk==2.6 14 | pytorch==0.4.0 or 1.5.1 15 | http://download.pytorch.org/whl/cpu/torch-0.4.0-cp36-cp36m-linux_x86_64.whl 16 | torchvision==0.2.1 17 | onnx==1.4.1 18 | onnx-tf==1.2.1 19 | -------------------------------------------------------------------------------- /requirements/select_requirements.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | To have a single pip command that uses the specific requirements file use this 4 | in a shell script for posix OS:: 5 | 6 | pip install -r $(select_requirements.py) 7 | 8 | On windows, create a bat of cmd file that loads the windows-specific 9 | requirements directly:: 10 | 11 | for /f %%i in ('python select_requirements.py') do (set req_file="%%i") 12 | pip install -r %req_file% 13 | """ 14 | 15 | from __future__ import print_function 16 | 17 | import os 18 | import platform 19 | import struct 20 | import sys 21 | 22 | # major python major_python_versions as python2 and python3 23 | 
major_python_versions = tuple(map(str, platform.python_version_tuple())) 24 | python2 = major_python_versions[0] == '2' 25 | python3 = major_python_versions[0] == '3' 26 | 27 | 28 | # operating system 29 | sys_platform = str(sys.platform).lower() 30 | linux = 'linux' in sys_platform 31 | windows = 'win32' in sys_platform 32 | cygwin = 'cygwin' in sys_platform 33 | solaris = 'sunos' in sys_platform 34 | macosx = 'darwin' in sys_platform 35 | posix = 'posix' in os.name.lower() 36 | 37 | def select_requirements_file(): 38 | """ 39 | Print the path to a requirements file based on some os/arch condition. 40 | """ 41 | if windows: 42 | print('requirements/win.txt') 43 | elif macosx: 44 | print('requirements/mac.txt') 45 | elif linux: 46 | if python2: 47 | print('requirements/linux-py2.txt') 48 | elif python3: 49 | print('requirements/linux-py3.txt') 50 | elif cygwin: 51 | print('requirements/cygwin.txt') 52 | else: 53 | raise Exception('Unsupported OS/platform') 54 | 55 | if __name__ == '__main__': 56 | select_requirements_file() -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | license_files = LICENSE 3 | 4 | [bdist_wheel] 5 | # https://packaging.python.org/guides/distributing-packages-using-setuptools/#wheels 6 | universal=1 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from setuptools import setup, find_packages 3 | from io import open 4 | 5 | # Get the long description from the README file 6 | with open('README.md', encoding='utf-8') as f: 7 | long_description = f.read() 8 | 9 | setup( 10 | name='mmdnn', 11 | 12 | # Versions should comply with PEP440. 
For a discussion on single-sourcing 13 | # the version across setup.py and the project code, see 14 | # https://packaging.python.org/en/latest/single_source_version.html 15 | version='0.3.1', 16 | 17 | description='Deep learning model converter, visualization and editor.', 18 | long_description=long_description, 19 | long_description_content_type='text/markdown', 20 | 21 | # The project's main homepage. 22 | url='https://github.com/Microsoft/MMdnn', 23 | 24 | # Author details 25 | author = 'System Research Group, Microsoft Research Asia', 26 | author_email='mmdnn_feedback@microsoft.com', 27 | 28 | # Choose your license 29 | license='MIT', 30 | 31 | # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 32 | classifiers=[ 33 | # How mature is this project? Common values are 34 | # 3 - Alpha 35 | # 4 - Beta 36 | # 5 - Production/Stable 37 | 'Development Status :: 3 - Alpha', 38 | 39 | # Indicate who your project is intended for 40 | 'Intended Audience :: Developers', 41 | 'Intended Audience :: Education', 42 | 'Intended Audience :: Science/Research', 43 | 'Topic :: Scientific/Engineering :: Mathematics', 44 | 'Topic :: Software Development :: Libraries :: Python Modules', 45 | 'Topic :: Software Development :: Libraries', 46 | 47 | # Pick your license as you wish (should match "license" above) 48 | 'License :: OSI Approved :: MIT License', 49 | 50 | # Specify the Python versions you support here. In particular, ensure 51 | # that you indicate whether you support Python 2, Python 3 or both. 52 | 'Programming Language :: Python :: 3' 53 | ], 54 | 55 | # What does your project relate to? 56 | keywords='deep learning model converter visualization', 57 | 58 | # You can just specify the packages manually here if your project is 59 | # simple. Or you can use find_packages(). 
60 | packages=find_packages(), 61 | 62 | package_data={ 63 | 'mmdnn':['visualization/public/*', 64 | 'visualization/*.json', 65 | 'visualization/*.js', 66 | 'visualization/*.html', 67 | 'visualization/*.css'] 68 | }, 69 | 70 | # Alternatively, if you want to distribute just a my_module.py, uncomment 71 | # this: 72 | # py_modules=["my_module"], 73 | 74 | # List run-time dependencies here. These will be installed by pip when 75 | # your project is installed. For an analysis of "install_requires" vs pip's 76 | # requirements files see: 77 | # https://packaging.python.org/en/latest/requirements.html 78 | install_requires=[ 79 | 'numpy >= 1.15.0', 80 | 'protobuf >= 3.6.0', 81 | 'six >= 1.10.0', 82 | 'pillow >= 6.2.1', 83 | ], 84 | 85 | # To provide executable scripts, use entry points in preference to the 86 | # "scripts" keyword. Entry points provide cross-platform support and allow 87 | # pip to create the appropriate form of executable for the target platform. 88 | entry_points={ 89 | 'console_scripts': [ 90 | 'mmconvert = mmdnn.conversion._script.convert:_main', 91 | 'mmdownload = mmdnn.conversion._script.extractModel:_main', 92 | 'mmvismeta = mmdnn.conversion.examples.tensorflow.vis_meta:_main', 93 | 'mmtoir = mmdnn.conversion._script.convertToIR:_main', 94 | 'mmtocode = mmdnn.conversion._script.IRToCode:_main', 95 | 'mmtomodel = mmdnn.conversion._script.dump_code:_main', 96 | ], 97 | }, 98 | ) 99 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Abort on Error 3 | set -e 4 | 5 | export PING_SLEEP=60s 6 | export WORKDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | export BUILD_OUTPUT=$WORKDIR/build.out 8 | 9 | touch $BUILD_OUTPUT 10 | 11 | dump_output() { 12 | echo Tailing the last 500 lines of output: 13 | tail -500 $BUILD_OUTPUT 14 | } 15 | error_handler() { 16 | echo ERROR: An error was encountered with the 
build. 17 | dump_output 18 | exit 1 19 | } 20 | 21 | # If an error occurs, run our error handler to output a tail of the build 22 | trap 'error_handler' ERR 23 | 24 | # Set up a repeating loop to send some output to Travis. 25 | 26 | bash -c "while true; do echo \$(date) - building ...; sleep $PING_SLEEP; done" & 27 | PING_LOOP_PID=$! 28 | 29 | # My build is using maven, but you could build anything with this, E.g. 30 | python -m pytest -s -q tests/test_$1.py >> $BUILD_OUTPUT 2>&1 31 | 32 | # The build finished without returning an error so dump a tail of the output 33 | dump_output 34 | 35 | # nicely terminate the ping output loop 36 | kill $PING_LOOP_PID 37 | -------------------------------------------------------------------------------- /tests/test_caffe.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'caffe' : 11 | { 12 | 'alexnet' : [ 13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'inception_v1' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ], 31 | #Temporarily disable 'xception' : [TestModels.coreml_emit, TestModels.cntk_emit, TestModels.tensorflow_emit], 32 | } 33 | } 34 | 35 | def test_caffe(): 36 | test_table = get_test_table() 37 | tester = TestModels(test_table) 38 | tester._test_function('caffe', tester.caffe_parse) 39 | 40 | 41 | if __name__ == '__main__': 42 | test_caffe() 
-------------------------------------------------------------------------------- /tests/test_caffe_2.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'caffe' : 11 | { 12 | 'resnet152' : [ 13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.keras_emit, 18 | TestModels.mxnet_emit, 19 | TestModels.pytorch_emit, 20 | TestModels.tensorflow_emit 21 | ], 22 | 'squeezenet' : [ 23 | TestModels.onnx_emit, 24 | TestModels.caffe_emit, 25 | TestModels.cntk_emit, 26 | TestModels.coreml_emit, 27 | TestModels.keras_emit, 28 | TestModels.mxnet_emit, 29 | TestModels.pytorch_emit, 30 | TestModels.tensorflow_emit 31 | ], 32 | } 33 | } 34 | 35 | def test_caffe(): 36 | test_table = get_test_table() 37 | tester = TestModels(test_table) 38 | tester._test_function('caffe', tester.caffe_parse) 39 | 40 | 41 | if __name__ == '__main__': 42 | test_caffe() -------------------------------------------------------------------------------- /tests/test_caffe_3.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'caffe' : 11 | { 12 | 'vgg19' : [ 13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.keras_emit, 18 | TestModels.mxnet_emit, 19 | TestModels.pytorch_emit, 20 | TestModels.tensorflow_emit 21 | ] 22 | } 23 | } 24 | 25 | def test_caffe(): 26 | test_table = get_test_table() 27 | tester = TestModels(test_table) 28 | 
tester._test_function('caffe', tester.caffe_parse) 29 | 30 | 31 | if __name__ == '__main__': 32 | test_caffe() -------------------------------------------------------------------------------- /tests/test_caffe_4.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'caffe' : 11 | { 12 | # Cannot run on Travis since it seems to consume too much memory. 13 | 'voc-fcn8s' : [ 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.tensorflow_emit 17 | ], 18 | 'voc-fcn16s' : [ 19 | TestModels.cntk_emit, 20 | TestModels.coreml_emit, 21 | TestModels.tensorflow_emit 22 | ], 23 | 'voc-fcn32s' : [ 24 | TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.tensorflow_emit 27 | ], 28 | #Temporarily disable 'xception' : [TestModels.mxnet_emit, TestModels.pytorch_emit], 29 | #Temporarily disable 'inception_v4' : [TestModels.cntk_emit, TestModels.coreml_emit, TestModels.keras_emit, TestModels.pytorch_emit, TestModels.tensorflow_emit], 30 | } 31 | } 32 | 33 | def test_caffe(): 34 | test_table = get_test_table() 35 | tester = TestModels(test_table) 36 | tester._test_function('caffe', tester.caffe_parse) 37 | 38 | 39 | if __name__ == '__main__': 40 | test_caffe() -------------------------------------------------------------------------------- /tests/test_cntk.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'cntk' : 10 | { 11 | 'inception_v3' : [ 12 | TestModels.onnx_emit, 13 | #TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | #TestModels.coreml_emit, 16 | 
#TestModels.keras_emit, 17 | #TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'resnet18' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ] 31 | } 32 | } 33 | 34 | def test_cntk(): 35 | test_table = get_test_table() 36 | tester = TestModels(test_table) 37 | tester._test_function('cntk', tester.cntk_parse) 38 | 39 | 40 | if __name__ == '__main__': 41 | test_cntk() 42 | -------------------------------------------------------------------------------- /tests/test_cntk_2.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'cntk' : 10 | { 11 | 'resnet152' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_cntk(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('cntk', tester.cntk_parse) 28 | 29 | 30 | if __name__ == '__main__': 31 | test_cntk() 32 | -------------------------------------------------------------------------------- /tests/test_coreml.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | from conversion_imagenet import is_coreml_supported 8 | 9 | def get_test_table(): 10 | return { 'coreml' : 11 | { 12 | 'inception_v3' : [ 
13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | #TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'mobilenet' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | #TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ], 31 | } 32 | } 33 | 34 | 35 | def test_coreml(): 36 | if is_coreml_supported(): 37 | test_table = get_test_table() 38 | tester = TestModels(test_table) 39 | tester._test_function('coreml', tester.coreml_parse) 40 | 41 | 42 | if __name__ == '__main__': 43 | test_coreml() 44 | -------------------------------------------------------------------------------- /tests/test_coreml_2.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | from conversion_imagenet import is_coreml_supported 8 | 9 | def get_test_table(): 10 | return { 'coreml' : 11 | { 12 | 'resnet50' : [ 13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | #TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'vgg16' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | #TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ], 31 | 'tinyyolo' : [ 32 | TestModels.onnx_emit, 33 | #TestModels.caffe_emit, 34 | #TestModels.cntk_emit, 35 | TestModels.coreml_emit, 36 | #TestModels.keras_emit, 37 | #TestModels.mxnet_emit, 38 | #TestModels.pytorch_emit, 39 | #TestModels.tensorflow_emit 40 | ] 41 | } 42 | } 43 | 44 | 45 | def 
test_coreml(): 46 | if is_coreml_supported(): 47 | test_table = get_test_table() 48 | tester = TestModels(test_table) 49 | tester._test_function('coreml', tester.coreml_parse) 50 | 51 | 52 | if __name__ == '__main__': 53 | test_coreml() 54 | -------------------------------------------------------------------------------- /tests/test_darknet.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'darknet' : 10 | { 11 | 'yolov2' : [ 12 | #TestModels.onnx_emit, 13 | #TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | #TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | #TestModels.mxnet_emit, 18 | #TestModels.pytorch_emit, 19 | #TestModels.tensorflow_emit 20 | ], 21 | 'yolov3' : [ 22 | #TestModels.onnx_emit, 23 | #TestModels.caffe_emit, 24 | #TestModels.cntk_emit, 25 | #TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | #TestModels.mxnet_emit, 28 | #TestModels.pytorch_emit, 29 | #TestModels.tensorflow_emit 30 | ] 31 | } 32 | } 33 | 34 | def test_darknet(): 35 | test_table = get_test_table() 36 | tester = TestModels(test_table) 37 | tester._test_function('darknet', tester.darknet_parse) 38 | 39 | 40 | if __name__ == '__main__': 41 | test_darknet() 42 | -------------------------------------------------------------------------------- /tests/test_keras.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 11 | 'keras' : { 12 | 'vgg16' : [ 13 | TestModels.onnx_emit 14 | ], 15 | 'vgg19' : [ 16 | TestModels.onnx_emit, 17 | TestModels.caffe_emit, 18 | TestModels.cntk_emit, 19 
| TestModels.coreml_emit, 20 | TestModels.keras_emit, 21 | TestModels.mxnet_emit, 22 | TestModels.pytorch_emit, 23 | TestModels.tensorflow_emit 24 | ], 25 | }} 26 | 27 | 28 | def test_keras(): 29 | test_table = get_test_table() 30 | tester = TestModels(test_table) 31 | tester._test_function('keras', tester.keras_parse) 32 | 33 | 34 | if __name__ == '__main__': 35 | test_keras() 36 | -------------------------------------------------------------------------------- /tests/test_keras_2.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 11 | 'keras' : { 12 | 'resnet50' : [ 13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.keras_emit, 18 | TestModels.mxnet_emit, 19 | TestModels.pytorch_emit, 20 | TestModels.tensorflow_emit 21 | ] 22 | }} 23 | 24 | 25 | def test_keras(): 26 | test_table = get_test_table() 27 | tester = TestModels(test_table) 28 | tester._test_function('keras', tester.keras_parse) 29 | 30 | 31 | if __name__ == '__main__': 32 | test_keras() 33 | -------------------------------------------------------------------------------- /tests/test_keras_3.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 11 | 'keras' : { 12 | 'densenet' : [ 13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.keras_emit, 18 | TestModels.mxnet_emit, 19 | TestModels.pytorch_emit, 20 | TestModels.tensorflow_emit 21 | ] 22 | }} 23 | 
24 | 25 | def test_keras(): 26 | test_table = get_test_table() 27 | tester = TestModels(test_table) 28 | tester._test_function('keras', tester.keras_parse) 29 | 30 | 31 | if __name__ == '__main__': 32 | test_keras() 33 | -------------------------------------------------------------------------------- /tests/test_keras_4.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 11 | 'keras' : { 12 | 'inception_v3' : [ 13 | TestModels.onnx_emit, 14 | TestModels.caffe_emit, 15 | TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.keras_emit, 18 | TestModels.mxnet_emit, 19 | TestModels.pytorch_emit, 20 | TestModels.tensorflow_emit 21 | ], 22 | }} 23 | 24 | 25 | def test_keras(): 26 | test_table = get_test_table() 27 | tester = TestModels(test_table) 28 | tester._test_function('keras', tester.keras_parse) 29 | 30 | 31 | if __name__ == '__main__': 32 | test_keras() 33 | -------------------------------------------------------------------------------- /tests/test_keras_5.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 11 | 'keras' : { 12 | 'mobilenet' : [ 13 | TestModels.onnx_emit, 14 | #TestModels.caffe_emit, 15 | #TestModels.cntk_emit, 16 | TestModels.coreml_emit, 17 | TestModels.keras_emit, 18 | #TestModels.mxnet_emit, 19 | #TestModels.pytorch_emit, 20 | TestModels.tensorflow_emit 21 | ], 22 | 'xception' : [ 23 | #TestModels.onnx_emit, 24 | #TestModels.caffe_emit, 25 | #TestModels.cntk_emit, 26 | TestModels.coreml_emit, 27 | TestModels.keras_emit, 28 | 
#TestModels.mxnet_emit, 29 | #TestModels.pytorch_emit, 30 | TestModels.tensorflow_emit 31 | ] 32 | }} 33 | 34 | 35 | def test_keras(): 36 | test_table = get_test_table() 37 | tester = TestModels(test_table) 38 | tester._test_function('keras', tester.keras_parse) 39 | 40 | 41 | if __name__ == '__main__': 42 | test_keras() 43 | -------------------------------------------------------------------------------- /tests/test_mxnet.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'mxnet' : { 11 | 'vgg19' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'squeezenet_v1.1' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ], 31 | 'imagenet1k-inception-bn' : [ 32 | TestModels.onnx_emit, 33 | TestModels.caffe_emit, 34 | TestModels.cntk_emit, 35 | TestModels.coreml_emit, 36 | TestModels.keras_emit, 37 | TestModels.mxnet_emit, 38 | TestModels.pytorch_emit, 39 | TestModels.tensorflow_emit 40 | ], 41 | } 42 | } 43 | 44 | 45 | def test_mxnet(): 46 | test_table = get_test_table() 47 | tester = TestModels(test_table) 48 | tester._test_function('mxnet', tester.mxnet_parse) 49 | 50 | 51 | if __name__ == '__main__': 52 | test_mxnet() 53 | 54 | -------------------------------------------------------------------------------- /tests/test_mxnet_2.py: -------------------------------------------------------------------------------- 1 | from __future__ import 
absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'mxnet' : { 11 | 'imagenet1k-resnet-18' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'imagenet1k-resnet-152' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | #TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ] 31 | }} 32 | 33 | 34 | def test_mxnet(): 35 | test_table = get_test_table() 36 | tester = TestModels(test_table) 37 | tester._test_function('mxnet', tester.mxnet_parse) 38 | 39 | 40 | if __name__ == '__main__': 41 | test_mxnet() 42 | 43 | -------------------------------------------------------------------------------- /tests/test_mxnet_3.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'mxnet' : { 11 | 'imagenet1k-resnet-152' : [ 12 | #TestModels.onnx_emit, 13 | #TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | #TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | #TestModels.mxnet_emit, 18 | #TestModels.pytorch_emit, 19 | #TestModels.tensorflow_emit 20 | ] 21 | }} 22 | 23 | 24 | def test_mxnet(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('mxnet', tester.mxnet_parse) 28 | 29 | 30 | if __name__ == '__main__': 31 | test_mxnet() 32 | 33 | 
-------------------------------------------------------------------------------- /tests/test_mxnet_4.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 11 | 'mxnet' : { 12 | # Run too slow on Travis. 13 | 'imagenet1k-resnext-101-64x4d' : [ 14 | TestModels.onnx_emit, 15 | TestModels.caffe_emit, 16 | # cntk_emit OOM on Travis 17 | TestModels.cntk_emit, 18 | TestModels.coreml_emit, 19 | TestModels.keras_emit, 20 | TestModels.mxnet_emit, 21 | TestModels.pytorch_emit, 22 | TestModels.tensorflow_emit 23 | ] 24 | }} 25 | 26 | 27 | def test_mxnet(): 28 | test_table = get_test_table() 29 | tester = TestModels(test_table) 30 | tester._test_function('mxnet', tester.mxnet_parse) 31 | 32 | 33 | if __name__ == '__main__': 34 | test_mxnet() 35 | 36 | -------------------------------------------------------------------------------- /tests/test_mxnet_5.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | import six 7 | from conversion_imagenet import TestModels 8 | 9 | def get_test_table(): 10 | return { 'mxnet' : { 11 | 'imagenet1k-resnext-50' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | }} 22 | 23 | 24 | def test_mxnet(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('mxnet', tester.mxnet_parse) 28 | 29 | 30 | if __name__ == '__main__': 31 | test_mxnet() 32 | 33 | 
-------------------------------------------------------------------------------- /tests/test_paddle.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | from conversion_imagenet import is_paddle_supported 8 | 9 | def get_test_table(): 10 | return { 'paddle' : { 11 | 'resnet50' : [ 12 | TestModels.onnx_emit, 13 | #TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'resnet101' : [ 22 | #TestModels.onnx_emit, 23 | #TestModels.caffe_emit, 24 | #TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ], 31 | 'vgg16' : [ 32 | TestModels.onnx_emit, 33 | #TestModels.caffe_emit, 34 | #TestModels.cntk_emit, 35 | #TestModels.coreml_emit, 36 | #TestModels.keras_emit, 37 | #TestModels.mxnet_emit, 38 | #TestModels.pytorch_emit, 39 | #TestModels.tensorflow_emit 40 | ], 41 | }} 42 | 43 | def test_paddle(): 44 | if not is_paddle_supported(): 45 | return 46 | # omit tensorflow lead to crash 47 | import tensorflow as tf 48 | test_table = get_test_table() 49 | tester = TestModels(test_table) 50 | tester._test_function('paddle', tester.paddle_parse) 51 | 52 | 53 | if __name__ == '__main__': 54 | test_paddle() 55 | -------------------------------------------------------------------------------- /tests/test_pytorch.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'pytorch' : 10 
| { 11 | 'densenet201' : [ 12 | #TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_pytorch(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('pytorch', tester.pytorch_parse) 28 | 29 | 30 | if __name__ == '__main__': 31 | test_pytorch() 32 | -------------------------------------------------------------------------------- /tests/test_pytorch_2.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'pytorch' : 10 | { 11 | 'inception_v3' : [ 12 | #TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | #TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_pytorch(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('pytorch', tester.pytorch_parse) 28 | 29 | 30 | if __name__ == '__main__': 31 | test_pytorch() 32 | -------------------------------------------------------------------------------- /tests/test_pytorch_3.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'pytorch' : 10 | { 11 | 'alexnet' : [ 12 | #TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | 
TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_pytorch(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('pytorch', tester.pytorch_parse) 28 | 29 | 30 | if __name__ == '__main__': 31 | test_pytorch() 32 | -------------------------------------------------------------------------------- /tests/test_pytorch_4.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'pytorch' : 10 | { 11 | 'resnet152' : [ 12 | #TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_pytorch(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('pytorch', tester.pytorch_parse) 28 | 29 | 30 | if __name__ == '__main__': 31 | test_pytorch() 32 | -------------------------------------------------------------------------------- /tests/test_pytorch_5.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'pytorch' : 10 | { 11 | 'vgg19' : [ 12 | #TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'vgg19_bn' : [ 22 | #TestModels.onnx_emit, 
23 | TestModels.caffe_emit, 24 | #TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ] 31 | } 32 | } 33 | 34 | def test_pytorch(): 35 | test_table = get_test_table() 36 | tester = TestModels(test_table) 37 | tester._test_function('pytorch', tester.pytorch_parse) 38 | 39 | 40 | if __name__ == '__main__': 41 | test_pytorch() 42 | -------------------------------------------------------------------------------- /tests/test_tensorflow.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow' : 10 | { 11 | 'vgg19' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_tensorflow(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('tensorflow', tester.tensorflow_parse) 28 | 29 | 30 | if __name__ == "__main__": 31 | test_tensorflow() 32 | -------------------------------------------------------------------------------- /tests/test_tensorflow_2.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow' : 10 | { 11 | 'inception_v1' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | 
TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'inception_v3' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ] 31 | } 32 | } 33 | 34 | def test_tensorflow(): 35 | test_table = get_test_table() 36 | tester = TestModels(test_table) 37 | tester._test_function('tensorflow', tester.tensorflow_parse) 38 | 39 | 40 | if __name__ == "__main__": 41 | test_tensorflow() 42 | -------------------------------------------------------------------------------- /tests/test_tensorflow_3.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow' : 10 | { 11 | 'mobilenet_v1_1.0' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'mobilenet_v2_1.0_224' : [ 22 | TestModels.onnx_emit, 23 | TestModels.caffe_emit, 24 | TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ] 31 | } 32 | } 33 | 34 | def test_tensorflow(): 35 | test_table = get_test_table() 36 | tester = TestModels(test_table) 37 | tester._test_function('tensorflow', tester.tensorflow_parse) 38 | 39 | 40 | if __name__ == "__main__": 41 | test_tensorflow() 42 | -------------------------------------------------------------------------------- /tests/test_tensorflow_4.py: -------------------------------------------------------------------------------- 1 | from __future__ 
import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow' : 10 | { 11 | 'resnet_v1_152' : [ 12 | #TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_tensorflow(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('tensorflow', tester.tensorflow_parse) 28 | 29 | 30 | if __name__ == "__main__": 31 | test_tensorflow() 32 | -------------------------------------------------------------------------------- /tests/test_tensorflow_5.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow' : 10 | { 11 | 'resnet_v2_152' : [ 12 | #TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_tensorflow(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('tensorflow', tester.tensorflow_parse) 28 | 29 | 30 | if __name__ == "__main__": 31 | test_tensorflow() 32 | -------------------------------------------------------------------------------- /tests/test_tensorflow_6.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from 
conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow' : 10 | { 11 | 'inception_resnet_v2' : [ 12 | TestModels.onnx_emit, 13 | TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | #TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ] 21 | } 22 | } 23 | 24 | def test_tensorflow(): 25 | test_table = get_test_table() 26 | tester = TestModels(test_table) 27 | tester._test_function('tensorflow', tester.tensorflow_parse) 28 | 29 | 30 | if __name__ == "__main__": 31 | test_tensorflow() 32 | -------------------------------------------------------------------------------- /tests/test_tensorflow_7.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow' : 10 | { 11 | # Cannot run on Travis since it seems to consume too much memory. 
12 | 'nasnet-a_large' : [ 13 | #TestModels.onnx_emit, 14 | #TestModels.caffe_emit, 15 | #TestModels.cntk_emit, 16 | #TestModels.coreml_emit, 17 | #TestModels.keras_emit, 18 | TestModels.mxnet_emit, 19 | TestModels.pytorch_emit, 20 | TestModels.tensorflow_emit 21 | ] 22 | } 23 | } 24 | 25 | def test_tensorflow(): 26 | test_table = get_test_table() 27 | tester = TestModels(test_table) 28 | tester._test_function('tensorflow', tester.tensorflow_parse) 29 | 30 | 31 | if __name__ == "__main__": 32 | test_tensorflow() 33 | -------------------------------------------------------------------------------- /tests/test_tensorflow_frozen.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | import os 5 | import sys 6 | from conversion_imagenet import TestModels 7 | 8 | def get_test_table(): 9 | return { 'tensorflow_frozen' : 10 | { 11 | 'inception_v1' : [ 12 | TestModels.onnx_emit, 13 | #TestModels.caffe_emit, 14 | #TestModels.cntk_emit, 15 | TestModels.coreml_emit, 16 | TestModels.keras_emit, 17 | TestModels.mxnet_emit, 18 | #TestModels.pytorch_emit, 19 | TestModels.tensorflow_emit 20 | ], 21 | 'inception_v3' : [ 22 | TestModels.onnx_emit, 23 | #TestModels.caffe_emit, 24 | #TestModels.cntk_emit, 25 | TestModels.coreml_emit, 26 | TestModels.keras_emit, 27 | TestModels.mxnet_emit, 28 | #TestModels.pytorch_emit, 29 | TestModels.tensorflow_emit 30 | ], 31 | 'mobilenet_v1_1.0' : [ 32 | TestModels.onnx_emit, 33 | #TestModels.caffe_emit, 34 | #TestModels.cntk_emit, 35 | TestModels.coreml_emit, 36 | TestModels.keras_emit, 37 | TestModels.mxnet_emit, 38 | #TestModels.pytorch_emit, 39 | TestModels.tensorflow_emit 40 | ] 41 | } 42 | } 43 | 44 | 45 | def test_tensorflow_frozen(): 46 | tester = TestModels() 47 | tester._test_function('tensorflow_frozen', tester.tensorflow_frozen_parse) 48 | 49 | 50 | if __name__ == '__main__': 51 | test_tensorflow_frozen() 52 | 
-------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import print_function 3 | 4 | __all__ = ['ensure_dir', 'checkfrozen', 'CorrectnessTest'] 5 | 6 | import os 7 | import unittest 8 | import numpy as np 9 | 10 | def _compute_SNR(x,y): 11 | noise = x - y 12 | noise_var = np.sum(noise ** 2) / len(noise) + 1e-7 13 | signal_energy = np.sum(y ** 2) / len(y) 14 | max_signal_energy = np.amax(y ** 2) 15 | SNR = 10 * np.log10(signal_energy / noise_var) 16 | PSNR = 10 * np.log10(max_signal_energy / noise_var) 17 | return SNR, PSNR 18 | 19 | 20 | def _compute_max_relative_error(x, y): 21 | from six.moves import xrange 22 | rerror = 0 23 | index = 0 24 | for i in xrange(len(x)): 25 | den = max(1.0, np.abs(x[i]), np.abs(y[i])) 26 | if np.abs(x[i]/den - y[i] / den) > rerror: 27 | rerror = np.abs(x[i] / den - y[i] / den) 28 | index = i 29 | return rerror, index 30 | 31 | 32 | def _compute_L1_error(x, y): 33 | return np.linalg.norm(x - y, ord=1) 34 | 35 | 36 | def ensure_dir(f): 37 | d = os.path.dirname(f) 38 | if not os.path.exists(d): 39 | os.makedirs(d) 40 | 41 | def checkfrozen(f): 42 | if f == 'tensorflow_frozen': 43 | return 'tensorflow' 44 | else: 45 | return f 46 | 47 | 48 | class CorrectnessTest(unittest.TestCase): 49 | 50 | err_thresh = 0.15 51 | snr_thresh = 12 52 | psnr_thresh = 30 53 | 54 | @classmethod 55 | def setUpClass(cls): 56 | """ Set up the unit test by loading common utilities. 
57 | """ 58 | pass 59 | 60 | 61 | def _compare_outputs(self, original_framework, target_framework, network_name, original_predict, converted_predict, need_assert=True): 62 | # Function self.assertEquals has deprecated, change to assertEqual 63 | if (converted_predict is None or original_predict is None) and not need_assert: 64 | return 65 | 66 | # self.assertEqual(original_predict.shape, converted_predict.shape) 67 | original_predict = original_predict.flatten() 68 | converted_predict = converted_predict.flatten() 69 | len1 = original_predict.shape[0] 70 | len2 = converted_predict.shape[0] 71 | length = min(len1, len2) 72 | original_predict = np.sort(original_predict)[::-1] 73 | converted_predict = np.sort(converted_predict)[::-1] 74 | original_predict = original_predict[0:length] 75 | converted_predict = converted_predict[0:length] 76 | error, ind = _compute_max_relative_error(converted_predict, original_predict) 77 | L1_error = _compute_L1_error(converted_predict, original_predict) 78 | SNR, PSNR = _compute_SNR(converted_predict, original_predict) 79 | print("error:", error) 80 | print("L1 error:", L1_error) 81 | print("SNR:", SNR) 82 | print("PSNR:", PSNR) 83 | 84 | if need_assert: 85 | self.assertGreater(SNR, self.snr_thresh, "Error in converting {} from {} to {}".format(network_name, original_framework, target_framework)) 86 | self.assertGreater(PSNR, self.psnr_thresh, "Error in converting {} from {} to {}".format(network_name, original_framework, target_framework)) 87 | self.assertLess(error, self.err_thresh, "Error in converting {} from {} to {}".format(network_name, original_framework, target_framework)) 88 | --------------------------------------------------------------------------------