├── .ccls ├── .eslintrc ├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── binding.gyp ├── cc ├── detection.cc ├── detection.h └── main.cc ├── gulpfile.js ├── index.js ├── lib ├── classifier.js ├── detector.js ├── download.js ├── image.js ├── labels.js └── rpc.js ├── model ├── .npmignore ├── group1-shard1of1 └── model.json ├── package-lock.json ├── package.json ├── postinstall.js └── test ├── find-e2e-specs.js ├── fixtures ├── ap-mail.png ├── birds.jpg ├── cart.png ├── folder.png ├── menu.png ├── menu_small.png ├── microphone.png └── screen.jpg ├── model-e2e-specs.js ├── native-e2e-specs.js └── test-android.js /.ccls: -------------------------------------------------------------------------------- 1 | -isystem 2 | /usr/local/include 3 | -isystem 4 | /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1 5 | -isystem 6 | /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/11.0.0/include 7 | -isystem 8 | /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include 9 | -isystem 10 | /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include 11 | -isystem 12 | /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/System/Library/Frameworks 13 | -I 14 | ./node_modules/node-addon-api 15 | -I 16 | ./node_modules/@tensorflow/tfjs-node/deps/include 17 | -------------------------------------------------------------------------------- /.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "appium" 3 | } 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | node_modules 3 | test-ai-classifier.code-workspace 4 | model/obj_detection_model 5 | build-js 6 | .vim 7 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "classifier-proto"] 2 | path = classifier-proto 3 | url = git@github.com:testdotai/classifier-proto.git 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright JS Foundation and other contributors, https://js.foundation 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Test.ai Classifier Server and Appium Plugin 2 | 3 | This is an experimental plugin for [Appium](https://appium.io) that enables 4 | test automation of mobile apps using [Test.ai](https://test.ai)'s 5 | machine-learning element type classifier. It allows you to find Appium elements 6 | using a semantic label (like "cart" or "microphone" or "arrow") instead of 7 | having to dig through your app hierarchy. The same labels can be used to find 8 | elements with the same general shape across different apps and different visual 9 | designs. 10 | 11 | In addition to being a plugin for Appium, this project also contains a small 12 | server that can be run, bundled with clients in various programming languages 13 | that provide the same functionality for [Selenium](https://seleniumhq.org) (see further below). 14 | 15 | If you haven't worked with Appium element finding plugins before, you should 16 | first check out the [Appium element finding plugins 17 | doc](https://github.com/appium/appium/blob/52e5bf1217f08b963136254222ba2ebef428f0d1/docs/en/advanced-concepts/element-finding-plugins.md). 18 | 19 | ## System Setup 20 | 21 | First, you'll need some system dependencies for image processing. 22 | 23 | ### macOS 24 | 25 | ``` 26 | brew install pkg-config cairo pango libpng jpeg giflib 27 | ``` 28 | 29 | ### Linux 30 | 31 | ``` 32 | sudo apt-get install pkg-config libcairo2-dev libpango* libpng-dev libjpeg-dev giflib* 33 | ``` 34 | You may have to install each package individually if you run into issues. 35 | 36 | ### Windows 37 | 38 | TBD (not yet tested or supported) 39 | 40 | ## Appium Setup 41 | 42 | Appium's element finding plugin feature is experimental, so you will need to be 43 | using Appium version 1.9.2-beta.2 at a minimum. Also, be sure you are using 44 | either the XCUITest driver (for iOS) or the UiAutomator2 or Espresso drivers (for 45 | Android). The older iOS and Android drivers do not support the required Appium 46 | capabilities, and are deprecated in any case. 47 | 48 | If you wish to take advantage of the object detection mode for the plugin (see 49 | below), you'll need Appium 1.13.0 or higher. 50 | 51 | ## Classifier Setup 52 | 53 | To make this plugin available to Appium, you have three options: 54 | 55 | 1. Simply go to the directory where Appium is installed (whether a git clone, 56 | or installed in the global `node_modules` directory by NPM), and run `npm 57 | install test-ai-classifier` to install this plugin into Appium's dependency 58 | tree and make it available. 59 | 2. Install it anywhere on your filesystem and use an absolute path as the 60 | module name (see below). 61 | 3. Install it globally (`npm install -g test-ai-classifier`) and make sure your 62 | `NODE_PATH` is set to the global `node_modules` dir. 63 | 64 | ## Usage 65 | 66 | Element finding plugins are made available via a special locator strategy, 67 | `-custom`. To tell Appium which plugin to use when this locator strategy is 68 | requested, send in the module name and a selector shortcut as the 69 | `customFindModules` capability. For example, to use this plugin, set the 70 | `customFindModules` capability to something like `{"ai": "test-ai-classifier"}` 71 | (here `ai` is the "selector shortcut" and `test-ai-classifier` is the "module 72 | name").
This will enable access to the plugin when using selectors of the form 73 | `ai:foo` (or simply `foo` if this is the only custom find module you are using 74 | with Appium). 75 | 76 | In addition to this capability, you'll need to set another Appium capability, 77 | `shouldUseCompactResponses`, to `false`. This directs Appium to include extra 78 | information about elements while they are being found, which dramatically 79 | speeds up the process of getting inputs to this plugin. 80 | 81 | In your test, you can now make new findElement calls, for example: 82 | 83 | ```js 84 | driver.findElement('-custom', 'ai:cart'); 85 | ``` 86 | 87 | The above command (which will differ for each Appium client, of course), will 88 | use this plugin to find a shopping cart element on the screen. 89 | 90 | How did we know we could use "cart" as a label? There is a predefined list of 91 | available labels in `lib/labels.js`--check there to see if the elements you 92 | want to find match any of them. 93 | 94 | ### Match Confidence 95 | 96 | Using the `testaiConfidenceThreshold` capability, you can set a confidence 97 | threshold below which the plugin will refuse to consider elements as matching 98 | your label. This capability should be a number between 0 and 1, where 1 means 99 | confidence must be perfect, and 0 means no confidence at all is required. 100 | 101 | This is a useful capability to set after reading the Appium logs from a failed 102 | element find; this plugin will tell you what the highest confidence of any 103 | element that matched your label was, so you could use that to modulate the 104 | confidence value. The default confidence level is `0.2`. 105 | 106 | ### Element Discovery Modes 107 | 108 | There are two ways that this plugin can attempt to find elements: 109 | 110 | 1. The default mode uses Appium to get a list of all leaf-node elements, and 111 | can be specified by setting the `testaiFindMode` capability to 112 | `element_lookup`. Images of these elements are collected and sent to the 113 | test.ai classifier for labeling. Matched elements are returned to your test 114 | script as full-blown `WebElement`s, just as if you were using any of the 115 | standard Appium locator strategies. 116 | 2. The alternative mode takes a single screenshot, and uses an object detection 117 | network to attempt to identify screen regions of interest. These regions are 118 | then sent into the classifier for labeling. Matched regions are returned to 119 | your test script as Appium ImageElements (meaning that all you can do with 120 | them is click/tap them). This mode can be specified by setting the 121 | `testaiFindMode` capability to `object_detection`. By default, no output is logged from the native object detection code (apart from what TensorFlow itself does), but this can be turned on by setting the `testaiObjDetectionDebug` capability to `true`. 122 | 123 | Each of these modes comes with different benefits and drawbacks: 124 | 125 | #### Pros/cons of `element_lookup` mode 126 | 127 | Element lookup mode returns full-blown elements to your test script, which 128 | means you can perform any standard actions on them. However, leaf-node elements 129 | are not always easy for the classifier to label. For example, in iOS it is 130 | common to have a single element with both an icon and text as part of the 131 | element, and this kind of element will never be labeled with high confidence. 132 | Element lookup mode is also especially slow in cases where there are many 133 | elements. 
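To tie the capabilities above together, here is what a full capability set using this plugin might look like (a sketch: the platform-related capabilities and app path are illustrative assumptions, and only the plugin-related keys come from this README):

```js
// Illustrative Appium capabilities for an Android session using this plugin
const caps = {
  platformName: 'Android',
  automationName: 'UiAutomator2',
  app: '/path/to/your/app.apk', // hypothetical app under test
  // make the plugin available under the 'ai' selector shortcut
  customFindModules: {ai: 'test-ai-classifier'},
  // required so element information comes back with find responses
  shouldUseCompactResponses: false,
  // optional tuning, as described above
  testaiConfidenceThreshold: 0.2,
  testaiFindMode: 'element_lookup',
};
```

With a session started from such capabilities, `driver.findElement('-custom', 'ai:cart')` should route through this plugin.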
134 | 135 | #### Pros/cons of `object_detection` mode 136 | 137 | Object detection mode is not limited to actual UI elements, as it deals only 138 | with an image of the screen. So, it can accurately find icons to label even if 139 | those icons are mixed with other content in their UI element form. Object 140 | detection is currently slow, but in principle it is faster (at least in the 141 | limit) than element lookup mode. The main drawback is that elements returned to 142 | your script are really just representations of screen regions, not full-blown 143 | UI elements. So all that can be done with them is clicking/tapping them (of 144 | course, that's typically all you would do with an icon anyway). 145 | 146 | 147 | 148 | Object detection mode relies on C/C++ code which is built on install. This 149 | code is portable but may not compile on some systems. 150 | 151 | ### Model Download 152 | 153 | The TensorFlow network used to run the object detection strategy is provided as a free download by Test.ai, and downloaded automatically on install. If something goes wrong or you want to download it manually, you can run: 154 | 155 | ``` 156 | node ./build-js/lib/download.js 157 | ``` 158 | 159 | This will not re-download the model if the MD5 hash of the model online matches what is currently downloaded. 160 | 161 | ## Classifier Server 162 | 163 | While the functionality provided by this project is available as a plugin for 164 | direct use with Appium, it can also be used for arbitrary purposes. In this 165 | mode, it must be run as a server, which accepts connections from clients 166 | written in a number of languages. These clients can ask the server to classify 167 | images. The clients also make available a method which takes a Selenium driver 168 | object and finds elements matching a label. 169 | 170 | ### Server Usage 171 | 172 | ``` 173 | test-ai-classifier -h <host> -p <port> 174 | ``` 175 | 176 | The default host is `127.0.0.1` and the default port is `50051`. 177 | 178 | For information on how to use the clients to take advantage of the server's functionality, see the repositories for each of them: 179 | 180 | * [Java client](https://github.com/testdotai/classifier-client-java) 181 | * [Python client](https://github.com/testdotai/classifier-client-python) 182 | * [Node client](https://github.com/testdotai/classifier-client-node) 183 | * [Ruby client](https://github.com/testdotai/classifier-client-ruby) 184 | 185 | There are some limitations to how the Selenium support works, because it relies 186 | on the `getElementScreenshot` functionality, which is not yet supported well by 187 | all the major browsers. (In my testing, Chrome was the most reliable.) 188 | 189 | ## Development 190 | 191 | There are some tests, but they must be run ad hoc. See the tests themselves for 192 | assumptions.
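For example, a single spec file can be run with Mocha after transpiling (a sketch under assumptions: the `transpile` task comes from the appium-gulp-plugins boilerplate configured in `gulpfile.js`, the spec path comes from the `test` directory above, and the timeout value is arbitrary):

```
npx gulp transpile
npx mocha --timeout 600000 build-js/test/model-e2e-specs.js
```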
193 | -------------------------------------------------------------------------------- /binding.gyp: -------------------------------------------------------------------------------- 1 | { 2 | "targets": [{ 3 | "target_name": "test-ai-classifier", 4 | "cflags!": [ "-fno-exceptions" ], 5 | "cflags_cc!": [ "-fno-exceptions" ], 6 | "sources": [ 7 | "cc/main.cc", 8 | "cc/detection.cc" 9 | ], 10 | 'include_dirs': [ 11 | "<!@(node -p \"require('node-addon-api').include\")", 12 | "./node_modules/@tensorflow/tfjs-node/deps/include" 13 | ], 14 | 'dependencies': [ 15 | "<!(node -p \"require('node-addon-api').gyp\")" 16 | ], 17 | 'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ] 18 | }] 19 | } -------------------------------------------------------------------------------- /cc/detection.cc: -------------------------------------------------------------------------------- 1 | #include "detection.h" 2 | #include <fstream> 3 | #include <iostream> 4 | 5 | using namespace std; 6 | 7 | void Deallocator (void* data, size_t size, void* arg) { 8 | // no-op: the encoded image buffer handed to TF_NewTensor is owned by the 9 | // Detection instance and freed in Detection::cleanup, so nothing to do here 10 | } 11 | 12 | void printTensorDims (vector<TF_Tensor*> tensors) { 13 | for (TF_Tensor* tensor : tensors) { 14 | int dims = TF_NumDims(tensor); 15 | string shape = "["; 16 | for (int j = 0; j < dims; j++) { 17 | shape = shape.append(to_string(TF_Dim(tensor, j))).append(", "); 18 | } 19 | shape = shape.append("]"); 20 | cout << "Shape for tensor: " << shape << endl; 21 | } 22 | } 23 | 24 | ImageBuffer readFile (string file) { 25 | ifstream filedata(file, ifstream::binary); 26 | 27 | if (!filedata) { 28 | return {nullptr, -1}; 29 | } 30 | 31 | filedata.seekg(0, filedata.end); 32 | int len = filedata.tellg(); 33 | filedata.seekg(0, filedata.beg); 34 | 35 | char* buf = new char[len]; 36 | filedata.read(buf, len); 37 | return {buf, len}; 38 | } 39 | 40 | Detection::Detection (string modelPath, string imgPath, float detectThreshold, bool debug) { 41 | this->modelPath = modelPath; 42 | this->imgPath = imgPath; 43 | this->detectThreshold = detectThreshold; 44 | this->debug = debug; // whether or not to print debug output 45 | response.status = -1; // set the detection status to failed by default 46 | } 47 | 48 | void Detection::detect () { 49 | // If any subroutine fails, short-circuit and return, and wait for the caller to call 50 | // getDetectResponse 51 | if (!initSession()) return; 52 | if (!initOperations()) return; 53 | if (!runSession()) return; 54 | cleanup(); 55 | } 56 | 57 | bool Detection::initSession () { 58 | graph = TF_NewGraph(); 59 | status = TF_NewStatus(); 60 | 61 | // "serve" is the magic tag to get the model in the mode we want 62 | vector<const char*> tags = {"serve"}; 63 | TF_SessionOptions* options = TF_NewSessionOptions(); 64 | 65 | // get the session out of the model path 66 | session = TF_LoadSessionFromSavedModel(options, nullptr, modelPath.c_str(), tags.data(), 1, graph, nullptr, status); 67 | TF_DeleteSessionOptions(options); 68 | 69 | if (TF_GetCode(status) != TF_OK) { 70 | return setErrorWithStatus("Could not load saved model."); 71 | } 72 | 73 | return true; 74 | } 75 | 76 | bool Detection::setErrorWithStatus(string msg) { 77 | response.message = msg + " Message was: " + TF_Message(status); 78 | TF_DeleteStatus(status); 79 | return false; 80 | } 81 | 82 | bool Detection::setError(string msg) { 83 | response.message = msg; 84 | return false; 85 | } 86 | 87 | bool Detection::setImageTensor() { 88 | ImageBuffer image = readFile(imgPath); 89 | if (image.bufferLen == -1) { 90 | return setError("Could not read image data at path provided."); 91 | } 92 | 93 | if (debug) cout << "image data is: " << image.bufferLen << " bytes long" << endl; 94 | 95 | // TF requires that the image data start 8 bytes into the char array 96 | int tensorByteOffset = 8; 97 | 98 | // get the size that TF thinks our image data will take up 99 | size_t encodedSize = TF_StringEncodedSize(image.bufferLen); 100 | 101 | // the total size will be that plus the magic byte offset length 102 | size_t totalSize = encodedSize + tensorByteOffset; 103 |
104 | // zero out the first magic bytes 105 | encodedImage = (char*)malloc(totalSize); 106 | for (int i = 0; i < tensorByteOffset; i++) { 107 | encodedImage[i] = 0; 108 | } 109 | 110 | // direct TF to encode our image data into the encodedImage var, 8 bytes in 111 | TF_StringEncode(image.buffer, image.bufferLen, encodedImage + tensorByteOffset, encodedSize, status); 112 | delete[] image.buffer; // done with the raw file data once it has been encoded 113 | if (TF_GetCode(status) != TF_OK) { 114 | setErrorWithStatus("Could not encode image data."); 115 | return false; 116 | } 117 | 118 | if (debug) cout << "constructing tensor from image data" << endl; 119 | 120 | // the image tensor will be 1-dimensional 121 | const int64_t dims[] = {1}; 122 | imageTensor = TF_NewTensor(TF_STRING, dims, 1, encodedImage, totalSize, &Deallocator, 0); 123 | 124 | if (imageTensor == nullptr) { 125 | return setError("Could not init image input tensor."); 126 | } 127 | 128 | if (debug) cout << "tensor has " << TF_NumDims(imageTensor) << " dims, and length " 129 | << TF_Dim(imageTensor, 0) << " in the 0th dim" << endl; 130 | 131 | return true; 132 | } 133 | 134 | bool Detection::initOperations() { 135 | // TF_Output is a struct with a graph operation and an index. Our indexes are all 0 136 | TF_Output input = {TF_GraphOperationByName(graph, "encoded_image_string_tensor"), 0}; 137 | if (input.oper == nullptr) { 138 | return setError("Could not init input operation."); 139 | } 140 | 141 | if (debug) cout << "expected dims of encoded_image_string_tensor: " 142 | << TF_GraphGetTensorNumDims(graph, input, status) << endl; 143 | 144 | sessionArgs.inputOps.push_back(input); 145 | 146 | if (!setImageTensor()) { 147 | return false; 148 | } 149 | 150 | sessionArgs.inputVals.push_back(imageTensor); 151 | 152 | vector<string> outputNames = { 153 | "detection_boxes", 154 | "detection_scores", 155 | "detection_classes", 156 | "num_detections" 157 | }; 158 | 159 | for (string name : outputNames) { 160 | TF_Output output = {TF_GraphOperationByName(graph, name.c_str()), 0}; 161 | if (output.oper == nullptr) { 162 | const string message = "Could not init output graph " + name; 163 | return setError(message); 164 | } 165 | sessionArgs.outputOps.push_back(output); 166 | } 167 | sessionArgs.outputVals = vector<TF_Tensor*>(4); 168 | 169 | if (debug) cout << "will run session with input values size " << sessionArgs.inputVals.size() 170 | << " and output values size " << sessionArgs.outputVals.size() 171 | << " and inputs size " << sessionArgs.inputOps.size() 172 | << " and outputs size " << sessionArgs.outputOps.size() << endl; 173 | 174 | return true; 175 | } 176 | 177 | bool Detection::runSession () { 178 | // finally, actually run the session 179 | TF_SessionRun( 180 | session, 181 | nullptr, 182 | sessionArgs.inputOps.data(), 183 | sessionArgs.inputVals.data(), 184 | sessionArgs.inputVals.size(), 185 | sessionArgs.outputOps.data(), 186 | sessionArgs.outputVals.data(), 187 | sessionArgs.outputVals.size(), 188 | nullptr, 189 | 0, 190 | nullptr, 191 | status); 192 | 193 | if (TF_GetCode(status) != TF_OK) { 194 | return setErrorWithStatus("Could not run TF session."); 195 | } 196 | 197 | // close the session afterward 198 | TF_CloseSession(session, status); 199 | if (TF_GetCode(status) != TF_OK) { 200 | return setErrorWithStatus("Could not close TF session cleanly."); 201 | } 202 | 203 | // and delete it too 204 | TF_DeleteSession(session, status); 205 | if (TF_GetCode(status) != TF_OK) { 206 | return setErrorWithStatus("Could not delete TF session cleanly."); 207 | } 208 | 209 | // get the bounds and scores data as flat float arrays from the output tensors
210 | float* boxesData = static_cast<float*>(TF_TensorData(sessionArgs.outputVals[0])); 211 | float* scoresData = static_cast<float*>(TF_TensorData(sessionArgs.outputVals[1])); 212 | 213 | // the number of detections we found will be the length of the 1st dimension of an output tensor 214 | data.numDetections = TF_Dim(sessionArgs.outputVals[0], 1); 215 | 216 | // use the vector methods to put the raw data into c++ vectors that are easier to work with 217 | data.boxes.assign(boxesData, boxesData + data.numDetections * 4); // 4 coordinates per box 218 | data.scores.assign(scoresData, scoresData + data.numDetections); 219 | 220 | response.status = 0; // we've gotten all the data out, so status is successful 221 | response.message = "OK"; 222 | return true; 223 | } 224 | 225 | DetectResponse Detection::getDetectResponse (Napi::Env env) { 226 | // if we had a failure, no need to do anything, just return response 227 | if (response.status == -1) { 228 | return response; 229 | } 230 | 231 | // otherwise build node values from response 232 | response.detected = buildNodeValues(env); 233 | return response; 234 | } 235 | 236 | Napi::Array Detection::buildNodeValues (Napi::Env env) { 237 | // data.boxes comes in as a flat array, but every set of 4 values actually corresponds 238 | // to a single detected object. what we want is a 2-dim vector instead. 239 | vector<vector<float>> boxMatrix(data.numDetections, vector<float>(4)); 240 | for (int i = 0; i < data.numDetections * 4; i++) { 241 | int idx = i / 4; // integer division gives the index of the detected object 242 | boxMatrix[idx][i % 4] = data.boxes[i]; 243 | } 244 | 245 | Napi::Array detected = Napi::Array::New(env); 246 | int goodDetections = 0; // the number of detections which passed the confidence threshold 247 | for (int i = 0; i < data.numDetections; i++) { 248 | if (data.scores[i] >= detectThreshold) {
249 | if (debug) cout << "Found entity with score of: " << data.scores[i] << ". Its % bounds are: [" 250 | << boxMatrix[i][1] << ", " << boxMatrix[i][0] << "] -> [" << boxMatrix[i][3] 251 | << ", " << boxMatrix[i][2] << "]" << endl; 252 | 253 | // construct a node object with the response data, including bounds and confidence 254 | Napi::Object detection = Napi::Object::New(env); 255 | detection.Set("confidence", data.scores[i]); 256 | detection.Set("xmin", boxMatrix[i][1]); 257 | detection.Set("ymin", boxMatrix[i][0]); 258 | detection.Set("xmax", boxMatrix[i][3]); 259 | detection.Set("ymax", boxMatrix[i][2]); 260 | 261 | // add the object to the array at the index corresponding to goodDetections 262 | // and increment, so we add items to the array without any gaps 263 | detected.Set(goodDetections, detection); 264 | goodDetections++; 265 | } 266 | } 267 | 268 | return detected; 269 | } 270 | 271 | void Detection::cleanup () { 272 | for (TF_Tensor* tensor : sessionArgs.outputVals) { 273 | TF_DeleteTensor(tensor); 274 | } 275 | TF_DeleteGraph(graph); 276 | TF_DeleteStatus(status); 277 | free(encodedImage); // since we 'malloc'ed this earlier 278 | } -------------------------------------------------------------------------------- /cc/detection.h: -------------------------------------------------------------------------------- 1 | #ifndef TF_DETECTION 2 | #define TF_DETECTION 3 | 4 | #include <napi.h> 5 | #include <vector> 6 | #include "../node_modules/@tensorflow/tfjs-node/deps/include/tensorflow/c/c_api.h" 7 | 8 | using namespace std; 9 | 10 | // Container for a detection result, which could be a failure with an error message 11 | struct DetectResponse { 12 | // result status: 0 for success, -1 for failure 13 | int status; 14 | 15 | // error message 16 | string message; 17 | 18 | // array of detected object data 19 | Napi::Array detected; 20 | }; 21 | 22 | // Container for the arguments to the TF run session command 23 | struct SessionArgs { 24 | // TF operations representing the inputs 25 | vector<TF_Output> inputOps; 26 | 27 | // Actual input tensors 28 | vector<TF_Tensor*> inputVals; 29 | 30 | // TF operations representing the outputs 31 | vector<TF_Output> outputOps; 32 | 33 | // Empty array of output tensors (will be filled with output data by TF) 34 | vector<TF_Tensor*> outputVals; 35 | }; 36 | 37 | // Container for the result of a TF run 38 | struct DetectionData { 39 | // the number of objects detected in the image 40 | int numDetections; 41 | 42 | // flat array of coordinate data of detected objects 43 | // coordinates are in ratio of image height/width 44 | vector<float> boxes; 45 | 46 | // array of score (confidence) data (0-1) 47 | vector<float> scores; 48 | }; 49 | 50 | // Container for the image's data and size 51 | struct ImageBuffer { 52 | char* buffer; 53 | int bufferLen; 54 | }; 55 | 56 | // Function that will be called by TF when it wants to delete a tensor's data 57 | void Deallocator(void* data, size_t size, void* arg); 58 | 59 | // (currently-unused) helper function to print the dimensions of a tensor 60 | void printTensorDims(vector<TF_Tensor*> tensors); 61 | 62 | // Get a data buffer from reading a file. Used to get the raw binary data from the image
63 | ImageBuffer readFile(string file); 64 | 65 | // Class representing an attempt to detect objects in an image using TF 66 | class Detection { 67 | public: 68 | // Constructor for the Detection class 69 | Detection (string modelPath, string imgPath, float detectThreshold, bool debug); 70 | 71 | // Run through the detection process and short-circuit if any step fails 72 | void detect (); 73 | 74 | // Get the result of the detection 75 | DetectResponse getDetectResponse (Napi::Env env); 76 | 77 | private: 78 | TF_Graph* graph; // TF graph populated by loading the saved model 79 | TF_Status* status; // TF status object reused in most TF commands 80 | TF_Session* session; // TF session used to run the detection 81 | float detectThreshold; // Confidence (0-1) below which results will be ignored 82 | string modelPath; // Path to the saved TF model 83 | string imgPath; // Path to the image in which to detect objects 84 | DetectionData data; // Data output from the TF session 85 | DetectResponse response; // Container for response to the main process 86 | char* encodedImage; // Image data encoded in TF format for use as input in the graph 87 | TF_Tensor* imageTensor; // Tensor version of the input image 88 | SessionArgs sessionArgs; // Container for arguments to the run session command 89 | bool debug; // Whether or not to show debug output 90 | 91 | // Helper function to set the class-internal response message 92 | bool setError(string msg); 93 | 94 | // Helper function to extract error message from TF status and clean up 95 | bool setErrorWithStatus (string msg); 96 | 97 | // Set up the TF graph and session 98 | bool initSession (); 99 | 100 | // Initialize the inputs and outputs that will be used with the TF session 101 | bool initOperations(); 102 | 103 | // Get binary data from an image path and turn it into a TF tensor 104 | bool setImageTensor (); 105 | 106 | // Run the TF session and collect the output data 107 | bool runSession(); 108 | 109 | // Ensure we clean up objects and free necessary memory 110 | void cleanup(); 111 | 112 | // Turn the TF output into Napi values suitable for sending back to Node 113 | Napi::Array buildNodeValues(Napi::Env env); 114 | }; 115 | 116 | #endif -------------------------------------------------------------------------------- /cc/main.cc: -------------------------------------------------------------------------------- 1 | #include <string> 2 | #include "napi.h" 3 | #include "detection.h" 4 | #include "../node_modules/@tensorflow/tfjs-node/deps/include/tensorflow/c/c_api.h" 5 | 6 | Napi::Value Detect(const Napi::CallbackInfo &info) { 7 | Napi::Env env = info.Env(); 8 | if (info.Length() < 3) { 9 | Napi::TypeError::New(env, "You need to pass in the path to a TF model, a path to the image to detect in, and a detection threshold (0-1)").ThrowAsJavaScriptException(); 10 | return env.Null(); 11 | } 12 | if (!info[0].IsString()) { 13 | Napi::TypeError::New(env, "Path to TF model should be a string").ThrowAsJavaScriptException(); 14 | return env.Null(); 15 | } 16 | if (!info[1].IsString()) { 17 | Napi::TypeError::New(env, "Image path should be passed as a string").ThrowAsJavaScriptException(); 18 | return env.Null(); 19 | } 20 | if (!info[2].IsNumber()) { 21 | Napi::TypeError::New(env, "Threshold should be a number").ThrowAsJavaScriptException(); 22 | return env.Null(); 23 | } 24 | Napi::String modelPath = info[0].As<Napi::String>(); 25 | Napi::String imgPath = info[1].As<Napi::String>(); 26 | Napi::Number detectThreshold = info[2].As<Napi::Number>(); 27 |
28 | bool debug = false; 29 | 30 | if (info.Length() == 4 && info[3].IsBoolean()) { 31 | debug = info[3].As<Napi::Boolean>(); 32 | } 33 | 34 | Detection detection = Detection(modelPath.Utf8Value(), imgPath.Utf8Value(), detectThreshold.FloatValue(), debug); 35 | detection.detect(); 36 | DetectResponse res = detection.getDetectResponse(env); 37 | 38 | if (res.status == 0) { 39 | return res.detected; 40 | } 41 | 42 | Napi::Error::New(env, res.message).ThrowAsJavaScriptException(); 43 | return env.Null(); 44 | } 45 | 46 | Napi::Object Init(Napi::Env env, Napi::Object exports) { 47 | exports.Set(Napi::String::New(env, "TF_VERSION"), Napi::String::New(env, TF_Version())); 48 | exports.Set(Napi::String::New(env, "detect"), Napi::Function::New(env, Detect)); 49 | return exports; 50 | } 51 | 52 | NODE_API_MODULE(testaddon, Init) 53 | -------------------------------------------------------------------------------- /gulpfile.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | 4 | let gulp = require('gulp'), 5 | boilerplate = require('appium-gulp-plugins').boilerplate.use(gulp); 6 | 7 | boilerplate({ 8 | build: 'test-ai-classifier', 9 | transpileOut: 'build-js', 10 | coverage: { 11 | files: ['./test/unit/**/*-specs.js', '!./test/functional/**'], 12 | verbose: true 13 | }, 14 | }); 15 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | import find from './lib/classifier'; 2 | 3 | export { find }; 4 | -------------------------------------------------------------------------------- /lib/classifier.js: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import fs from 'fs'; 3 | import path from 'path'; 4 | import { ImageElement } from 'appium-base-driver'; 5 | import { imageFromScreenshot, getCanvasByRect, canvasFromImageData, 6 | elementImageFromScreenshot } from './image'; 7 | import { asyncmap, retry } from 'asyncbox'; 8 | import labels from './labels'; 9 | import * as tf from '@tensorflow/tfjs-node'; 10 | import { detect } from './detector'; 11 | 12 | const DEBUG_IMAGES = process.env.DEBUG_IMAGES || false; // truthy to write out images 13 | const DEBUG_IMAGE_DIR = process.env.DEBUG_IMAGE_DIR || `${process.env.HOME}/elements`; 14 | 15 | const DEFAULT_CONFIDENCE_THRESHOLD = 0.2; 16 | const OBJ_DETECTION_MODE = "object_detection"; 17 | const ELEMENT_LOOKUP_MODE = "element_lookup"; 18 | 19 | const TF_MODEL = path.resolve(__dirname, "..", "..", "model", "model.json"); 20 | 21 | const IMG_CHANNELS = 3; 22 | 23 | let _cached_model = null; 24 | 25 | async function getModel () { 26 | if (!_cached_model) { 27 | _cached_model = await tf.loadGraphModel(`file://${TF_MODEL}`); 28 | } 29 | return _cached_model; 30 | } 31 | 32 | async function tensorFromImage (canvas, height=224, width=224, mean=0, 33 | std=255) { 34 | let t = await tf.browser.fromPixels(canvas, IMG_CHANNELS); 35 | 36 | // convert to grayscale 37 | t = t.mean(2); // average down the r/g/b values 38 | t = tf.stack([t, t, t], 2); // then repeat each monochrome value 3 times to turn it back to rgb 39 | 40 | // change type to floats because we eventually want to normalize values in 41 | // the 0-1 range 42 | t = tf.cast(t, 'float32'); 43 | 44 | // now actually do the normalize 45 | t = tf.div(tf.sub(t, [mean]), [std]); 46 | 47 | // resize the image to the specified height and width 48 | t = tf.image.resizeBilinear(t, [height, width]);
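  // note: at this point the tensor is grayscale-as-RGB, normalized to [0, 1]
  // (e.g. with the defaults a raw pixel value of 128 becomes
  // (128 - 0) / 255 ≈ 0.502), and resized to the default 224x224 input size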
49 | 50 | return t; 51 | } 52 | 53 | async function tensorFromImages (canvases, height=224, width=224, mean=0, std=255) { 54 | const tensors = await asyncmap(canvases, async (canvas) => { 55 | return tensorFromImage(canvas, height, width, mean, std); 56 | }); 57 | return tf.stack(tensors); 58 | } 59 | 60 | async function saveImageFromTensor (tensor, imgFile) { 61 | if (tensor.shape.length === 4) { 62 | // if we have the tensor we get in tensorFromImage, it has an extra dim, so 63 | // squeeze it out 64 | tensor = tensor.squeeze(); 65 | } 66 | const [h, w] = tensor.shape; // shape is [height, width, channels] 67 | const pxArray = await tf.browser.toPixels(tensor); 68 | const cvs = canvasFromImageData(pxArray, w, h); 69 | fs.writeFileSync(imgFile, cvs.toBuffer('image/png')); 70 | } 71 | 72 | async function predictionFromImage (imgData, confidence, labelHint, imgExt = ".png") { 73 | const model = await getModel(); 74 | let t = await tensorFromImage(imgData); 75 | // if we're just finding a prediction for a single image, we need to add 76 | // a dimension on the front end because the model is looking for an array of 77 | // images 78 | t = t.expandDims(0); 79 | 80 | if (DEBUG_IMAGES) { 81 | await saveImageFromTensor(t, path.resolve(DEBUG_IMAGE_DIR, `tensor-for-${labelHint}.${imgExt}`)); 82 | } 83 | let pred = await model.executeAsync({Placeholder: t}, ['final_result']); 84 | pred = pred.squeeze(); 85 | const confMap = getConfidenceMap(await pred.data()); 86 | return predictionFromConfMap(confMap, confidence, labelHint); 87 | } 88 | 89 | async function predictionsFromImages (imgDatas, confidence, labelHint) { 90 | const model = await getModel(); 91 | const tensors = await tensorFromImages(imgDatas); 92 | const predTensors = await model.executeAsync({Placeholder: tensors}, ['final_result']); 93 | let preds = []; 94 | for (let i = 0; i < imgDatas.length; i++) { 95 | const confMapTensor = tf.slice(predTensors, [i, 0], 1).squeeze(); 96 | const confMap = getConfidenceMap(await confMapTensor.data()); 97 | preds.push(predictionFromConfMap(confMap, confidence, labelHint)); 98 | } 99 | return preds; 100 | } 101 | 102 | function predictionFromConfMap (confMap, confidence, desiredLabel) { 103 | // keep track of the confidence for the label the user is looking for so we 104 | // can provide that feedback if an element is not ultimately found 105 | let confForDesiredLabel = 0; 106 | let onlyDesiredLabel = confMap.filter(i => i[0] === desiredLabel); 107 | if (onlyDesiredLabel.length > 0) { 108 | confForDesiredLabel = onlyDesiredLabel[0][1]; 109 | } 110 | confMap.sort((a, b) => b[1] - a[1]); 111 | 112 | // if the most likely classified label is below our confidence threshold, 113 | // say it's unclassified 114 | let [foundLabel, foundConfidence] = confMap[0]; 115 | if (foundConfidence < confidence) { 116 | foundLabel = "unclassified"; 117 | } 118 | return [foundLabel, confForDesiredLabel, foundConfidence]; 119 | } 120 | 121 | function getConfidenceMap (predArr) { 122 | if (predArr.length !== labels.length) { 123 | throw new Error(`Prediction result array had ${predArr.length} elements ` + 124 | `but labels list had ${labels.length} elements.
They ` + 125 | `need to match.`); 126 | } 127 | let map = []; 128 | for (let i = 0; i < labels.length; i++) { 129 | if (labels[i].trim() !== "unclassified") { 130 | map.push([labels[i], predArr[i]]); 131 | } 132 | } 133 | return map; 134 | } 135 | 136 | async function getAllElements (driver, logger) { 137 | // TODO find a more performant way to do this (potentially requiring 138 | // platform-specific calls) 139 | logger.info("Retrieving data for all leaf-node elements on screen"); 140 | 141 | // retry since we can often get a staleelementexception when trying to find 142 | // all elements 143 | const els = await retry(5, driver.findElements.bind(driver), "xpath", "//*[not(child::*)]"); 144 | 145 | return els; 146 | } 147 | 148 | async function getScreenshot (driver, logger) { 149 | logger.info("Getting window size in case we need to scale screenshot"); 150 | const size = await driver.getWindowSize(); 151 | 152 | logger.info("Getting screenshot to use for classifier"); 153 | const screenshot = await driver.getScreenshot(); 154 | 155 | logger.info("Turning screenshot into HTML image for use with canvas"); 156 | const {canvas, image, imageData} = await imageFromScreenshot(screenshot, size, logger); 157 | 158 | return {canvas, image, imageData, size}; 159 | } 160 | 161 | async function getElementImages (els, screenshotImg, logger) { 162 | // keep a cache of images based on the rect that defines them, so we don't 163 | // make image slices we've already made before if elements have the same 164 | // rect 165 | let elImgCache = []; 166 | 167 | // match up each element with its slice of the screenshot 168 | logger.info("Getting screenshot slices for each element"); 169 | const elsAndImages = await asyncmap(els, async (e) => { 170 | // if we've already got the image slice in the cache, return it 171 | const existingCanvas = getCanvasByRect(elImgCache, e.rect); 172 | if (existingCanvas) { 173 | return [e, existingCanvas]; 174 | } 175 | 176 | // otherwise actually do the slicing, get the image, and add it to the 177 | // cache before returning it 178 | let res; 179 | try { 180 | res = await elementImageFromScreenshot(e, screenshotImg); 181 | if (DEBUG_IMAGES) { 182 | const imgFile = `${DEBUG_IMAGE_DIR}/element-(${e.rect.x}, ${e.rect.y}) ` + 183 | `[${e.rect.width} x ${e.rect.height}].png`; 184 | fs.writeFileSync(imgFile, res.canvas.toBuffer('image/png')); 185 | } 186 | } catch (err) { 187 | logger.warn(`Could not get element image from screenshot; its rect was ` + 188 | `${JSON.stringify(e.rect)}. 
Original err: ${err}`); 189 | return false; 190 | } 191 | 192 | elImgCache.push(res); 193 | return [e, res.canvas]; 194 | }); 195 | 196 | // filter out any elements for whom we could not extract images 197 | return elsAndImages.filter(Boolean); 198 | } 199 | 200 | async function getMatchingElements ({ 201 | elsAndImages, 202 | label, 203 | confidence, 204 | allowWeakerMatches, 205 | logger, 206 | returnMetadata = false 207 | }) { 208 | // turn each screenshot slice into a label prediction, still linked up with 209 | // the appium element 210 | logger.info("Making label predictions based on element images"); 211 | const start = Date.now(); 212 | const preds = await predictionsFromImages(elsAndImages.map(ei => ei[1]), confidence, label); 213 | const elapsed = Date.now() - start; 214 | logger.info(`Predictions for ${elsAndImages.length} element(s) took ${elapsed / 1000} seconds`); 215 | let elsAndPreds = []; 216 | for (let i = 0; i < elsAndImages.length; i++) { 217 | elsAndPreds.push([elsAndImages[i][0], ...preds[i]]); 218 | } 219 | 220 | // make sure we sort elsAndPreds by the confidence for the desired label; it 221 | // could be sorted by highest confidence in general, but we care most about 222 | // the confidence ranking for the label in question 223 | elsAndPreds.sort((a, b) => b[2] - a[2]); 224 | 225 | let matchingEls; 226 | if (allowWeakerMatches) { 227 | // if the user has elected to allow weaker matches, return any matches 228 | // whose conf for desired label is above the threshold, regardless of 229 | // whether it was the strongest match for that element 230 | matchingEls = elsAndPreds.filter(ep => ep[2] >= confidence); 231 | } else { 232 | // otherwise, get rid of any elements whose top label prediction doesn't 233 | // match what the user has requested 234 | matchingEls = elsAndPreds.filter(ep => ep[1] === label); 235 | } 236 | logger.info(`Found ${matchingEls.length} matching elements`); 237 | 238 | // short-circuit if we found no matching elements 239 | if (matchingEls.length < 1) { 240 | return []; 241 | } 242 | 243 | // sort the matching elements by confidence 244 | matchingEls.sort((a, b) => b[2] - a[2]); 245 | 246 | logger.info(`Highest confidence of any element for desired label ` + 247 | `'${label}' was ${matchingEls[0][2]}`); 248 | 249 | // if we're only returning elements because we found a match that was weaker 250 | // than some other label, let the user know 251 | if (matchingEls[0][1] !== label) { 252 | logger.warn(`Found element whose confidence for label '${label}' is above ` + 253 | `the confidence threshold, but the most likely label for this ` + 254 | `element is actually '${matchingEls[0][1]}'. 
Proceeding ` + 255 | `nonetheless since allowWeakerMatches is true.`); 256 | } 257 | 258 | // if the caller wants all the confidence data, send everything back 259 | if (returnMetadata) { 260 | return matchingEls; 261 | } 262 | 263 | // otherwise just return matching elements (letting appium decide whether to return one 264 | // or more to the user) 265 | return matchingEls.map(ep => ep[0]); 266 | } 267 | 268 | function getConfidenceThreshold (driver, logger) { 269 | let confidence = DEFAULT_CONFIDENCE_THRESHOLD; 270 | const confCap = driver.opts.testaiConfidenceThreshold; 271 | if (confCap) { 272 | if (!_.isNumber(confCap) || confCap < 0 || confCap > 1) { 273 | throw new Error(`The 'testaiConfidenceThreshold' capability must be a ` + 274 | `number between 0 and 1`); 275 | } 276 | confidence = confCap; 277 | logger.info(`Setting confidence threshold to overridden value of ${confCap}`); 278 | } else { 279 | logger.info(`Setting confidence threshold to default value of ${DEFAULT_CONFIDENCE_THRESHOLD}`); 280 | } 281 | return confidence; 282 | } 283 | 284 | async function findViaElementScreenshots (driver, logger, label, allowWeakerMatches) { 285 | // first make sure that we can get the 'rect' setting with the element 286 | // response, so we don't have to make additional queries to appium for that 287 | // information 288 | logger.info("Retrieving current settings to check element response attributes"); 289 | const curSetting = (await driver.getSettings()).elementResponseAttributes; 290 | const needToChangeSetting = !curSetting || curSetting.indexOf("rect") === -1; 291 | 292 | if (needToChangeSetting) { 293 | logger.info("We will need to update settings to include element response " + 294 | "attributes"); 295 | await driver.updateSettings({elementResponseAttributes: "rect"}); 296 | } 297 | 298 | const confidence = getConfidenceThreshold(driver, logger); 299 | 300 | try { 301 | const els = await getAllElements(driver, logger); 302 | const {image} = await getScreenshot(driver, logger); 303 | const elsAndImages = await getElementImages(els, image, logger); 304 | return await getMatchingElements({elsAndImages, label, confidence, allowWeakerMatches, logger}); 305 | } finally { 306 | // always clean up setting after the find 307 | if (needToChangeSetting) { 308 | logger.info(`Resetting element response attribute setting to original ` + 309 | `value: ${JSON.stringify(curSetting)}`); 310 | await driver.updateSettings({elementResponseAttributes: curSetting}); 311 | } 312 | } 313 | } 314 | 315 | async function findByObjectDetector (driver, logger, label, allowWeakerMatches) { 316 | logger.info("Will use object detection method for finding elements via AI"); 317 | const confidence = getConfidenceThreshold(driver, logger); 318 | 319 | const {image, imageData, size} = await getScreenshot(driver, logger); 320 | 321 | // TODO don't hardcode screenshot path; instead we want to find a way to pass 322 | // an image data buffer to the native c code 323 | const screenshotPath = path.resolve(__dirname, `${new Date().toUTCString()}_classifiertest.png`); 324 | logger.info(`Writing screenshot to ${screenshotPath} for use in object detection`); 325 | fs.writeFileSync(screenshotPath, imageData); 326 | 327 | // TODO don't hardcode threshold, allow to come from a setting 328 | logger.info("Gathering object candidates from tensorflow native"); 329 | const candidates = await detect(screenshotPath, driver.opts.testaiObjDetectionThreshold || 0.95, driver.opts.testaiObjDetectionDebug); 330 | logger.info("Candidates 
retrieved"); 331 | // we need to turn the obj detection candidates into objects that look 332 | // somewhat like actual elements with a 'rect' property 333 | let els = candidates.map((c) => { 334 | const {width: w, height: h} = size; 335 | const [xmin, ymin, xmax, ymax] = [c.xmin * w, c.ymin * h, c.xmax * w, c.ymax * h]; 336 | return {rect: {x: xmin, y: ymin, width: xmax - xmin, height: ymax - ymin}}; 337 | }); 338 | 339 | const elsAndImages = await asyncmap(els, async (el) => { 340 | const img = await elementImageFromScreenshot(el, image); 341 | if (DEBUG_IMAGES) { 342 | const imgFile = `${DEBUG_IMAGE_DIR}/element-(${el.rect.x}, ${el.rect.y}) ` + 343 | `[${el.rect.width} x ${el.rect.height}].png`; 344 | fs.writeFileSync(imgFile, img.canvas.toBuffer('image/png')); 345 | } 346 | return [el, img.canvas]; 347 | }); 348 | 349 | for (let i = 0; i < elsAndImages.length; i++) { 350 | // add an id to both the fake element and the image 351 | elsAndImages[i][0]._id = i; 352 | elsAndImages[i][1]._id = i; 353 | } 354 | 355 | els = await getMatchingElements({elsAndImages, label, confidence, allowWeakerMatches, logger}); 356 | logger.info("Registering image elements with driver"); 357 | const imageEls = els.map((el) => { 358 | const canvases = elsAndImages.map((ci) => ci[1]).filter((i) => i._id === el._id); 359 | if (canvases.length < 1) { 360 | throw new Error(`Could not find canvas image to map to element with fake id ${el._id}`); 361 | } 362 | 363 | const b64Template = canvases[0].toBuffer('image/png').toString('base64'); 364 | const imgEl = new ImageElement(b64Template, el.rect); 365 | return driver.registerImageElement(imgEl); 366 | }); 367 | 368 | return imageEls; 369 | } 370 | 371 | async function find (driver, logger, label, /* multiple */) { 372 | const allowWeakerMatches = driver.opts.testaiAllowWeakerMatches; 373 | const mode = driver.opts.testaiFindMode; 374 | if (mode === OBJ_DETECTION_MODE) { 375 | return await findByObjectDetector(driver, logger, label, allowWeakerMatches); 376 | } else if (!mode || mode === ELEMENT_LOOKUP_MODE) { 377 | return await findViaElementScreenshots(driver, logger, label, allowWeakerMatches); 378 | } else { 379 | throw new Error(`Invalid testAiFindMode '${mode}'. 
Must be one of: ` + 380 | `'${OBJ_DETECTION_MODE}', '${ELEMENT_LOOKUP_MODE}'`); 381 | } 382 | } 383 | 384 | export { getModel, tensorFromImage, predictionFromImage, saveImageFromTensor, 385 | tensorFromImages, getMatchingElements, predictionsFromImages, 386 | DEFAULT_CONFIDENCE_THRESHOLD }; 387 | export default find; 388 | -------------------------------------------------------------------------------- /lib/detector.js: -------------------------------------------------------------------------------- 1 | import path from 'path'; 2 | 3 | // need to initialize tfjs before calling the classifier bindings, otherwise it 4 | // cannot find the tf c lib 5 | import '@tensorflow/tfjs-node'; 6 | 7 | const { TF_VERSION, detect: _detect } = require('bindings')('test-ai-classifier'); 8 | 9 | const MODEL = path.resolve(__dirname, "..", "..", "model", "obj_detection_model"); 10 | 11 | function detect (imgPath, confidence = 0.95, debug = false) { 12 | return _detect(MODEL, imgPath, confidence, debug); 13 | } 14 | 15 | export { TF_VERSION, detect }; 16 | -------------------------------------------------------------------------------- /lib/download.js: -------------------------------------------------------------------------------- 1 | import path from 'path'; 2 | import request from 'request-promise'; 3 | import { mkdirp, fs } from 'appium-support'; 4 | 5 | const log = console.log; // eslint-disable-line no-console 6 | 7 | const MODEL_URL = "https://data.test.ai/appium-plugin/object-detection-model.pb"; 8 | const MODEL_MD5 = "365e4be71e9b31ab8408b20e5fb90da6"; 9 | const MODEL_DIR = path.resolve(__dirname, '..', '..', 'model', 'obj_detection_model'); 10 | const MODEL = path.resolve(MODEL_DIR, 'saved_model.pb'); 11 | 12 | export async function downloadObjDetectionModel (overwrite = false) { 13 | log(`Will download object detection model from remote host`); 14 | log(`Checking whether ${MODEL_DIR} exists...`); 15 | await mkdirp(MODEL_DIR); 16 | if (await fs.exists(MODEL)) { 17 | if ((await fs.md5(MODEL)) === MODEL_MD5) { 18 | log('Model matches md5 hash, will not re-download'); 19 | return; 20 | } 21 | 22 | if (!overwrite) { 23 | log(`${MODEL} already exists and we did not specify overwrite, not re-downloading`); 24 | return; 25 | } 26 | 27 | log(`Model already exists, but will re-download`); 28 | } 29 | 30 | log(`Downloading model from ${MODEL_URL}...`); 31 | const body = await request.get({url: MODEL_URL, encoding: 'binary'}); 32 | log(`Writing binary content to ${MODEL}...`); 33 | await fs.writeFile(MODEL, body, {encoding: 'binary'}); 34 | await fs.chmod(MODEL, 0o0755); 35 | log(`Download complete, verifying hash`); 36 | const dlMd5 = await fs.md5(MODEL); 37 | if (dlMd5 === MODEL_MD5) { 38 | log('Downloaded file content verified'); 39 | } else { 40 | throw new Error(`Could not verify downloaded file. 
--------------------------------------------------------------------------------
/lib/image.js:
--------------------------------------------------------------------------------
1 | import { createCanvas, createImageData, loadImage } from 'canvas';
2 | 
3 | async function canvasFromImage (imgData) {
4 |   let img = await loadImage(imgData);
5 |   let cvs = createCanvas(img.width, img.height);
6 |   let ctx = cvs.getContext('2d');
7 |   ctx.drawImage(img, 0, 0, img.width, img.height);
8 |   return cvs;
9 | }
10 | 
11 | function canvasFromImageData (imgData, width, height) {
12 |   const cvs = createCanvas(width, height);
13 |   const data = createImageData(imgData, width, height);
14 |   const ctx = cvs.getContext('2d');
15 |   ctx.putImageData(data, 0, 0);
16 |   return cvs;
17 | }
18 | 
19 | async function imageFromScreenshot (screenshot, size, logger) {
20 |   let image = await loadImage(Buffer.from(screenshot, 'base64'));
21 |   const canvas = createCanvas(size.width, size.height);
22 |   const ctx = canvas.getContext('2d');
23 | 
24 |   ctx.drawImage(image, 0, 0, image.width, image.height, 0, 0, size.width, size.height);
25 | 
26 |   const imageData = canvas.toBuffer('image/png');
27 | 
28 |   if (image.width !== size.width) {
29 |     logger.info(`Screenshot and screen size did not match. Screen size is ` +
30 |       `${size.width}x${size.height} but screenshot size is ` +
31 |       `${image.width}x${image.height}. Scaled screenshot to match screen size`);
32 |     image = await loadImage(imageData);
33 |   }
34 |   return {canvas, image, imageData};
35 | }
36 | 
37 | async function elementImageFromScreenshot (el, screenshotImg) {
38 |   if (!el.rect) {
39 |     throw new Error(`Unable to retrieve rect of element. Ensure that your ` +
40 |       `Appium session has set the shouldUseCompactResponses ` +
41 |       `capability to false`);
42 |   }
43 | 
44 |   if (el.rect.width === 0 || el.rect.height === 0) {
45 |     throw new Error("Element had a width or height of zero; cannot slice " +
46 |       "such an image");
47 |   }
48 | 
49 |   const {x, y, width, height} = el.rect;
50 |   let canvas = createCanvas(width, height);
51 |   let ctx = canvas.getContext('2d');
52 |   ctx.drawImage(screenshotImg, x, y, width, height, 0, 0, width, height);
53 |   return {canvas, x, y, width, height};
54 | }
55 | 
56 | function getCanvasByRect (cache, rect) {
57 |   const {x, y, width, height} = rect;
58 |   const existingCanvas = cache.filter((ei) => {
59 |     return ei.x === x && ei.y === y &&
60 |            ei.width === width && ei.height === height;
61 |   })[0];
62 | 
63 |   if (existingCanvas) {
64 |     return existingCanvas.canvas;
65 |   }
66 | }
67 | 
68 | export { canvasFromImage, imageFromScreenshot, elementImageFromScreenshot,
69 |          canvasFromImageData, getCanvasByRect };
70 | 
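
A minimal sketch of the slicing flow these helpers implement: decode a base64 screenshot at the reported screen size, then crop one element's rect out of it. The fixture path, the 750x1334 screen size, and console as the logger are illustrative assumptions:

const fs = require('fs');
const { imageFromScreenshot, elementImageFromScreenshot } = require('./build-js/lib/image');

async function cropDemo () {
  const b64 = fs.readFileSync('test/fixtures/screen.jpg').toString('base64');
  // imageFromScreenshot rescales the screenshot to the claimed screen size if they differ
  const { image } = await imageFromScreenshot(b64, {width: 750, height: 1334}, console);
  // a fake element of the shape classifier.js builds from detection candidates
  const el = {rect: {x: 20, y: 40, width: 100, height: 100}};
  const { canvas } = await elementImageFromScreenshot(el, image);
  fs.writeFileSync('element.png', canvas.toBuffer('image/png'));
}

cropDemo().catch((err) => { console.error(err); process.exit(1); });
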
--------------------------------------------------------------------------------
/lib/labels.js:
--------------------------------------------------------------------------------
1 | const LABELS = [
2 |   "unclassified",
3 |   "add",
4 |   "airplane",
5 |   "alarm",
6 |   "arrow down",
7 |   "arrow left",
8 |   "arrow right",
9 |   "arrow up",
10 |   "attach",
11 |   "bag",
12 |   "barcode",
13 |   "battery",
14 |   "bluetooth",
15 |   "bookmark",
16 |   "brightness",
17 |   "calculator",
18 |   "calendar",
19 |   "call",
20 |   "camera",
21 |   "car",
22 |   "cart",
23 |   "chart",
24 |   "check mark",
25 |   "clock",
26 |   "close",
27 |   "cloud",
28 |   "computer",
29 |   "contrast",
30 |   "credit card",
31 |   "crop",
32 |   "cursor",
33 |   "cut",
34 |   "dashboard",
35 |   "delete",
36 |   "dollar",
37 |   "download",
38 |   "edit",
39 |   "external link",
40 |   "eye",
41 |   "fab",
42 |   "facebook",
43 |   "fast forward",
44 |   "favorite",
45 |   "file",
46 |   "filter",
47 |   "fingerprint",
48 |   "fire",
49 |   "flag",
50 |   "flashlight",
51 |   "folder",
52 |   "gift",
53 |   "globe",
54 |   "gmail",
55 |   "google",
56 |   "grid",
57 |   "headphones",
58 |   "home",
59 |   "inbox",
60 |   "info",
61 |   "laptop",
62 |   "light bulb",
63 |   "link",
64 |   "location",
65 |   "lock",
66 |   "mail",
67 |   "map",
68 |   "maximize",
69 |   "megaphone",
70 |   "menu",
71 |   "microphone",
72 |   "minimize",
73 |   "mobile",
74 |   "moon",
75 |   "music",
76 |   "mute",
77 |   "notifications",
78 |   "overflow menu",
79 |   "pinterest",
80 |   "play",
81 |   "printer",
82 |   "profile avatar",
83 |   "qr code",
84 |   "question",
85 |   "refresh",
86 |   "reply",
87 |   "rewind",
88 |   "save",
89 |   "search",
90 |   "send",
91 |   "settings",
92 |   "share",
93 |   "signal",
94 |   "sort",
95 |   "tag",
96 |   "television",
97 |   "thumbs up",
98 |   "ticket",
99 |   "trash",
100 |   "trophy",
101 |   "twitter",
102 |   "unlock",
103 |   "upload",
104 |   "user",
105 |   "video camera",
106 |   "volume",
107 |   "warning"
108 | ];
109 | 
110 | export default LABELS;
111 | 
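
LABELS has 106 entries (index 0 is "unclassified"), which lines up with the 512x106 final_retrain_ops weights in model/model.json below: the classifier's output vector is indexed by this list. A small decoding sketch (the build-js path and the .default interop are assumptions about the compiled output):

const LABELS = require('./build-js/lib/labels').default;

// pick the best label for one element's 106-long score vector
function topLabel (scores) {
  let best = 0;
  for (let i = 1; i < scores.length; i++) {
    if (scores[i] > scores[best]) {
      best = i;
    }
  }
  return {label: LABELS[best], confidence: scores[best]};
}
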
--------------------------------------------------------------------------------
/lib/rpc.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | 
3 | import _ from 'lodash';
4 | import yargs from 'yargs';
5 | import path from 'path';
6 | import grpc from 'grpc';
7 | import npmlog from 'npmlog';
8 | import { asyncmap } from 'asyncbox';
9 | 
10 | import { canvasFromImage } from './image';
11 | import { getMatchingElements } from './classifier';
12 | 
13 | const PROTO = path.resolve(__dirname, '..', '..', 'classifier-proto', 'classifier.proto');
14 | const DEFAULT_PORT = 50051;
15 | const DEFAULT_HOST = "0.0.0.0";
16 | 
17 | const log = new Proxy({}, {
18 |   get (target, name) {
19 |     return function (...args) {
20 |       npmlog[name]('ai-rpc', ...args);
21 |     };
22 |   }
23 | });
24 | 
25 | export function main (host = DEFAULT_HOST, port = DEFAULT_PORT) {
26 |   const server = new grpc.Server();
27 |   const protoLoader = require('@grpc/proto-loader');
28 |   const packageDef = protoLoader.loadSync(PROTO, {
29 |     keepCase: true,
30 |     defaults: true,
31 |     oneofs: true
32 |   });
33 |   const protoDesc = grpc.loadPackageDefinition(packageDef);
34 |   server.addService(protoDesc.Classifier.service, {
35 |     classifyElements
36 |   });
37 |   server.bind(`${host}:${port}`, grpc.ServerCredentials.createInsecure());
38 |   server.start();
39 |   log.info(`Classification RPC server started on ${host}:${port}`);
40 |   return server;
41 | }
42 | 
43 | async function classifyElements (call, cb) { // eslint-disable-line promise/prefer-await-to-callbacks
44 |   const {
45 |     labelHint,
46 |     elementImages,
47 |     confidenceThreshold,
48 |     allowWeakerMatches
49 |   } = call.request;
50 |   log.info(`Classifying ${_.size(elementImages)} elements with desired label ${labelHint}`);
51 |   log.info(`Using threshold ${confidenceThreshold}`);
52 |   if (allowWeakerMatches) {
53 |     log.info('Elements whose most likely classification does not match the ' +
54 |              'label hint will be included in the response');
55 |   }
56 | 
57 |   const classifications = {};
58 |   try {
59 |     // turn each incoming image buffer into a canvas, keyed by element id
60 |     const elsAndImages = await asyncmap(_.keys(elementImages), async (k) => {
61 |       return [k, await canvasFromImage(elementImages[k])];
62 |     });
63 |     const matchingEls = await getMatchingElements({
64 |       elsAndImages,
65 |       label: labelHint,
66 |       confidence: confidenceThreshold,
67 |       allowWeakerMatches,
68 |       logger: log,
69 |       returnMetadata: true
70 |     });
71 |     for (const [elId, label, confidenceForHint, confidence] of matchingEls) {
72 |       classifications[elId] = {label, confidenceForHint, confidence};
73 |     }
74 |   } catch (err) {
75 |     return cb(err); // eslint-disable-line promise/prefer-await-to-callbacks
76 |   }
77 | 
78 |   cb(null, {classifications}); // eslint-disable-line promise/prefer-await-to-callbacks
79 | }
80 | 
81 | if (module === require.main) {
82 |   const { argv } = yargs
83 |     .option('host', {
84 |       alias: 'h',
85 |       type: 'string',
86 |       description: 'Host address to bind to',
87 |     })
88 |     .option('port', {
89 |       alias: 'p',
90 |       type: 'string',
91 |       description: 'Port to listen on',
92 |     });
93 |   main(argv.host, argv.port);
94 | }
95 | 
--------------------------------------------------------------------------------
/model/.npmignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | obj_detection_model
--------------------------------------------------------------------------------
/model/group1-shard1of1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/model/group1-shard1of1
--------------------------------------------------------------------------------
/model/model.json:
--------------------------------------------------------------------------------
1 | 
{"modelTopology":{"node":[{"input":[],"attr":{"dtype":{"type":1},"shape":{"shape":{"dim":[{"size":"-1"},{"size":"224"},{"size":"224"},{"size":"3"}]}}},"name":"Placeholder","op":"Placeholder"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[2],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[]}}},"dtype":{"type":1}},"name":"module_apply_default/hub_input/Mul/y","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[1],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[]}}},"dtype":{"type":1}},"name":"module_apply_default/hub_input/Sub/y","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"3"},{"size":"16"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_0/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_0/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_0/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_0/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_0/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"16"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}},"dtype
":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"16"}]}}}},"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"16"},{"size":"32"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"32"}]}}}},"name":"module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"32"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"32"}]}}}},"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"
dim":[{"size":"32"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"32"},{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}}},"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"64"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64
Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"64"},{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}}},"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"64"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"64"},{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplex
Val":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"128"},{"size":"1"}]}}}},"name":"module/MobilenetV1/Conv2d_5_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}}},"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}}},"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"128"},{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_5_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],
"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"128"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"128"}]}}}},"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"128"},{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"ten
sor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"256"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_7_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"256"},{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_7_pointwise/weights","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/moving_mean","op":
"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"256"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"256"},{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_8_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2
d_8_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"256"},{"size":"1"}]}}}},"name":"module/MobilenetV1/Conv2d_9_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"256"},{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_9_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"}
,{"size":"256"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_10_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"256"},{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_10_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"256"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_11_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[]
,"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"256"},{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_11_pointwise/weights","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"256"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_12_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}}},"name":"module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal"
:[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"256"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"256"},{"size":"512"}]}}}},"name":"module/MobilenetV1/Conv2d_12_pointwise/weights","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}}},"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}}},"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"512"},{"size":"1"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_depthwise/depthwise_weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"float
Val":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"1"},{"size":"1"},{"size":"512"},{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_pointwise/weights","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/gamma","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}}},"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/beta","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}}},"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/moving_mean","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"}]}}},"dtype":{"type":1}},"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/moving_variance","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":3,"tensorShape":{"dim":[{"size":"2"}]}}},"dtype":{"type":3}},"name":"module_apply_default/MobilenetV1/Logits/global_pool/reduction_indices","op":"Const"},{"input":[],"attr":{"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"512"},{"size":"106"}]}}},"dtype":{"type":1}},"name":"final_retrain_ops/weights/final_weights","op":"Const"},{"input":[],"attr":{"dtype":{"type":1},"value":{"tensor":{"floatVal":[],"doubleVal":[],"intVal":[],"stringVal":[],"scomplexVal":[],"int64Val":[],"boolVal":[],"uint32Val":[],"uint64Val":[],"dtype":1,"tensorShape":{"dim":[{"size":"106"}]}}}},"name":"final_retrain_ops/biases/final_biases","op":"Const"},{"input":["Placeholder","module_apply_default/hub_input/Mul/y"],"attr":{"T":{"type":1}},"name":"module_apply_default/hub_input/Mul","op":"Mul"},{"input":["module_apply_default/hub_input/Mul","module_apply_default/hub_input/Sub/y"],"attr":{"T":{"type":1}},"name":"module_apply_default/hub_input/Sub","op":"Sub"},{"input":["module_apply_default/hub_input/Sub","m
odule/MobilenetV1/Conv2d_0/weights"],"attr":{"strides":{"list":{"s":[],"i":["1","2","2","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D","module/MobilenetV1/Conv2d_0/BatchNorm/gamma","module/MobilenetV1/Conv2d_0/BatchNorm/beta","module/MobilenetV1/Conv2d_0/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_0/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_0/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_0/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_0/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_0/Relu6","module/MobilenetV1/Conv2d_1_depthwise/depthwise_weights"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_depthwise/depthwise","module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_depthwise/Relu6","module/MobilenetV1/Conv2d_1_pointwise/weights"],"attr":{"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Conv2D","module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/
Conv2d_1_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6","module/MobilenetV1/Conv2d_2_depthwise/depthwise_weights"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","2","2","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_depthwise/depthwise","module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/moving_variance"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_depthwise/Relu6","module/MobilenetV1/Conv2d_2_pointwise/weights"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Conv2D","module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Relu6","module/MobilenetV1/Conv2d_3_depthwise/depthwise_weights"],"attr":{"T":{"type":1},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_a
pply_default/MobilenetV1/MobilenetV1/Conv2d_3_depthwise/depthwise","module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_depthwise/Relu6","module/MobilenetV1/Conv2d_3_pointwise/weights"],"attr":{"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"use_cudnn_on_gpu":{"b":true}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Conv2D","module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/moving_variance"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6","module/MobilenetV1/Conv2d_4_depthwise/depthwise_weights"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","2","2","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_depthwise/depthwise","module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_depthwise/Relu6","module/Mobil
enetV1/Conv2d_4_pointwise/weights"],"attr":{"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Conv2D","module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/moving_variance"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Relu6","module/MobilenetV1/Conv2d_5_depthwise/depthwise_weights"],"attr":{"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_depthwise/depthwise","module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_depthwise/Relu6","module/MobilenetV1/Conv2d_5_pointwise/weights"],"attr":{"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Conv2D","module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/moving_variance"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":
false},"epsilon":{"f":0.0010000000474974513}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6","module/MobilenetV1/Conv2d_6_depthwise/depthwise_weights"],"attr":{"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","2","2","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_depthwise/depthwise","module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_depthwise/Relu6","module/MobilenetV1/Conv2d_6_pointwise/weights"],"attr":{"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Conv2D","module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6","module/MobilenetV1/Conv2d_7_depthwise/depthwise_weights"],"attr":{"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1}},"name":"module_apply_default/Mobi
lenetV1/MobilenetV1/Conv2d_7_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_depthwise/depthwise","module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_depthwise/Relu6","module/MobilenetV1/Conv2d_7_pointwise/weights"],"attr":{"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Conv2D","module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6","module/MobilenetV1/Conv2d_8_depthwise/depthwise_weights"],"attr":{"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_depthwise/depthwise","module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/moving_variance"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_depthwise/Relu6","op":"Relu
6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_depthwise/Relu6","module/MobilenetV1/Conv2d_8_pointwise/weights"],"attr":{"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Conv2D","module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/moving_variance"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6","module/MobilenetV1/Conv2d_9_depthwise/depthwise_weights"],"attr":{"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_depthwise/depthwise","module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_depthwise/Relu6","module/MobilenetV1/Conv2d_9_pointwise/weights"],"attr":{"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Conv2D","module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_9_pointwise/Batch
Norm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6","module/MobilenetV1/Conv2d_10_depthwise/depthwise_weights"],"attr":{"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_depthwise/depthwise","module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_depthwise/Relu6","module/MobilenetV1/Conv2d_10_pointwise/weights"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Conv2D","module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6","module/MobilenetV1/Conv2d_11_depthwise/depthwise_weights"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"
f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_depthwise/depthwise","module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/moving_variance"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_depthwise/Relu6","module/MobilenetV1/Conv2d_11_pointwise/weights"],"attr":{"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Conv2D","module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6","module/MobilenetV1/Conv2d_12_depthwise/depthwise_weights"],"attr":{"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]},"T":{"type":1},"strides":{"list":{"s":[],"i":["1","2","2","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_depthwise/depthwise","module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_1
2_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_depthwise/Relu6","module/MobilenetV1/Conv2d_12_pointwise/weights"],"attr":{"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"use_cudnn_on_gpu":{"b":true},"padding":{"s":[83,65,77,69]}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Conv2D","module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Relu6","module/MobilenetV1/Conv2d_13_depthwise/depthwise_weights"],"attr":{"T":{"type":1},"data_format":{"s":[78,72,87,67]},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_depthwise/depthwise","op":"DepthwiseConv2dNative"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_depthwise/depthwise","module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/gamma","module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/beta","module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/moving_variance"],"attr":{"epsilon":{"f":0.0010000000474974513},"T":{"type":1},"data_format":{"s":[78,72,87,67]},"is_training":{"b":false}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_depthwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_depthwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_depthwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_depthwise/Relu6","module/MobilenetV1/Conv2d_13_pointwise/weights"],"attr":{"padding":{"s":[83,65,77,69]},"dilations":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"T":{"type":1},"strides":{"list":{"s":[],"i":["1","1","1","1"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}},"data_format":{"s":[78,72,87,67]},"use_cudnn_on_gpu":{"b":true}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Conv2D","op":"Conv2D"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Conv2D","module/MobilenetV1/Conv2d_13_pointwis
e/BatchNorm/gamma","module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/beta","module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/moving_mean","module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/moving_variance"],"attr":{"data_format":{"s":[78,72,87,67]},"is_training":{"b":false},"epsilon":{"f":0.0010000000474974513},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_pointwise/BatchNorm/FusedBatchNorm","op":"FusedBatchNorm"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_pointwise/BatchNorm/FusedBatchNorm"],"attr":{"T":{"type":1}},"name":"module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6","op":"Relu6"},{"input":["module_apply_default/MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6","module_apply_default/MobilenetV1/Logits/global_pool/reduction_indices"],"attr":{"Tidx":{"type":3},"keep_dims":{"b":true},"T":{"type":1}},"name":"module_apply_default/MobilenetV1/Logits/global_pool","op":"Mean"},{"input":["module_apply_default/MobilenetV1/Logits/global_pool"],"attr":{"T":{"type":1},"squeeze_dims":{"list":{"s":[],"i":["1","2"],"f":[],"b":[],"type":[],"shape":[],"tensor":[],"func":[]}}},"name":"module_apply_default/hub_output/feature_vector/SpatialSqueeze","op":"Squeeze"},{"input":["module_apply_default/hub_output/feature_vector/SpatialSqueeze"],"attr":{"dtype":{"type":1},"shape":{"shape":{"dim":[{"size":"-1"},{"size":"512"}]}}},"name":"input/BottleneckInputPlaceholder","op":"PlaceholderWithDefault"},{"input":["input/BottleneckInputPlaceholder","final_retrain_ops/weights/final_weights"],"attr":{"transpose_a":{"b":false},"transpose_b":{"b":false},"T":{"type":1}},"name":"final_retrain_ops/Wx_plus_b/MatMul","op":"MatMul"},{"input":["final_retrain_ops/Wx_plus_b/MatMul","final_retrain_ops/biases/final_biases"],"attr":{"T":{"type":1}},"name":"final_retrain_ops/Wx_plus_b/add","op":"Add"},{"input":["final_retrain_ops/Wx_plus_b/add"],"attr":{"T":{"type":1}},"name":"final_result","op":"Softmax"}],"library":{"function":[],"gradient":[]},"versions":{"badConsumers":[]}},"weightsManifest":[{"paths":["group1-shard1of1"],"weights":[{"dtype":"float32","shape":[],"name":"module_apply_default/hub_input/Mul/y"},{"dtype":"float32","shape":[],"name":"module_apply_default/hub_input/Sub/y"},{"dtype":"float32","shape":[3,3,3,16],"name":"module/MobilenetV1/Conv2d_0/weights"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_0/BatchNorm/gamma"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_0/BatchNorm/beta"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_0/BatchNorm/moving_mean"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_0/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,16,1],"name":"module/MobilenetV1/Conv2d_1_depthwise/depthwise_weights"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[16],"name":"module/MobilenetV1/Conv2d_1_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,16,32],"name":"module/MobilenetV1/Conv2d_1_pointwise/weights"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Con
v2d_1_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Conv2d_1_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,32,1],"name":"module/MobilenetV1/Conv2d_2_depthwise/depthwise_weights"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[32],"name":"module/MobilenetV1/Conv2d_2_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,32,64],"name":"module/MobilenetV1/Conv2d_2_pointwise/weights"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_2_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,64,1],"name":"module/MobilenetV1/Conv2d_3_depthwise/depthwise_weights"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,64,64],"name":"module/MobilenetV1/Conv2d_3_pointwise/weights"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_3_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,64,1],"name":"module/MobilenetV1/Conv2d_4_depthwise/depthwise_weights"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[64],"name":"module/MobilenetV1/Conv2d_4_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,64,128],"name":"module/MobilenetV1/Conv2d_4_pointwise/weights"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_4_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,128,1],"name":"module/MobilenetV1/Conv2d_5_depthwise/depthwise_weights"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/moving_mean"},{"dtype":"fl
oat32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,128,128],"name":"module/MobilenetV1/Conv2d_5_pointwise/weights"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_5_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,128,1],"name":"module/MobilenetV1/Conv2d_6_depthwise/depthwise_weights"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[128],"name":"module/MobilenetV1/Conv2d_6_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,128,256],"name":"module/MobilenetV1/Conv2d_6_pointwise/weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_6_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,256,1],"name":"module/MobilenetV1/Conv2d_7_depthwise/depthwise_weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,256,256],"name":"module/MobilenetV1/Conv2d_7_pointwise/weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_7_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,256,1],"name":"module/MobilenetV1/Conv2d_8_depthwise/depthwise_weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_8_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,256,256],"name":"module/MobilenetV1/Conv2d_8_pointwise/weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_8_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"mo
dule/MobilenetV1/Conv2d_8_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,256,1],"name":"module/MobilenetV1/Conv2d_9_depthwise/depthwise_weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,256,256],"name":"module/MobilenetV1/Conv2d_9_pointwise/weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_9_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,256,1],"name":"module/MobilenetV1/Conv2d_10_depthwise/depthwise_weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,256,256],"name":"module/MobilenetV1/Conv2d_10_pointwise/weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_10_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,256,1],"name":"module/MobilenetV1/Conv2d_11_depthwise/depthwise_weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,256,256],"name":"module/MobilenetV1/Conv2d_11_pointwise/weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_11_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,256,1],"name":"module/MobilenetV1/Conv2d_12_depthwise/depthwise_weights"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[256],"name":"module/MobilenetV1/Conv2d_12_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[256],"name":"m
odule/MobilenetV1/Conv2d_12_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,256,512],"name":"module/MobilenetV1/Conv2d_12_pointwise/weights"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_12_pointwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[3,3,512,1],"name":"module/MobilenetV1/Conv2d_13_depthwise/depthwise_weights"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/gamma"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/beta"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_depthwise/BatchNorm/moving_variance"},{"dtype":"float32","shape":[1,1,512,512],"name":"module/MobilenetV1/Conv2d_13_pointwise/weights"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/gamma"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/beta"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/moving_mean"},{"dtype":"float32","shape":[512],"name":"module/MobilenetV1/Conv2d_13_pointwise/BatchNorm/moving_variance"},{"dtype":"int32","shape":[2],"name":"module_apply_default/MobilenetV1/Logits/global_pool/reduction_indices"},{"dtype":"float32","shape":[512,106],"name":"final_retrain_ops/weights/final_weights"},{"dtype":"float32","shape":[106],"name":"final_retrain_ops/biases/final_biases"}]}]} -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test-ai-classifier", 3 | "version": "4.0.2", 4 | "description": "Appium semantic label plugin", 5 | "homepage": "https://github.com/testdotai/appium-classifier-plugin#readme", 6 | "bugs": "https://github.com/testdotai/appium-classifier-plugin/issues", 7 | "main": "./build-js/index.js", 8 | "bin": { 9 | "test-ai-classifier": "./build-js/lib/rpc.js" 10 | }, 11 | "gypfile": true, 12 | "scripts": { 13 | "clean-js": "rm -rf node_modules && rm -f package-lock.json && rm -rf build-js && npm install", 14 | "clean-cc": "node-gyp clean && rm -rf build", 15 | "clean": "npm run clean-js && npm run clean-cc", 16 | "build-js": "gulp transpile", 17 | "build-cc": "node-gyp rebuild", 18 | "build": "npm run build-cc && npm run build-js", 19 | "mocha": "mocha", 20 | "prepublish": "gulp prepublish", 21 | "postinstall": "node ./postinstall.js", 22 | "test": "gulp once", 23 | "e2e-test": "gulp e2e-test", 24 | "watch": "gulp watch", 25 | "coverage": "gulp coveralls", 26 | "lint": "gulp eslint", 27 | "lint:fix": "gulp eslint --fix" 28 | }, 29 | "files": [ 30 | "build-js", 31 | "cc", 32 | "lib", 33 | "model", 34 | "index.js", 35 | "postinstall.js", 36 | "binding.gyp", 37 | "gulpfile.js", 38 | "classifier-proto/classifier.proto", 39 | "README.md" 40 | ], 41 | "keywords": [ 42 | "test", 43 | "automation", 44 | "appium", 45 | "ai", 46 | "ml" 47 | ], 48 | "repository": "github:testdotai/appium-classifier-plugin", 49 | "author": "Jonathan Lipps ", 50 | "license": "Apache-2.0", 51 | 
"dependencies": { 52 | "@grpc/proto-loader": "0.x", 53 | "@tensorflow/tfjs-node": "1.x", 54 | "appium-base-driver": "3.x", 55 | "appium-support": "2.x", 56 | "asyncbox": "2.x", 57 | "bindings": "1.x", 58 | "bluebird": "3.x", 59 | "canvas": "2.x", 60 | "grpc": "^1.24.2", 61 | "lodash": "4.x", 62 | "node-addon-api": "1.x", 63 | "npmlog": "4.x", 64 | "request-promise": "4.x", 65 | "yargs": "15.x" 66 | }, 67 | "devDependencies": { 68 | "appium-gulp-plugins": "^3.0.0", 69 | "babel-eslint": "^7.1.1", 70 | "chai": "^4.1.2", 71 | "eslint": "^3.18.0", 72 | "eslint-config-appium": "^2.0.1", 73 | "eslint-plugin-babel": "^3.3.0", 74 | "eslint-plugin-import": "^2.2.0", 75 | "eslint-plugin-mocha": "^4.7.0", 76 | "eslint-plugin-promise": "^3.3.1", 77 | "mocha": "^5.1.1", 78 | "should": "^13.2.3", 79 | "wd": "^1.11.0" 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /postinstall.js: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env node 2 | /* eslint-disable no-console */ 3 | /* eslint-disable promise/prefer-await-to-then */ 4 | /* eslint-disable promise/prefer-await-to-callbacks */ 5 | 6 | const path = require('path'); 7 | const fs = require('fs'); 8 | const DOWNLOAD_JS = path.resolve(__dirname, 'build-js', 'lib', 'download.js'); 9 | 10 | if (module === require.main) { 11 | if (fs.existsSync(DOWNLOAD_JS)) { 12 | require(DOWNLOAD_JS).downloadObjDetectionModel().catch((err) => { 13 | console.error(err.stack); 14 | console.error("WARN: Download of object detection model failed. Object " + 15 | "detection mode will not work."); 16 | }); 17 | } else { 18 | console.error("Not downloading object detection model because built code " + 19 | "doesn't exist. First run `npm run build` and then " + 20 | "`./postinstall.js`"); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test/find-e2e-specs.js: -------------------------------------------------------------------------------- 1 | import wd from 'wd'; 2 | import path from 'path'; 3 | import chai from 'chai'; 4 | import should from 'should'; 5 | import B from 'bluebird'; 6 | 7 | chai.use(should); 8 | 9 | const APPIUM = "http://localhost:4723/wd/hub"; 10 | 11 | // since when running tests we want to use the currently transpiled version of 12 | // the module with Appium, and not a version which is installed or linked into 13 | // the Appium tree, make sure we pass an absolute path as the module name so 14 | // Appium is always picking up the latest from this project. 
15 | const MODULE_PATH = path.resolve(__dirname, '..'); 16 | 17 | const GENERAL = { 18 | customFindModules: {ai: MODULE_PATH}, 19 | shouldUseCompactResponses: false, 20 | testaiAllowWeakerMatches: true, 21 | noReset: true, 22 | }; 23 | 24 | const ANDROID = { 25 | platformName: 'Android', 26 | deviceName: 'Android Emulator', 27 | automationName: 'UiAutomator2', 28 | ...GENERAL, 29 | }; 30 | 31 | const FILES = { 32 | appPackage: 'com.android.documentsui', 33 | appActivity: '.files.FilesActivity', 34 | ...ANDROID, 35 | }; 36 | 37 | const IOS = { 38 | automationName: 'XCUITest', 39 | platformName: 'iOS', 40 | deviceName: 'iPhone 8', 41 | platformVersion: '11.4', 42 | ...GENERAL, 43 | }; 44 | 45 | const PHOTOS = { 46 | bundleId: 'com.apple.mobileslideshow', 47 | ...IOS, 48 | }; 49 | 50 | const FILES_IOS = { 51 | bundleId: 'com.apple.DocumentsApp', 52 | ...IOS, 53 | }; 54 | 55 | function setup (caps, testTimeout = 180000, implicitWaitTimeout = 40000) { 56 | let test = {}; 57 | 58 | before(async function () { 59 | this.timeout(testTimeout); 60 | test.driver = wd.promiseChainRemote(APPIUM); 61 | await test.driver.init(caps); 62 | await test.driver.setImplicitWaitTimeout(implicitWaitTimeout); 63 | }); 64 | 65 | after(async function () { 66 | if (test.driver) { 67 | await test.driver.quit(); 68 | } 69 | }); 70 | 71 | return test; 72 | } 73 | 74 | describe('Finding by element - Android', function () { 75 | const t = setup(FILES); 76 | 77 | it('should find an element by its label', async function () { 78 | this.timeout(90000); 79 | await t.driver.elementByAccessibilityId('More options'); 80 | await t.driver.elementByCustom('ai:menu').click(); 81 | await t.driver.elementByXPath('//android.widget.TextView[@text="SDCARD"]'); 82 | }); 83 | 84 | }); 85 | 86 | describe('Finding by object detection - Android', function () { 87 | const t = setup({ 88 | testaiFindMode: 'object_detection', 89 | testaiObjDetectionDebug: true, 90 | ...FILES 91 | }, 180000, 180000); 92 | 93 | it('should find an element using the object detection strategy', async function () { 94 | this.timeout(180000); 95 | await t.driver.updateSettings({checkForImageElementStaleness: false}); 96 | await t.driver.elementByAccessibilityId('More options'); 97 | await t.driver.elementByCustom('ai:menu').click(); 98 | await t.driver.elementByXPath('//android.widget.TextView[@text="SDCARD"]'); 99 | }); 100 | }); 101 | 102 | describe('Finding by element - iOS', function () { 103 | const t = setup(PHOTOS, 120000, 20000); 104 | 105 | // this test assumes you've launched the app and hit 'continue' to the 106 | // 'what's new in photos' interstitial 107 | it('should find an element by its label', async function () { 108 | this.timeout(90000); 109 | await t.driver.elementByCustom('ai:search').click(); 110 | await t.driver.elementByAccessibilityId('Cancel'); 111 | }); 112 | }); 113 | 114 | describe('Finding by object detection - iOS', function () { 115 | const t = setup({ 116 | testaiFindMode: 'object_detection', 117 | testaiObjDetectionDebug: true, 118 | testaiObjDetectionThreshold: 0.9, 119 | ...PHOTOS 120 | }, 120000, 20000); 121 | 122 | // this test assumes you've launched the app and hit 'continue' to the 123 | // 'what's new in photos' interstitial 124 | it('should find an element by its label', async function () { 125 | await t.driver.updateSettings({checkForImageElementStaleness: false}); 126 | this.timeout(90000); 127 | await t.driver.elementByCustom('ai:search').click(); 128 | await B.delay(5000); 129 | await 
t.driver.elementByAccessibilityId('October 2009'); 130 | }); 131 | }); 132 | 133 | describe('Finding grouped icon - iOS', function () { 134 | const t = setup({ 135 | testaiFindMode: 'object_detection', 136 | testaiObjDetectionDebug: true, 137 | testaiObjDetectionThreshold: 0.9, 138 | ...FILES_IOS 139 | }, 120000, 20000); 140 | 141 | it('should find an element by its label', async function () { 142 | this.timeout(90000); 143 | await t.driver.updateSettings({checkForImageElementStaleness: false}); 144 | await t.driver.elementByAccessibilityId('Browse').click(); 145 | await t.driver.elementByCustom('ai:clock').click(); 146 | await t.driver.elementByAccessibilityId('No Recents'); 147 | }); 148 | }); 149 | -------------------------------------------------------------------------------- /test/fixtures/ap-mail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/ap-mail.png -------------------------------------------------------------------------------- /test/fixtures/birds.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/birds.jpg -------------------------------------------------------------------------------- /test/fixtures/cart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/cart.png -------------------------------------------------------------------------------- /test/fixtures/folder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/folder.png -------------------------------------------------------------------------------- /test/fixtures/menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/menu.png -------------------------------------------------------------------------------- /test/fixtures/menu_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/menu_small.png -------------------------------------------------------------------------------- /test/fixtures/microphone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/microphone.png -------------------------------------------------------------------------------- /test/fixtures/screen.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testdotai/appium-classifier-plugin/105c7e67e6b7a25907dc17ee62e943c512f7c603/test/fixtures/screen.jpg -------------------------------------------------------------------------------- /test/model-e2e-specs.js: -------------------------------------------------------------------------------- 1 | import path from 'path'; 2 | import chai from 
--------------------------------------------------------------------------------
/test/model-e2e-specs.js:
--------------------------------------------------------------------------------
import path from 'path';
import chai from 'chai';
import should from 'should';
import { asyncmap } from 'asyncbox';
import { getModel, tensorFromImage, saveImageFromTensor, predictionFromImage,
  predictionsFromImages, DEFAULT_CONFIDENCE_THRESHOLD } from '../lib/classifier';
import { canvasFromImage } from '../lib/image';

chai.use(should);

const FIXTURES = path.resolve(__dirname, "..", "..", "test", "fixtures");

const CART_IMG = path.resolve(FIXTURES, "cart.png");
const MIC_IMG = path.resolve(FIXTURES, "microphone.png");
const FOLDER_IMG = path.resolve(FIXTURES, "folder.png");
const MENU_IMG = path.resolve(FIXTURES, "menu.png");
const TINY_MENU_IMG = path.resolve(FIXTURES, "menu_small.png");
const MAIL_IMG = path.resolve(FIXTURES, "ap-mail.png");

describe('Model', function () {
  it('should load the model', async function () {
    await getModel();
  });

  it.skip('should load and save a tensor', async function () {
    // use for debugging
    const t = await tensorFromImage(await canvasFromImage(MENU_IMG));
    await saveImageFromTensor(t, "debug.png");
  });

  it('should make predictions based on model - cart', async function () {
    let pred = await predictionFromImage(await canvasFromImage(CART_IMG), DEFAULT_CONFIDENCE_THRESHOLD, "cart");
    pred[0].should.eql("cart");
  });

  it('should make predictions based on model - mail', async function () {
    let pred = await predictionFromImage(await canvasFromImage(MAIL_IMG), DEFAULT_CONFIDENCE_THRESHOLD, "mail");
    pred[0].should.eql("mail");
  });

  it('should make predictions based on model - mic', async function () {
    let pred = await predictionFromImage(await canvasFromImage(MIC_IMG), DEFAULT_CONFIDENCE_THRESHOLD, "microphone");
    pred[0].should.eql("microphone");
  });

  it('should make predictions based on model - menu', async function () {
    let pred = await predictionFromImage(await canvasFromImage(MENU_IMG), DEFAULT_CONFIDENCE_THRESHOLD, "menu");
    pred[0].should.eql("menu");
  });

  it('should make predictions based on model - tiny menu', async function () {
    // pred is a [label, confidence] pair
    let pred = await predictionFromImage(await canvasFromImage(TINY_MENU_IMG), DEFAULT_CONFIDENCE_THRESHOLD, "menu");
    pred[1].should.be.above(DEFAULT_CONFIDENCE_THRESHOLD);
  });

  it('should make predictions based on model - unclassified', async function () {
    let pred = await predictionFromImage(await canvasFromImage(FOLDER_IMG), 0.8, "folder");
    pred[0].should.eql("unclassified");
  });

  it('should make multiple predictions at a time', async function () {
    const imgs = await asyncmap([CART_IMG, MIC_IMG, MENU_IMG], (img) => {
      return canvasFromImage(img);
    });
    const preds = await predictionsFromImages(imgs, DEFAULT_CONFIDENCE_THRESHOLD, "cart");
    preds.should.have.length(3);
    preds[0][0].should.eql("cart");
    preds[1][0].should.eql("microphone");
    preds[2][0].should.eql("menu");
  });

  it('should obey a confidence threshold', async function () {
    // a threshold of 1 is unreachable, so even a clean cart image is rejected
    let pred = await predictionFromImage(await canvasFromImage(CART_IMG), 1, "cart");
    pred[0].should.eql("unclassified");

    // at a near-zero threshold the raw top label comes back instead of
    // 'unclassified', even when (as here) it is wrong
    pred = await predictionFromImage(await canvasFromImage(FOLDER_IMG), 0.01, "folder");
    pred[0].should.eql("fire");
  });
});

describe('Image Tensor', function () {
  it('should get a tensor for an image', async function () {
    await tensorFromImage(await canvasFromImage(CART_IMG));
  });
});
--------------------------------------------------------------------------------
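Editor's note: the classifier calls above all follow the same shape. canvasFromImage loads an image file into a canvas, and predictionFromImage scores it, resolving to a [label, confidence] pair, with 'unclassified' returned when nothing clears the threshold. A standalone sketch of that flow outside Mocha; the build-js require paths are assumptions based on this repo's compiled-output directory, and the role of the third argument beyond what the specs show is not confirmed:

// Hypothetical one-off script; the compiled-output paths are assumed.
const path = require('path');
const { predictionFromImage, DEFAULT_CONFIDENCE_THRESHOLD } = require('./build-js/lib/classifier');
const { canvasFromImage } = require('./build-js/lib/image');

async function main () {
  const img = path.resolve(__dirname, 'test', 'fixtures', 'cart.png');
  const canvas = await canvasFromImage(img);
  // the specs above pass the expected label as a third argument; it is
  // passed through verbatim here without assuming more about its role
  const [label, confidence] = await predictionFromImage(canvas, DEFAULT_CONFIDENCE_THRESHOLD, 'cart');
  console.log(`predicted '${label}' at confidence ${confidence}`);
}

main().catch(console.error);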
--------------------------------------------------------------------------------
/test/native-e2e-specs.js:
--------------------------------------------------------------------------------
import path from 'path';
import chai from 'chai';
import should from 'should';

import { TF_VERSION, detect } from '../lib/detector';

chai.use(should);

const BIRDS_IMG = path.resolve(__dirname, "..", "..", "test", "fixtures", "birds.jpg");
const SCREEN_IMG = path.resolve(__dirname, "..", "..", "test", "fixtures", "screen.jpg");

describe('Native object detection', function () {

  it('should get the tensorflow version', function () {
    TF_VERSION.should.match(/^1\./);
  });

  it('should detect objects in an image of birds', function () {
    const res = detect(BIRDS_IMG, 0.95, true);
    res.length.should.eql(2);
    should.exist(res[0].confidence);
    res[0].ymin.should.be.above(0.5);
    res[0].ymin.should.be.below(0.6);
    res[1].ymin.should.be.above(0.1);
    res[1].ymin.should.be.below(0.2);
  });

  it('should detect objects in a mobile app screenshot', function () {
    const res = detect(SCREEN_IMG, 0.95, true);
    res.length.should.be.above(1);
  });

});
--------------------------------------------------------------------------------
/test/test-android.js:
--------------------------------------------------------------------------------
const path = require('path');
const wd = require('wd');

const APPIUM = "http://localhost:4723/wd/hub";
const MODULE_PATH = path.resolve(__dirname, '..');

const ANDROID_CAPS = {
  platformName: 'Android',
  deviceName: 'Android Emulator',
  automationName: 'UiAutomator2',
  noReset: true,
  appPackage: 'com.android.documentsui',
  appActivity: '.files.FilesActivity',
  customFindModules: {ai: MODULE_PATH},
  testaiObjDetectionDebug: true,
  shouldUseCompactResponses: false,
};

describe('Finding an Android element with machine learning magic', function () {
  let driver;

  before(async function () {
    driver = wd.promiseChainRemote(APPIUM);
    await driver.init(ANDROID_CAPS);
    await driver.setImplicitWaitTimeout(20000);
  });

  after(async function () {
    if (driver) {
      await driver.quit();
    }
  });

  it('should find the menu button', async function () {
    // ensure we loaded the app
    await driver.elementByAccessibilityId('More options');

    // click on the menu button using the ai finder
    await driver.elementByCustom('ai:menu').click();

    // prove the menu opened by finding a menu item
    await driver.elementByXPath('//android.widget.TextView[@text="SDCARD"]');
  });
});
--------------------------------------------------------------------------------
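Editor's note: the detect call exercised in native-e2e-specs.js is synchronous and returns one record per detected region, with normalized coordinates and a confidence score. A quick sketch for eyeballing detector output outside Mocha; the build-js require path is an assumption based on this repo's compiled-output directory, and only the confidence and ymin fields are confirmed by the specs above:

// Hypothetical debugging script; run with node after building the project.
const path = require('path');
// assumed location of the compiled detector module
const { TF_VERSION, detect } = require('./build-js/lib/detector');

const img = path.resolve(__dirname, 'test', 'fixtures', 'birds.jpg');
console.log(`libtensorflow version: ${TF_VERSION}`);

// detect(imagePath, confidenceThreshold, debug) per native-e2e-specs.js
for (const region of detect(img, 0.95, true)) {
  console.log(`confidence=${region.confidence} ymin=${region.ymin}`);
}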