├── test.png
├── landmark.jpg
├── trainSTD.png
├── trainMean.png
├── vanilla-0001.params
├── vanillaCNN.caffemodel
├── README.md
├── vanilla_deploy.prototxt
├── Trans.ipynb
└── vanilla-symbol.json

--------------------------------------------------------------------------------
/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flyingzhao/mxnet_VanillaCNN/HEAD/test.png

--------------------------------------------------------------------------------
/landmark.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flyingzhao/mxnet_VanillaCNN/HEAD/landmark.jpg

--------------------------------------------------------------------------------
/trainSTD.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flyingzhao/mxnet_VanillaCNN/HEAD/trainSTD.png

--------------------------------------------------------------------------------
/trainMean.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flyingzhao/mxnet_VanillaCNN/HEAD/trainMean.png

--------------------------------------------------------------------------------
/vanilla-0001.params:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flyingzhao/mxnet_VanillaCNN/HEAD/vanilla-0001.params

--------------------------------------------------------------------------------
/vanillaCNN.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flyingzhao/mxnet_VanillaCNN/HEAD/vanillaCNN.caffemodel

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# VanillaCNN

This is an MXNet implementation of Vanilla CNN, a small convolutional network for detecting five facial landmarks. The network stacks four convolution layers (each followed by TanH and absolute-value activations) and two fully connected layers, regressing ten values: the (x, y) coordinates of the five landmarks. This repository converts the reference Caffe weights (`vanillaCNN.caffemodel`) into an MXNet checkpoint (`vanilla-symbol.json` / `vanilla-0001.params`) and verifies in `Trans.ipynb` that both frameworks produce the same predictions.

## Result

Original image:

![origin](https://github.com/flyingzhao/mxnet_VanillaCNN/blob/master/test.png)

Landmarks:

![landmark](https://github.com/flyingzhao/mxnet_VanillaCNN/blob/master/landmark.jpg)
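## Usage

A minimal inference sketch with the converted model, following the steps in `Trans.ipynb`. It assumes the legacy `mx.model.FeedForward` API of the MXNet version this repository was written against, and the normalization images shipped in the repository:

```python
import cv2
import mxnet as mx
import numpy as np

# Load epoch 1 of the "vanilla" checkpoint (vanilla-symbol.json + vanilla-0001.params).
model = mx.model.FeedForward.load('vanilla', 1, numpy_batch_size=1)

# Per-pixel normalization statistics shipped with the repository.
mean = cv2.imread('./trainMean.png').astype('float32')
std = cv2.imread('./trainSTD.png').astype('float32')

# Preprocess: resize to 40x40, normalize, swap OpenCV's BGR to RGB, and
# reorder to the (batch, channel, height, width) layout MXNet expects.
img = cv2.resize(cv2.imread('./test.png'), (40, 40)).astype('float32')
img = (img - mean) / (1e-6 + std)
batch = img[:, :, ::-1].transpose(2, 0, 1)[np.newaxis]  # shape (1, 3, 40, 40)

points = model.predict(batch)[0]  # 10 values, roughly in [-0.5, 0.5]
landmarks = (points + 0.5) * 128  # pixel coordinates in a 128x128 image
print(landmarks.reshape(5, 2))
```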
## Reference

Yue Wu and Tal Hassner, "Facial Landmark Detection with Tweaked Convolutional Neural Networks", arXiv preprint arXiv:1511.04031, 12 Nov. 2015.

See the project page for more information:

>http://www.openu.ac.il/home/hassner/projects/tcnn_landmarks/

--------------------------------------------------------------------------------
/vanilla_deploy.prototxt:
--------------------------------------------------------------------------------
name: "vanilla"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 40
input_dim: 40
########################################
# the actual net
# layer 1
layer {
  name: "Conv1"
  type: "Convolution"
  bottom: "data"
  top: "Conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 2
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}

layer {
  name: "ActivationTangH1"
  bottom: "Conv1"
  top: "ActivationTangH1"
  type: "TanH"
}

layer {
  name: "ActivationAbs1"
  bottom: "ActivationTangH1"
  top: "Abs1"
  type: "AbsVal"
}

layer {
  name: "Pool1"
  type: "Pooling"
  bottom: "Abs1"
  top: "Pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}

# layer 2
layer {
  name: "Conv2"
  type: "Convolution"
  bottom: "Pool1"
  top: "Conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 48
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}

layer {
  name: "ActivationTangH2"
  bottom: "Conv2"
  top: "ActivationTangH2"
  type: "TanH"
}

layer {
  name: "ActivationAbs2"
  bottom: "ActivationTangH2"
  top: "Abs2"
  type: "AbsVal"
}

layer {
  name: "Pool2"
  type: "Pooling"
  bottom: "Abs2"
  top: "Pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}

# layer 3
layer {
  name: "Conv3"
  type: "Convolution"
  bottom: "Pool2"
  top: "Conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}

layer {
  name: "ActivationTangH3"
  bottom: "Conv3"
  top: "ActivationTangH3"
  type: "TanH"
}

layer {
  name: "ActivationAbs3"
  bottom: "ActivationTangH3"
  top: "Abs3"
  type: "AbsVal"
}

layer {
  name: "Pool3"
  type: "Pooling"
  bottom: "Abs3"
  top: "Pool3"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}

# layer 4
layer {
  name: "Conv4"
  type: "Convolution"
  bottom: "Pool3"
  top: "Conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_size: 2
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}

layer {
  name: "ActivationTangH4"
  bottom: "Conv4"
  top: "ActivationTangH4"
  type: "TanH"
}

layer {
  name: "ActivationAbs4"
  bottom: "ActivationTangH4"
  top: "Abs4"
  type: "AbsVal"
}

########################################

layer {
  name: "Dense1"
  type: "InnerProduct"
  bottom: "Abs4"
  top: "Dense1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 100
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "ActivationTangH5"
  bottom: "Dense1"
  top: "ActivationTangH5"
  type: "TanH"
}

layer {
  name: "ActivationAbs5"
  bottom: "ActivationTangH5"
  top: "Abs5"
  type: "AbsVal"
}

layer {
  name: "Dense2"
  type: "InnerProduct"
  bottom: "Abs5"
  top: "Dense2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
--------------------------------------------------------------------------------
/Trans.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import caffe\n",
    "import cv2\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load Caffe model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "class Predictor:\n",
    "\n",
    "    def __init__(self, protoTXTPath, weightsPath):\n",
    "        caffe.set_mode_cpu()\n",
    "        self.net = caffe.Net(protoTXTPath, weightsPath, caffe.TEST)\n",
    "        # Per-pixel mean/std images used to normalize the 40x40 input\n",
    "        self.mean = cv2.imread('./trainMean.png').astype('float')\n",
    "        self.std = cv2.imread('./trainSTD.png').astype('float')\n",
    "\n",
    "    def preprocess(self, resized):\n",
    "        ret = resized.astype('f4')\n",
    "        ret -= self.mean\n",
    "        ret /= (1.e-6 + self.std)\n",
    "        return ret\n",
    "\n",
    "    def predict(self, resized):\n",
    "        \"\"\"\n",
    "        @resized: 40x40 image, already preprocessed\n",
    "        \"\"\"\n",
    "        # cv2.split yields the three channel planes, which broadcast into the (1, 3, 40, 40) data blob\n",
    "        self.net.blobs['data'].data[...] = cv2.split(resized)\n",
    "        prediction = self.net.forward()['Dense2'][0]\n",
    "        return prediction"
   ]
  },
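  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "An optional sanity check before comparing the two frameworks: the cell below lists every blob and parameter shape in the Caffe net via the standard pycaffe `net.blobs` / `net.params` dictionaries. The shapes should match `vanilla_deploy.prototxt` and `vanilla-symbol.json`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Optional sanity check: print every blob and parameter shape in the Caffe net.\n",
    "net = caffe.Net(\"./vanilla_deploy.prototxt\", \"./vanillaCNN.caffemodel\", caffe.TEST)\n",
    "for name, blob in net.blobs.items():\n",
    "    print(\"%s %s\" % (name, blob.data.shape))\n",
    "for name, params in net.params.items():\n",
    "    print(\"%s %s\" % (name, [p.data.shape for p in params]))"
   ]
  },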
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-0.22493494 -0.24115832  0.18771622 -0.25451189  0.02025338 -0.02075372\n",
      " -0.23037848  0.14349605  0.20071335  0.13110712]\n"
     ]
    }
   ],
   "source": [
    "predictor = Predictor(\"./vanilla_deploy.prototxt\", \"./vanillaCNN.caffemodel\")  # load Caffe model and weights\n",
    "\n",
    "a = cv2.imread(\"./test.png\")\n",
    "a = cv2.resize(a, (40, 40))  # load and resize the test image\n",
    "\n",
    "img = predictor.preprocess(a)  # preprocessing\n",
    "\n",
    "points = predictor.predict(img)  # predict facial landmarks\n",
    "\n",
    "print(points)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Draw landmarks on the image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 35.20832825  33.13173676  88.02767944  31.42247772  66.59243011\n",
      "  61.34352493  34.51155472  82.36749268  89.69130707  80.78170776]\n"
     ]
    }
   ],
   "source": [
    "landmark = (points + 0.5) * 128  # map [-0.5, 0.5] outputs to pixels in a 128x128 image\n",
    "print(landmark)\n",
    "\n",
    "a = cv2.resize(a, (128, 128))\n",
    "\n",
    "# cv2.circle expects integer coordinates\n",
    "for i in range(0, 10, 2):\n",
    "    cv2.circle(a, (int(landmark[i]), int(landmark[i + 1])), 2, (0, 255, 0))\n",
    "\n",
    "# cv2.imshow(\"1\", a)\n",
    "# cv2.waitKey(0)"
   ]
  },
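  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Transferring the weights\n",
    "\n",
    "The repository ships the converted checkpoint but not the conversion script, so the cell below is a minimal sketch of how it could be produced. Parameter names follow `vanilla-symbol.json`, and the `arg:` key prefix is the NDArray checkpoint convention that `mx.model.FeedForward.load` reads. Both frameworks store convolution weights as (out, in, h, w) and fully connected weights as (out, in), so the arrays copy over directly. One subtlety: the MXNet cells below feed RGB input while the Caffe net sees OpenCV's BGR order, so the first convolution's input channels need to be flipped."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import mxnet as mx\n",
    "\n",
    "# Sketch of the Caffe -> MXNet weight transfer.\n",
    "args = {}\n",
    "for name in ['Conv1', 'Conv2', 'Conv3', 'Conv4', 'Dense1', 'Dense2']:\n",
    "    weight, bias = predictor.net.params[name]\n",
    "    w = weight.data\n",
    "    if name == 'Conv1':\n",
    "        # Caffe's first layer sees BGR; the MXNet pipeline feeds RGB,\n",
    "        # so reverse the input-channel axis once here.\n",
    "        w = w[:, ::-1, :, :].copy()\n",
    "    args['arg:%s_weight' % name] = mx.nd.array(w)\n",
    "    args['arg:%s_bias' % name] = mx.nd.array(bias.data)\n",
    "mx.nd.save('vanilla-0001.params', args)"
   ]
  },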
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load MXNet model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import mxnet as mx\n",
    "\n",
    "# Load epoch 1 of the \"vanilla\" checkpoint (vanilla-symbol.json + vanilla-0001.params)\n",
    "model = mx.model.FeedForward.load('vanilla', 1, numpy_batch_size=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "b = np.zeros((1, 3, 40, 40), dtype=np.float32)  # reorder channels into shape (1, 3, 40, 40)\n",
    "b[0, 0, :, :] = img[:, :, 2]\n",
    "b[0, 1, :, :] = img[:, :, 1]\n",
    "b[0, 2, :, :] = img[:, :, 0]  # swap OpenCV's BGR order to RGB for MXNet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[-0.22493497 -0.2411584   0.18771638 -0.25451207  0.0202535  -0.02075362\n",
      "  -0.23037851  0.14349589  0.2007134   0.13110715]]\n",
      "[ 35.20832443  33.13172531  88.0276947   31.42245483  66.59244537\n",
      "  61.34353638  34.5115509   82.36747742  89.6913147   80.78171539]\n"
     ]
    }
   ],
   "source": [
    "points = model.predict(b)\n",
    "print(points)\n",
    "\n",
    "landmark = (points + 0.5) * 128  # map to pixel coordinates\n",
    "print(landmark[0])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = cv2.imread(\"./test.png\")\n",
    "\n",
    "landmark = landmark[0]\n",
    "# cv2.circle expects integer coordinates\n",
    "for i in range(0, 10, 2):\n",
    "    cv2.circle(a, (int(landmark[i]), int(landmark[i + 1])), 2, (0, 255, 0))\n",
    "\n",
    "cv2.imwrite(\"landmark.jpg\", a)  # save the annotated image"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}

--------------------------------------------------------------------------------
/vanilla-symbol.json:
--------------------------------------------------------------------------------
{
  "nodes": [
    {
      "op": "null",
      "param": {},
      "name": "data",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv1_weight",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv1_bias",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "Convolution",
      "param": {
        "dilate": "(1,1)",
        "kernel": "(5,5)",
        "no_bias": "False",
        "num_filter": "16",
        "num_group": "1",
        "pad": "(2,2)",
        "stride": "(1,1)",
        "workspace": "512"
      },
      "name": "Conv1",
      "inputs": [[0, 0], [1, 0], [2, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Activation",
      "param": {"act_type": "tanh"},
      "name": "ActivationTangH1",
      "inputs": [[3, 0]],
      "backward_source_id": -1
    },
    {
      "op": "abs",
      "param": {},
      "name": "ActivationAbs1",
      "inputs": [[4, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Pooling",
      "param": {
        "kernel": "(2,2)",
        "pad": "(0,0)",
        "pool_type": "max",
        "stride": "(2,2)"
      },
      "name": "Pool1",
      "inputs": [[5, 0]],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv2_weight",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv2_bias",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "Convolution",
      "param": {
        "dilate": "(1,1)",
        "kernel": "(3,3)",
        "no_bias": "False",
        "num_filter": "48",
        "num_group": "1",
        "pad": "(1,1)",
        "stride": "(1,1)",
        "workspace": "512"
      },
      "name": "Conv2",
      "inputs": [[6, 0], [7, 0], [8, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Activation",
      "param": {"act_type": "tanh"},
      "name": "ActivationTangH2",
      "inputs": [[9, 0]],
      "backward_source_id": -1
    },
    {
      "op": "abs",
      "param": {},
      "name": "ActivationAbs2",
      "inputs": [[10, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Pooling",
      "param": {
        "kernel": "(2,2)",
        "pad": "(0,0)",
        "pool_type": "max",
        "stride": "(2,2)"
      },
      "name": "Pool2",
      "inputs": [[11, 0]],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv3_weight",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv3_bias",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "Convolution",
      "param": {
        "dilate": "(1,1)",
        "kernel": "(3,3)",
        "no_bias": "False",
        "num_filter": "64",
        "num_group": "1",
        "pad": "(0,0)",
        "stride": "(1,1)",
        "workspace": "512"
      },
      "name": "Conv3",
      "inputs": [[12, 0], [13, 0], [14, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Activation",
      "param": {"act_type": "tanh"},
      "name": "ActivationTangH3",
      "inputs": [[15, 0]],
      "backward_source_id": -1
    },
    {
      "op": "abs",
      "param": {},
      "name": "ActivationAbs3",
      "inputs": [[16, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Pooling",
      "param": {
        "kernel": "(3,3)",
        "pad": "(0,0)",
        "pool_type": "max",
        "stride": "(2,2)"
      },
      "name": "Pool3",
      "inputs": [[17, 0]],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv4_weight",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Conv4_bias",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "Convolution",
      "param": {
        "dilate": "(1,1)",
        "kernel": "(2,2)",
        "no_bias": "False",
        "num_filter": "64",
        "num_group": "1",
        "pad": "(0,0)",
        "stride": "(1,1)",
        "workspace": "512"
      },
      "name": "Conv4",
      "inputs": [[18, 0], [19, 0], [20, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Activation",
      "param": {"act_type": "tanh"},
      "name": "ActivationTangH4",
      "inputs": [[21, 0]],
      "backward_source_id": -1
    },
    {
      "op": "abs",
      "param": {},
      "name": "ActivationAbs4",
      "inputs": [[22, 0]],
      "backward_source_id": -1
    },
    {
      "op": "Flatten",
      "param": {},
      "name": "flatten_0",
      "inputs": [[23, 0]],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Dense1_weight",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "null",
      "param": {},
      "name": "Dense1_bias",
      "inputs": [],
      "backward_source_id": -1
    },
    {
      "op": "FullyConnected",
      "param": {
        "no_bias": "False",
        "num_hidden": "100"
      },
      "name": "Dense1",
      "inputs": [[24, 0], [25, 0], [26, 0]],
      "backward_source_id": -1
    },
"ActivationTangH5", 257 | "inputs": [[27, 0]], 258 | "backward_source_id": -1 259 | }, 260 | { 261 | "op": "abs", 262 | "param": {}, 263 | "name": "ActivationAbs5", 264 | "inputs": [[28, 0]], 265 | "backward_source_id": -1 266 | }, 267 | { 268 | "op": "null", 269 | "param": {}, 270 | "name": "Dense2_weight", 271 | "inputs": [], 272 | "backward_source_id": -1 273 | }, 274 | { 275 | "op": "null", 276 | "param": {}, 277 | "name": "Dense2_bias", 278 | "inputs": [], 279 | "backward_source_id": -1 280 | }, 281 | { 282 | "op": "FullyConnected", 283 | "param": { 284 | "no_bias": "False", 285 | "num_hidden": "10" 286 | }, 287 | "name": "Dense2", 288 | "inputs": [[29, 0], [30, 0], [31, 0]], 289 | "backward_source_id": -1 290 | } 291 | ], 292 | "arg_nodes": [ 293 | 0, 294 | 1, 295 | 2, 296 | 7, 297 | 8, 298 | 13, 299 | 14, 300 | 19, 301 | 20, 302 | 25, 303 | 26, 304 | 30, 305 | 31 306 | ], 307 | "heads": [[32, 0]] 308 | } --------------------------------------------------------------------------------