├── .gitignore ├── LICENSE ├── README.md ├── config ├── README_FOR_COMMON_ISSUE └── test.json ├── data ├── 0.jpg ├── 1.jpg └── 2.jpg ├── data_loader.py ├── figures ├── shuffle.PNG └── unit.PNG ├── layers.py ├── main.py ├── model.py ├── summarizer.py ├── train.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | experiments/ 3 | prepare_data.py 4 | floyd_requirements.txt 5 | .floyd* 6 | __pycache__/ 7 | extracted_model/ 8 | checkpoint 9 | tester.py 10 | converter.py 11 | *.pkl 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # ShuffleNet
2 | A TensorFlow implementation of `ShuffleNet`. According to the authors, `ShuffleNet` is a computationally efficient CNN architecture designed specifically for mobile devices with very limited computing power. It outperforms `Google MobileNet` by a
3 | small margin in error rate at much lower FLOPs.
4 | 5 | Link to the original paper: [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices](https://arxiv.org/abs/1707.01083) 6 | 7 | 8 | ## ShuffleNet Unit 9 |
<div align="center">
10 | <img src="figures/unit.PNG">
11 | </div>
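
The figure's two variants (a stride-1 unit with add fusion, and a stride-2 unit whose average-pooled shortcut is concatenated with the branch output) correspond directly to how `shufflenet_unit` in `layers.py` is invoked by `model.py`. Here is a minimal sketch of wiring both variants together; the placeholder shapes and unit names are illustrative, everything else is the repo's actual API:

```python
import tensorflow as tf
from layers import shufflenet_unit

# Illustrative input: a stage-2 feature map with 240 channels (num_groups = 3).
x = tf.placeholder(tf.float32, [1, 28, 28, 240])
is_training = tf.placeholder(tf.bool)

# Stride-2 unit: the 3x3 average-pooled shortcut is concatenated with the branch.
down = shufflenet_unit('unit_s2', x=x, w=None, num_groups=3, group_conv_bottleneck=True,
                       num_filters=480, stride=(2, 2), fusion='concat',
                       batchnorm_enabled=True, is_training=is_training)

# Stride-1 unit: the identity shortcut is added to the branch.
keep = shufflenet_unit('unit_s1', x=down, w=None, num_groups=3, group_conv_bottleneck=True,
                       num_filters=480, stride=(1, 1), fusion='add',
                       batchnorm_enabled=True, is_training=is_training)
```
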
12 |
13 | ### Group Convolutions
14 | The paper uses the group convolution operator. However, that operator is not implemented in the TensorFlow backend, so I implemented it using standard graph operations (split, convolve, concatenate), as sketched below.
15 |
16 | This issue was discussed here: [Support Channel groups in convolutional layers #10482](https://github.com/tensorflow/tensorflow/pull/10482)
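
The idea, as implemented in `grouped_conv2d` in `layers.py`, is to slice the input channels into `num_groups` groups, run an ordinary convolution on each slice, and concatenate the results along the channel axis. A minimal self-contained sketch (the function and variable names here are illustrative, not part of the repo):

```python
import tensorflow as tf

def grouped_conv2d_sketch(x, num_filters, num_groups, kernel_size=(1, 1)):
    """Emulate a group convolution: split channels into groups,
    convolve each group independently, then concatenate the results."""
    group_size = x.get_shape()[3].value // num_groups  # channels per group
    outputs = []
    for i in range(num_groups):
        # Slice the i-th channel group out of the input tensor
        x_group = x[:, :, :, i * group_size:(i + 1) * group_size]
        w = tf.get_variable('w_%d' % i,
                            [kernel_size[0], kernel_size[1], group_size, num_filters // num_groups],
                            initializer=tf.contrib.layers.xavier_initializer())
        outputs.append(tf.nn.conv2d(x_group, w, strides=[1, 1, 1, 1], padding='SAME'))
    # Concatenating the per-group outputs along the channel axis restores (N, H, W, num_filters)
    return tf.concat(outputs, axis=-1)
```
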
17 | ## Channel Shuffling
18 | <div align="center">
19 | <img src="figures/shuffle.PNG">
20 | </div>
21 |
22 | ### Channel Shuffling can be achieved by applying three operations:
23 | 1. Reshaping the input tensor from (N, H, W, C) into (N, H, W, G, C').
24 | 2. Performing a matrix transpose on the two dimensions (G, C').
25 | 3. Reshaping the tensor back into (N, H, W, C).
26 |
27 | N: Batch size,
28 | H: Feature map height,
29 | W: Feature map width,
30 | C: Number of channels,
31 | G: Number of groups,
32 | C': Number of channels / Number of groups
33 |
34 | Note that the number of channels should be divisible by the number of groups.
35 |
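These three steps map one-to-one onto the `channel_shuffle` function that appears later in `layers.py`; a standalone version of that routine looks like this:

```python
import tensorflow as tf

def channel_shuffle_sketch(x, num_groups):
    """Shuffle channels: (N, H, W, C) -> reshape -> transpose -> reshape back."""
    n, h, w, c = x.shape.as_list()
    x_reshaped = tf.reshape(x, [-1, h, w, num_groups, c // num_groups])  # step 1
    x_transposed = tf.transpose(x_reshaped, [0, 1, 2, 4, 3])             # step 2
    return tf.reshape(x_transposed, [-1, h, w, c])                       # step 3
```

As a sanity check: with G = 3 and C = 6, channels ordered [0 1 2 3 4 5] come out as [0 2 4 1 3 5], which is exactly the interleaving shown in the figure above.
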
36 | ## Usage
37 | ### Main Dependencies
38 | ```
39 | Python 3 or above
40 | tensorflow 1.3.0
41 | numpy 1.13.1
42 | tqdm 4.15.0
43 | easydict 1.7
44 | matplotlib 2.0.2
45 | ```
46 | ### Train and Test
47 | 1. Prepare your data, and modify the `data_loader.py/DataLoader/load_data()` method.
48 | 2. Modify `config/test.json` to meet your needs.
49 |
50 | ### Run
51 | ```
52 | python main.py --config config/test.json
53 | ```
54 |
55 | ## Results
56 | The model has successfully overfitted TinyImageNet-200, which was presented in [CS231n - Convolutional Neural Networks for Visual Recognition](https://tiny-imagenet.herokuapp.com/). Training on ImageNet is in progress.
57 |
58 | ## Benchmarking
59 | The paper reports 140 MFLOPs for the vanilla version. Using the group convolution operator implemented here in TensorFlow, the profiler reports approximately 270 MFLOPs. The paper counts one multiplication plus one addition as a single unit, so dividing 270 by two shows that this implementation roughly matches the figure the paper proposes.
60 |
61 | To calculate the FLOPs in TensorFlow, make sure to set the batch size equal to 1, and execute the following line once the model is loaded into memory.
62 | ```
63 | tf.profiler.profile(
64 |         tf.get_default_graph(),
65 |         options=tf.profiler.ProfileOptionBuilder.float_operation(), cmd='scope')
66 | ```
67 |
68 | ## TODO
69 | * Training on ImageNet dataset. In progress...
70 |
71 | ## Updates
72 | * Inference and training are working properly.
73 |
74 | ## License
75 | This project is licensed under the Apache License 2.0 - see the LICENSE file for details.
76 |
77 | ## Acknowledgments
78 | Thanks to all who helped me with this work, and special thanks to my colleagues [Mo'men Abdelrazek](https://github.com/moemen95) and [Mohamed Zahran](https://github.com/moh3th1).
79 |
-------------------------------------------------------------------------------- /config/README_FOR_COMMON_ISSUE: --------------------------------------------------------------------------------
1 | Please ignore the line "pretrained_path": "weights.pkl" in test.json.
2 | I used weights.pkl to save/load weights from/into the model.
3 | Don't worry: its presence in the config file is harmless, because loading it is wrapped in a try-except in train.py.
4 | When I finish training on ImageNet, I will upload the file immediately.
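
For reference, train.py expects weights.pkl to be a pickled Python dict mapping variable names (without the ":0" suffix) to numpy arrays; utils.load_obj unpickles it and Train matches the keys against the graph's variables. A minimal sketch of producing such a file, where the entries shown are illustrative (shapes taken from the model's conv1 layer):

```python
import pickle
import numpy as np

# Hypothetical example: variable names (without the ':0' suffix) -> numpy arrays.
weights = {
    'conv1/conv/weights': np.random.randn(3, 3, 3, 24).astype(np.float32),
    'conv1/conv/biases': np.zeros(24, dtype=np.float32),
}

with open('weights.pkl', 'wb') as f:
    pickle.dump(weights, f)
```
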
5 |
-------------------------------------------------------------------------------- /config/test.json: --------------------------------------------------------------------------------
1 | {
2 |   "experiment_dir": "test_experiment",
3 |   "num_epochs": 100,
4 |   "num_classes": 1000,
5 |   "batch_size": 1,
6 |   "num_groups": 3,
7 |   "shuffle": true,
8 |   "l2_strength": 4e-5,
9 |   "bias": 0.0,
10 |   "learning_rate": 1e-3,
11 |   "batchnorm_enabled": true,
12 |   "max_to_keep": 4,
13 |   "save_model_every": 5,
14 |   "test_every": 5,
15 |   "train_or_test": "train",
16 |   "pretrained_path": "weights.pkl"
17 | }
-------------------------------------------------------------------------------- /data/0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MG2033/ShuffleNet/b686a2d92e1636c3bdbaec006664a1502ead2040/data/0.jpg
-------------------------------------------------------------------------------- /data/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MG2033/ShuffleNet/b686a2d92e1636c3bdbaec006664a1502ead2040/data/1.jpg
-------------------------------------------------------------------------------- /data/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MG2033/ShuffleNet/b686a2d92e1636c3bdbaec006664a1502ead2040/data/2.jpg
-------------------------------------------------------------------------------- /data_loader.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class DataLoader:
5 |     """Data Loader class. As a simple case, the model is tried on TinyImageNet. For larger datasets,
6 |     you may need to adapt this class to use the TensorFlow Dataset API."""
7 |
8 |     def __init__(self, batch_size, shuffle=False):
9 |         self.X_train = None
10 |         self.X_mean = None
11 |         self.y_train = None
12 |         self.train_data_len = 0
13 |
14 |         self.X_val = None
15 |         self.y_val = None
16 |         self.val_data_len = 0
17 |
18 |         self.X_test = None
19 |         self.y_test = None
20 |         self.test_data_len = 0
21 |
22 |         self.shuffle = shuffle
23 |         self.batch_size = batch_size
24 |
25 |     def load_data(self):
26 |         # This method is an example of loading a dataset. Change it to suit your needs.
27 |         import matplotlib.pyplot as plt
28 |         # To run the same experiment as the paper, the input images should be resized to 224x224.
29 |         train_data = np.array([plt.imread('./data/0.jpg')], dtype=np.float32)
30 |         self.X_train = train_data
31 |         self.y_train = np.array([283], dtype=np.int32)
32 |
33 |         val_data = np.array([plt.imread('./data/0.jpg')], dtype=np.float32)
34 |         self.X_val = val_data
35 |         self.y_val = np.array([283], dtype=np.int32)
36 |
37 |         self.train_data_len = self.X_train.shape[0]
38 |         self.val_data_len = self.X_val.shape[0]
39 |         img_height = 224
40 |         img_width = 224
41 |         num_channels = 3
42 |         return img_height, img_width, num_channels, self.train_data_len, self.val_data_len
43 |
44 |     def generate_batch(self, type='train'):
45 |         """Generate batches from X_train/X_test and y_train/y_test using a plain Python generator"""
46 |         if type == 'train':
47 |             # Training time!
48 | new_epoch = True 49 | start_idx = 0 50 | mask = None 51 | while True: 52 | if new_epoch: 53 | start_idx = 0 54 | if self.shuffle: 55 | mask = np.random.choice(self.train_data_len, self.train_data_len, replace=False) 56 | else: 57 | mask = np.arange(self.train_data_len) 58 | new_epoch = False 59 | 60 | # Batch mask selection 61 | X_batch = self.X_train[mask[start_idx:start_idx + self.batch_size]] 62 | y_batch = self.y_train[mask[start_idx:start_idx + self.batch_size]] 63 | start_idx += self.batch_size 64 | 65 | # Reset everything after the end of an epoch 66 | if start_idx >= self.train_data_len: 67 | new_epoch = True 68 | mask = None 69 | yield X_batch, y_batch 70 | elif type == 'test': 71 | # Testing time! 72 | start_idx = 0 73 | while True: 74 | # Batch mask selection 75 | X_batch = self.X_test[start_idx:start_idx + self.batch_size] 76 | y_batch = self.y_test[start_idx:start_idx + self.batch_size] 77 | start_idx += self.batch_size 78 | 79 | # Reset everything 80 | if start_idx >= self.test_data_len: 81 | start_idx = 0 82 | yield X_batch, y_batch 83 | elif type == 'val': 84 | # Testing time! 85 | start_idx = 0 86 | while True: 87 | # Batch mask selection 88 | X_batch = self.X_val[start_idx:start_idx + self.batch_size] 89 | y_batch = self.y_val[start_idx:start_idx + self.batch_size] 90 | start_idx += self.batch_size 91 | 92 | # Reset everything 93 | if start_idx >= self.val_data_len: 94 | start_idx = 0 95 | yield X_batch, y_batch 96 | else: 97 | raise ValueError("Please select a type from \'train\', \'val\', or \'test\'") 98 | -------------------------------------------------------------------------------- /figures/shuffle.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MG2033/ShuffleNet/b686a2d92e1636c3bdbaec006664a1502ead2040/figures/shuffle.PNG -------------------------------------------------------------------------------- /figures/unit.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MG2033/ShuffleNet/b686a2d92e1636c3bdbaec006664a1502ead2040/figures/unit.PNG -------------------------------------------------------------------------------- /layers.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | 5 | ############################################################################################################ 6 | # Convolution layer Methods 7 | def __conv2d_p(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1), 8 | initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0): 9 | """ 10 | Convolution 2D Wrapper 11 | :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope. 12 | :param x: (tf.tensor) The input to the layer (N, H, W, C). 13 | :param w: (tf.tensor) pretrained weights (if None, it means no pretrained weights) 14 | :param num_filters: (integer) No. of filters (This is the output depth) 15 | :param kernel_size: (integer tuple) The size of the convolving kernel. 16 | :param padding: (string) The amount of padding required. 17 | :param stride: (integer tuple) The stride required. 18 | :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended. 19 | :param l2_strength:(weight decay) (float) L2 regularization parameter. 20 | :param bias: (float) Amount of bias. 
(if not float, it means pretrained bias) 21 | :return out: The output of the layer. (N, H', W', num_filters) 22 | """ 23 | with tf.variable_scope(name): 24 | stride = [1, stride[0], stride[1], 1] 25 | kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], num_filters] 26 | 27 | with tf.name_scope('layer_weights'): 28 | if w == None: 29 | w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength) 30 | __variable_summaries(w) 31 | with tf.name_scope('layer_biases'): 32 | if isinstance(bias, float): 33 | bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias)) 34 | __variable_summaries(bias) 35 | with tf.name_scope('layer_conv2d'): 36 | conv = tf.nn.conv2d(x, w, stride, padding) 37 | out = tf.nn.bias_add(conv, bias) 38 | 39 | return out 40 | 41 | 42 | def conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1), 43 | initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0, 44 | activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1, 45 | is_training=True): 46 | """ 47 | This block is responsible for a convolution 2D layer followed by optional (non-linearity, dropout, max-pooling). 48 | Note that: "is_training" should be passed by a correct value based on being in either training or testing. 49 | :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope. 50 | :param x: (tf.tensor) The input to the layer (N, H, W, C). 51 | :param num_filters: (integer) No. of filters (This is the output depth) 52 | :param kernel_size: (integer tuple) The size of the convolving kernel. 53 | :param padding: (string) The amount of padding required. 54 | :param stride: (integer tuple) The stride required. 55 | :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended. 56 | :param l2_strength:(weight decay) (float) L2 regularization parameter. 57 | :param bias: (float) Amount of bias. 58 | :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied. 59 | :param batchnorm_enabled: (boolean) for enabling batch normalization. 60 | :param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2. 61 | :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout 62 | :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout) 63 | :return: The output tensor of the layer (N, H', W', C'). 
64 | """ 65 | with tf.variable_scope(name) as scope: 66 | conv_o_b = __conv2d_p('conv', x=x, w=w, num_filters=num_filters, kernel_size=kernel_size, stride=stride, 67 | padding=padding, 68 | initializer=initializer, l2_strength=l2_strength, bias=bias) 69 | 70 | if batchnorm_enabled: 71 | conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training, epsilon=1e-5) 72 | if not activation: 73 | conv_a = conv_o_bn 74 | else: 75 | conv_a = activation(conv_o_bn) 76 | else: 77 | if not activation: 78 | conv_a = conv_o_b 79 | else: 80 | conv_a = activation(conv_o_b) 81 | 82 | def dropout_with_keep(): 83 | return tf.nn.dropout(conv_a, dropout_keep_prob) 84 | 85 | def dropout_no_keep(): 86 | return tf.nn.dropout(conv_a, 1.0) 87 | 88 | if dropout_keep_prob != -1: 89 | conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep) 90 | else: 91 | conv_o_dr = conv_a 92 | 93 | conv_o = conv_o_dr 94 | if max_pool_enabled: 95 | conv_o = max_pool_2d(conv_o_dr) 96 | 97 | return conv_o 98 | 99 | 100 | def grouped_conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1), 101 | initializer=tf.contrib.layers.xavier_initializer(), num_groups=1, l2_strength=0.0, bias=0.0, 102 | activation=None, batchnorm_enabled=False, dropout_keep_prob=-1, 103 | is_training=True): 104 | with tf.variable_scope(name) as scope: 105 | sz = x.get_shape()[3].value // num_groups 106 | conv_side_layers = [ 107 | conv2d(name + "_" + str(i), x[:, :, :, i * sz:i * sz + sz], w, num_filters // num_groups, kernel_size, 108 | padding, 109 | stride, 110 | initializer, 111 | l2_strength, bias, activation=None, 112 | batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=dropout_keep_prob, 113 | is_training=is_training) for i in 114 | range(num_groups)] 115 | conv_g = tf.concat(conv_side_layers, axis=-1) 116 | 117 | if batchnorm_enabled: 118 | conv_o_bn = tf.layers.batch_normalization(conv_g, training=is_training, epsilon=1e-5) 119 | if not activation: 120 | conv_a = conv_o_bn 121 | else: 122 | conv_a = activation(conv_o_bn) 123 | else: 124 | if not activation: 125 | conv_a = conv_g 126 | else: 127 | conv_a = activation(conv_g) 128 | 129 | return conv_a 130 | 131 | 132 | def __depthwise_conv2d_p(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1), 133 | initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0): 134 | with tf.variable_scope(name): 135 | stride = [1, stride[0], stride[1], 1] 136 | kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], 1] 137 | 138 | with tf.name_scope('layer_weights'): 139 | if w is None: 140 | w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength) 141 | __variable_summaries(w) 142 | with tf.name_scope('layer_biases'): 143 | if isinstance(bias, float): 144 | bias = tf.get_variable('biases', [x.shape[-1]], initializer=tf.constant_initializer(bias)) 145 | __variable_summaries(bias) 146 | with tf.name_scope('layer_conv2d'): 147 | conv = tf.nn.depthwise_conv2d(x, w, stride, padding) 148 | out = tf.nn.bias_add(conv, bias) 149 | 150 | return out 151 | 152 | 153 | def depthwise_conv2d(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1), 154 | initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0, activation=None, 155 | batchnorm_enabled=False, is_training=True): 156 | with tf.variable_scope(name) as scope: 157 | conv_o_b = __depthwise_conv2d_p(name='conv', x=x, w=w, kernel_size=kernel_size, padding=padding, 158 | stride=stride, initializer=initializer, 
l2_strength=l2_strength, bias=bias) 159 | 160 | if batchnorm_enabled: 161 | conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training, epsilon=1e-5) 162 | if not activation: 163 | conv_a = conv_o_bn 164 | else: 165 | conv_a = activation(conv_o_bn) 166 | else: 167 | if not activation: 168 | conv_a = conv_o_b 169 | else: 170 | conv_a = activation(conv_o_b) 171 | return conv_a 172 | 173 | 174 | ############################################################################################################ 175 | # ShuffleNet unit methods 176 | 177 | def shufflenet_unit(name, x, w=None, num_groups=1, group_conv_bottleneck=True, num_filters=16, stride=(1, 1), 178 | l2_strength=0.0, bias=0.0, batchnorm_enabled=True, is_training=True, fusion='add'): 179 | # Paper parameters. If you want to change them feel free to pass them as method parameters. 180 | activation = tf.nn.relu 181 | 182 | with tf.variable_scope(name) as scope: 183 | residual = x 184 | bottleneck_filters = (num_filters // 4) if fusion == 'add' else (num_filters - residual.get_shape()[ 185 | 3].value) // 4 186 | 187 | if group_conv_bottleneck: 188 | bottleneck = grouped_conv2d('Gbottleneck', x=x, w=None, num_filters=bottleneck_filters, kernel_size=(1, 1), 189 | padding='VALID', 190 | num_groups=num_groups, l2_strength=l2_strength, bias=bias, 191 | activation=activation, 192 | batchnorm_enabled=batchnorm_enabled, is_training=is_training) 193 | shuffled = channel_shuffle('channel_shuffle', bottleneck, num_groups) 194 | else: 195 | bottleneck = conv2d('bottleneck', x=x, w=None, num_filters=bottleneck_filters, kernel_size=(1, 1), 196 | padding='VALID', l2_strength=l2_strength, bias=bias, activation=activation, 197 | batchnorm_enabled=batchnorm_enabled, is_training=is_training) 198 | shuffled = bottleneck 199 | padded = tf.pad(shuffled, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT") 200 | depthwise = depthwise_conv2d('depthwise', x=padded, w=None, stride=stride, l2_strength=l2_strength, 201 | padding='VALID', bias=bias, 202 | activation=None, batchnorm_enabled=batchnorm_enabled, is_training=is_training) 203 | if stride == (2, 2): 204 | residual_pooled = avg_pool_2d(residual, size=(3, 3), stride=stride, padding='SAME') 205 | else: 206 | residual_pooled = residual 207 | 208 | if fusion == 'concat': 209 | group_conv1x1 = grouped_conv2d('Gconv1x1', x=depthwise, w=None, 210 | num_filters=num_filters - residual.get_shape()[3].value, 211 | kernel_size=(1, 1), 212 | padding='VALID', 213 | num_groups=num_groups, l2_strength=l2_strength, bias=bias, 214 | activation=None, 215 | batchnorm_enabled=batchnorm_enabled, is_training=is_training) 216 | return activation(tf.concat([residual_pooled, group_conv1x1], axis=-1)) 217 | elif fusion == 'add': 218 | group_conv1x1 = grouped_conv2d('Gconv1x1', x=depthwise, w=None, 219 | num_filters=num_filters, 220 | kernel_size=(1, 1), 221 | padding='VALID', 222 | num_groups=num_groups, l2_strength=l2_strength, bias=bias, 223 | activation=None, 224 | batchnorm_enabled=batchnorm_enabled, is_training=is_training) 225 | residual_match = residual_pooled 226 | # This is used if the number of filters of the residual block is different from that 227 | # of the group convolution. 
228 | if num_filters != residual_pooled.get_shape()[3].value: 229 | residual_match = conv2d('residual_match', x=residual_pooled, w=None, num_filters=num_filters, 230 | kernel_size=(1, 1), 231 | padding='VALID', l2_strength=l2_strength, bias=bias, activation=None, 232 | batchnorm_enabled=batchnorm_enabled, is_training=is_training) 233 | return activation(group_conv1x1 + residual_match) 234 | else: 235 | raise ValueError("Specify whether the fusion is \'concat\' or \'add\'") 236 | 237 | 238 | def channel_shuffle(name, x, num_groups): 239 | with tf.variable_scope(name) as scope: 240 | n, h, w, c = x.shape.as_list() 241 | x_reshaped = tf.reshape(x, [-1, h, w, num_groups, c // num_groups]) 242 | x_transposed = tf.transpose(x_reshaped, [0, 1, 2, 4, 3]) 243 | output = tf.reshape(x_transposed, [-1, h, w, c]) 244 | return output 245 | 246 | 247 | ############################################################################################################ 248 | # Fully Connected layer Methods 249 | 250 | def __dense_p(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, 251 | bias=0.0): 252 | """ 253 | Fully connected layer 254 | :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope. 255 | :param x: (tf.tensor) The input to the layer (N, D). 256 | :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)] 257 | :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended. 258 | :param l2_strength:(weight decay) (float) L2 regularization parameter. 259 | :param bias: (float) Amount of bias. (if not float, it means pretrained bias) 260 | :return out: The output of the layer. (N, H) 261 | """ 262 | n_in = x.get_shape()[-1].value 263 | with tf.variable_scope(name): 264 | if w == None: 265 | w = __variable_with_weight_decay([n_in, output_dim], initializer, l2_strength) 266 | __variable_summaries(w) 267 | if isinstance(bias, float): 268 | bias = tf.get_variable("layer_biases", [output_dim], tf.float32, tf.constant_initializer(bias)) 269 | __variable_summaries(bias) 270 | output = tf.nn.bias_add(tf.matmul(x, w), bias) 271 | return output 272 | 273 | 274 | def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, 275 | bias=0.0, 276 | activation=None, batchnorm_enabled=False, dropout_keep_prob=-1, 277 | is_training=True 278 | ): 279 | """ 280 | This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling). 281 | Note that: "is_training" should be passed by a correct value based on being in either training or testing. 282 | :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope. 283 | :param x: (tf.tensor) The input to the layer (N, D). 284 | :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)] 285 | :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended. 286 | :param l2_strength:(weight decay) (float) L2 regularization parameter. 287 | :param bias: (float) Amount of bias. 288 | :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied. 289 | :param batchnorm_enabled: (boolean) for enabling batch normalization. 
290 | :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout 291 | :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout) 292 | :return out: The output of the layer. (N, H) 293 | """ 294 | with tf.variable_scope(name) as scope: 295 | dense_o_b = __dense_p(name='dense', x=x, w=w, output_dim=output_dim, initializer=initializer, 296 | l2_strength=l2_strength, 297 | bias=bias) 298 | 299 | if batchnorm_enabled: 300 | dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training, epsilon=1e-5) 301 | if not activation: 302 | dense_a = dense_o_bn 303 | else: 304 | dense_a = activation(dense_o_bn) 305 | else: 306 | if not activation: 307 | dense_a = dense_o_b 308 | else: 309 | dense_a = activation(dense_o_b) 310 | 311 | def dropout_with_keep(): 312 | return tf.nn.dropout(dense_a, dropout_keep_prob) 313 | 314 | def dropout_no_keep(): 315 | return tf.nn.dropout(dense_a, 1.0) 316 | 317 | if dropout_keep_prob != -1: 318 | dense_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep) 319 | else: 320 | dense_o_dr = dense_a 321 | 322 | dense_o = dense_o_dr 323 | return dense_o 324 | 325 | 326 | def flatten(x): 327 | """ 328 | Flatten a (N,H,W,C) input into (N,D) output. Used for fully connected layers after conolution layers 329 | :param x: (tf.tensor) representing input 330 | :return: flattened output 331 | """ 332 | all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]]) 333 | o = tf.reshape(x, [-1, all_dims_exc_first]) 334 | return o 335 | 336 | 337 | ############################################################################################################ 338 | # Pooling Methods 339 | 340 | def max_pool_2d(x, size=(2, 2), stride=(2, 2), name='pooling'): 341 | """ 342 | Max pooling 2D Wrapper 343 | :param x: (tf.tensor) The input to the layer (N,H,W,C). 344 | :param size: (tuple) This specifies the size of the filter as well as the stride. 345 | :param name: (string) Scope name. 346 | :return: The output is the same input but halfed in both width and height (N,H/2,W/2,C). 347 | """ 348 | size_x, size_y = size 349 | stride_x, stride_y = stride 350 | return tf.nn.max_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding='VALID', 351 | name=name) 352 | 353 | 354 | def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'): 355 | """ 356 | Average pooling 2D Wrapper 357 | :param x: (tf.tensor) The input to the layer (N,H,W,C). 358 | :param size: (tuple) This specifies the size of the filter as well as the stride. 359 | :param name: (string) Scope name. 360 | :return: The output is the same input but halfed in both width and height (N,H/2,W/2,C). 361 | """ 362 | size_x, size_y = size 363 | stride_x, stride_y = stride 364 | return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding, 365 | name=name) 366 | 367 | 368 | ############################################################################################################ 369 | # Utilities for layers 370 | 371 | def __variable_with_weight_decay(kernel_shape, initializer, wd): 372 | """ 373 | Create a variable with L2 Regularization (Weight Decay) 374 | :param kernel_shape: the size of the convolving weight kernel. 375 | :param initializer: The initialization scheme, He et al. normal or Xavier normal are recommended. 376 | :param wd:(weight decay) L2 regularization parameter. 
377 | :return: The weights of the kernel initialized. The L2 loss is added to the loss collection. 378 | """ 379 | w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer) 380 | 381 | collection_name = tf.GraphKeys.REGULARIZATION_LOSSES 382 | if wd and (not tf.get_variable_scope().reuse): 383 | weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss') 384 | tf.add_to_collection(collection_name, weight_decay) 385 | return w 386 | 387 | 388 | # Summaries for variables 389 | def __variable_summaries(var): 390 | """ 391 | Attach a lot of summaries to a Tensor (for TensorBoard visualization). 392 | :param var: variable to be summarized 393 | :return: None 394 | """ 395 | with tf.name_scope('summaries'): 396 | mean = tf.reduce_mean(var) 397 | tf.summary.scalar('mean', mean) 398 | with tf.name_scope('stddev'): 399 | stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) 400 | tf.summary.scalar('stddev', stddev) 401 | tf.summary.scalar('max', tf.reduce_max(var)) 402 | tf.summary.scalar('min', tf.reduce_min(var)) 403 | tf.summary.histogram('histogram', var) 404 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from utils import parse_args, create_experiment_dirs, calculate_flops, show_parameters 2 | from model import ShuffleNet 3 | from train import Train 4 | from data_loader import DataLoader 5 | from summarizer import Summarizer 6 | import tensorflow as tf 7 | 8 | 9 | def main(): 10 | # Parse the JSON arguments 11 | config_args = parse_args() 12 | 13 | # Create the experiment directories 14 | _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(config_args.experiment_dir) 15 | 16 | # Reset the default Tensorflow graph 17 | tf.reset_default_graph() 18 | 19 | # Tensorflow specific configuration 20 | config = tf.ConfigProto(allow_soft_placement=True) 21 | config.gpu_options.allow_growth = True 22 | sess = tf.Session(config=config) 23 | 24 | # Data loading 25 | # The batch size is equal to 1 when testing to simulate the real experiment. 26 | data_batch_size = config_args.batch_size if config_args.train_or_test == "train" else 1 27 | data = DataLoader(data_batch_size, config_args.shuffle) 28 | print("Loading Data...") 29 | config_args.img_height, config_args.img_width, config_args.num_channels, \ 30 | config_args.train_data_size, config_args.test_data_size = data.load_data() 31 | print("Data loaded\n\n") 32 | 33 | # Model creation 34 | print("Building the model...") 35 | model = ShuffleNet(config_args) 36 | print("Model is built successfully\n\n") 37 | 38 | # Parameters visualization 39 | show_parameters() 40 | 41 | # Summarizer creation 42 | summarizer = Summarizer(sess, config_args.summary_dir) 43 | # Train class 44 | trainer = Train(sess, model, data, summarizer) 45 | 46 | if config_args.train_or_test == 'train': 47 | try: 48 | # print("FLOPs for batch size = " + str(config_args.batch_size) + "\n") 49 | # calculate_flops() 50 | print("Training...") 51 | trainer.train() 52 | print("Training Finished\n\n") 53 | except KeyboardInterrupt: 54 | trainer.save_model() 55 | 56 | elif config_args.train_or_test == 'test': 57 | # print("FLOPs for single inference \n") 58 | # calculate_flops() 59 | # This can be 'val' or 'test' or even 'train' according to the needs. 
60 | print("Testing...") 61 | trainer.test('val') 62 | print("Testing Finished\n\n") 63 | 64 | else: 65 | raise ValueError("Train or Test options only are allowed") 66 | 67 | 68 | if __name__ == '__main__': 69 | main() 70 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from layers import shufflenet_unit, conv2d, max_pool_2d, avg_pool_2d, dense, flatten 3 | 4 | 5 | class ShuffleNet: 6 | """ShuffleNet is implemented here!""" 7 | MEAN = [103.94, 116.78, 123.68] 8 | NORMALIZER = 0.017 9 | 10 | def __init__(self, args): 11 | self.args = args 12 | self.X = None 13 | self.y = None 14 | self.logits = None 15 | self.is_training = None 16 | self.loss = None 17 | self.regularization_loss = None 18 | self.cross_entropy_loss = None 19 | self.train_op = None 20 | self.accuracy = None 21 | self.y_out_argmax = None 22 | self.summaries_merged = None 23 | 24 | # A number stands for the num_groups 25 | # Output channels for conv1 layer 26 | self.output_channels = {'1': [144, 288, 576], '2': [200, 400, 800], '3': [240, 480, 960], '4': [272, 544, 1088], 27 | '8': [384, 768, 1536], 'conv1': 24} 28 | 29 | self.__build() 30 | 31 | def __init_input(self): 32 | batch_size = self.args.batch_size if self.args.train_or_test == 'train' else 1 33 | with tf.variable_scope('input'): 34 | # Input images 35 | self.X = tf.placeholder(tf.float32, 36 | [batch_size, self.args.img_height, self.args.img_width, 37 | self.args.num_channels]) 38 | # Classification supervision, it's an argmax. Feel free to change it to one-hot, 39 | # but don't forget to change the loss from sparse as well 40 | self.y = tf.placeholder(tf.int32, [batch_size]) 41 | # is_training is for batch normalization and dropout, if they exist 42 | self.is_training = tf.placeholder(tf.bool) 43 | 44 | def __resize(self, x): 45 | return tf.image.resize_bicubic(x, [224, 224]) 46 | 47 | def __stage(self, x, stage=2, repeat=3): 48 | if 2 <= stage <= 4: 49 | stage_layer = shufflenet_unit('stage' + str(stage) + '_0', x=x, w=None, 50 | num_groups=self.args.num_groups, 51 | group_conv_bottleneck=not (stage == 2), 52 | num_filters= 53 | self.output_channels[str(self.args.num_groups)][ 54 | stage - 2], 55 | stride=(2, 2), 56 | fusion='concat', l2_strength=self.args.l2_strength, 57 | bias=self.args.bias, 58 | batchnorm_enabled=self.args.batchnorm_enabled, 59 | is_training=self.is_training) 60 | for i in range(1, repeat + 1): 61 | stage_layer = shufflenet_unit('stage' + str(stage) + '_' + str(i), 62 | x=stage_layer, w=None, 63 | num_groups=self.args.num_groups, 64 | group_conv_bottleneck=True, 65 | num_filters=self.output_channels[ 66 | str(self.args.num_groups)][stage - 2], 67 | stride=(1, 1), 68 | fusion='add', 69 | l2_strength=self.args.l2_strength, 70 | bias=self.args.bias, 71 | batchnorm_enabled=self.args.batchnorm_enabled, 72 | is_training=self.is_training) 73 | return stage_layer 74 | else: 75 | raise ValueError("Stage should be from 2 -> 4") 76 | 77 | def __init_output(self): 78 | with tf.variable_scope('output'): 79 | # Losses 80 | self.regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) 81 | self.cross_entropy_loss = tf.reduce_mean( 82 | tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y, name='loss')) 83 | self.loss = self.regularization_loss + self.cross_entropy_loss 84 | 85 | # Optimizer 86 | update_ops = 
tf.get_collection(tf.GraphKeys.UPDATE_OPS) 87 | with tf.control_dependencies(update_ops): 88 | self.optimizer = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate) 89 | self.train_op = self.optimizer.minimize(self.loss) 90 | # This is for debugging NaNs. Check TensorFlow documentation. 91 | self.check_op = tf.add_check_numerics_ops() 92 | 93 | # Output and Metrics 94 | self.y_out_softmax = tf.nn.softmax(self.logits) 95 | self.y_out_argmax = tf.argmax(self.y_out_softmax, axis=-1, output_type=tf.int32) 96 | self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y, self.y_out_argmax), tf.float32)) 97 | 98 | with tf.name_scope('train-summary-per-iteration'): 99 | tf.summary.scalar('loss', self.loss) 100 | tf.summary.scalar('acc', self.accuracy) 101 | self.summaries_merged = tf.summary.merge_all() 102 | 103 | def __build(self): 104 | self.__init_global_epoch() 105 | self.__init_global_step() 106 | self.__init_input() 107 | 108 | with tf.name_scope('Preprocessing'): 109 | red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3) 110 | preprocessed_input = tf.concat([ 111 | tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER, 112 | tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER, 113 | tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER, 114 | ], 3) 115 | x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT") 116 | conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3), 117 | stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias, 118 | batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training, 119 | activation=tf.nn.relu, padding='VALID') 120 | padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT") 121 | max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool') 122 | stage2 = self.__stage(max_pool, stage=2, repeat=3) 123 | stage3 = self.__stage(stage2, stage=3, repeat=7) 124 | stage4 = self.__stage(stage3, stage=4, repeat=3) 125 | global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID') 126 | 127 | logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes, 128 | kernel_size=(1, 1), 129 | l2_strength=self.args.l2_strength, 130 | bias=self.args.bias, 131 | is_training=self.is_training) 132 | self.logits = flatten(logits_unflattened) 133 | 134 | self.__init_output() 135 | 136 | def __init_global_epoch(self): 137 | """ 138 | Create a global epoch tensor to totally save the process of the training 139 | :return: 140 | """ 141 | with tf.variable_scope('global_epoch'): 142 | self.global_epoch_tensor = tf.Variable(-1, trainable=False, name='global_epoch') 143 | self.global_epoch_input = tf.placeholder('int32', None, name='global_epoch_input') 144 | self.global_epoch_assign_op = self.global_epoch_tensor.assign(self.global_epoch_input) 145 | 146 | def __init_global_step(self): 147 | """ 148 | Create a global step variable to be a reference to the number of iterations 149 | :return: 150 | """ 151 | with tf.variable_scope('global_step'): 152 | self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step') 153 | self.global_step_input = tf.placeholder('int32', None, name='global_step_input') 154 | self.global_step_assign_op = self.global_step_tensor.assign(self.global_step_input) 155 | -------------------------------------------------------------------------------- /summarizer.py: 
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 |
4 | class Summarizer:
5 |     """The class responsible for TensorBoard summaries such as loss and classification accuracy"""
6 |
7 |     def __init__(self, sess, summary_dir):
8 |         # Summaries
9 |         self.sess = sess
10 |         self.scalar_summary_tags = ['loss', 'acc', 'test-loss', 'test-acc']
11 |         self.summary_tags = []
12 |         self.summary_placeholders = {}
13 |         self.summary_ops = {}
14 |         self.summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
15 |         self.__init_summaries()
16 |
17 |     ############################################################################################################
18 |     # Summaries methods
19 |     def __init_summaries(self):
20 |         """
21 |         Create the summary part of the graph
22 |         :return:
23 |         """
24 |         with tf.variable_scope('train-summary-per-epoch'):
25 |             for tag in self.scalar_summary_tags:
26 |                 self.summary_tags += [tag]  # append the tag itself ('+= tag' would extend the list character by character)
27 |                 self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag)
28 |                 self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag])
29 |
30 |     def add_summary(self, step, summaries_dict=None, summaries_merged=None):
31 |         """
32 |         Add the summaries to TensorBoard
33 |         :param step: the current training step (x-axis of the plots)
34 |         :param summaries_dict: dict of tag -> scalar value to summarize
35 |         :param summaries_merged: an already-evaluated merged summaries tensor
36 |         :return:
37 |         """
38 |         if summaries_dict is not None:
39 |             summary_list = self.sess.run([self.summary_ops[tag] for tag in summaries_dict.keys()],
40 |                                          {self.summary_placeholders[tag]: value for tag, value in
41 |                                           summaries_dict.items()})
42 |             for summary in summary_list:
43 |                 self.summary_writer.add_summary(summary, step)
44 |         if summaries_merged is not None:
45 |             self.summary_writer.add_summary(summaries_merged, step)
--------------------------------------------------------------------------------
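
As used by train.py below, a Summarizer is constructed once per session and then fed either a dict of scalar tags or a pre-evaluated merged summary. A minimal usage sketch; the session and directory here are illustrative, train.py wires this up with the real model and experiment directories:

```python
import tensorflow as tf
from summarizer import Summarizer

sess = tf.Session()
summarizer = Summarizer(sess, summary_dir='experiments/test_experiment/summaries/')

# Per-epoch scalars: the tags must come from scalar_summary_tags above.
summarizer.add_summary(step=100, summaries_dict={'loss': 0.42, 'acc': 0.87})
```

/train.py: --------------------------------------------------------------------------------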
1 | import tensorflow as tf
2 | from tqdm import tqdm
3 | import numpy as np
4 | from utils import load_obj
5 |
6 |
7 | class Train:
8 |     """Trainer class for the CNN.
9 |     It's also responsible for loading/saving the model checkpoints from/to experiments/experiment_name/checkpoint_dir"""
10 |
11 |     def __init__(self, sess, model, data, summarizer):
12 |         self.sess = sess
13 |         self.model = model
14 |         self.args = self.model.args
15 |         self.saver = tf.train.Saver(max_to_keep=self.args.max_to_keep,
16 |                                     keep_checkpoint_every_n_hours=10,
17 |                                     save_relative_paths=True)
18 |         # Summarizer references
19 |         self.data = data
20 |         self.summarizer = summarizer
21 |
22 |         # Initializing the model
23 |         self.init = None
24 |         self.__init_model()
25 |
26 |         # Loading the model checkpoint if exists
27 |         self.__load_imagenet_weights()
28 |         self.__load_model()
29 |
30 |     ############################################################################################################
31 |     # Model related methods
32 |     def __init_model(self):
33 |         print("Initializing the model...")
34 |         self.init = tf.group(tf.global_variables_initializer())
35 |         self.sess.run(self.init)
36 |         print("Model initialized\n\n")
37 |
38 |     def save_model(self):
39 |         """
40 |         Save Model Checkpoint
41 |         :return:
42 |         """
43 |         print("Saving a checkpoint")
44 |         self.saver.save(self.sess, self.args.checkpoint_dir, self.model.global_step_tensor)
45 |         print("Checkpoint Saved\n\n")
46 |
47 |     def __load_model(self):
48 |         latest_checkpoint = tf.train.latest_checkpoint(self.args.checkpoint_dir)
49 |         if latest_checkpoint:
50 |             print("Loading model checkpoint {} ...\n".format(latest_checkpoint))
51 |             self.saver.restore(self.sess, latest_checkpoint)
52 |             print("Checkpoint loaded\n\n")
53 |         else:
54 |             print("First time to train!\n\n")
55 |
56 |     def __load_imagenet_weights(self):
57 |         variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
58 |         try:
59 |             print("Loading ImageNet pretrained weights...")
60 |             pretrained_dict = load_obj(self.args.pretrained_path)  # renamed from 'dict' to avoid shadowing the builtin
61 |             run_list = []
62 |             for variable in variables:
63 |                 for key, value in pretrained_dict.items():
64 |                     # Matching on key + ':' targets the variable itself, not the extra slots
65 |                     # that adaptive optimizers create for it
66 |                     if key + ":" in variable.name:
67 |                         run_list.append(tf.assign(variable, value))
68 |             self.sess.run(run_list)
69 |             print("Weights loaded\n\n")
70 |         except OSError:  # was 'except KeyboardInterrupt', which never fires for a missing weights file
71 |             print("No pretrained ImageNet weights exist.
Skipping...\n\n") 72 | 73 | ############################################################################################################ 74 | # Train and Test methods 75 | def train(self): 76 | for cur_epoch in range(self.model.global_epoch_tensor.eval(self.sess) + 1, self.args.num_epochs + 1, 1): 77 | 78 | # Initialize tqdm 79 | num_iterations = self.args.train_data_size // self.args.batch_size 80 | tqdm_batch = tqdm(self.data.generate_batch(type='train'), total=num_iterations, 81 | desc="Epoch-" + str(cur_epoch) + "-") 82 | 83 | # Initialize the current iterations 84 | cur_iteration = 0 85 | 86 | # Initialize classification accuracy and loss lists 87 | loss_list = [] 88 | acc_list = [] 89 | 90 | # Loop by the number of iterations 91 | for X_batch, y_batch in tqdm_batch: 92 | # Get the current iteration for summarizing it 93 | cur_step = self.model.global_step_tensor.eval(self.sess) 94 | 95 | # Feed this variables to the network 96 | feed_dict = {self.model.X: X_batch, 97 | self.model.y: y_batch, 98 | self.model.is_training: True 99 | } 100 | # Run the feed_forward 101 | _, loss, acc, summaries_merged = self.sess.run( 102 | [self.model.train_op, self.model.loss, self.model.accuracy, self.model.summaries_merged], 103 | feed_dict=feed_dict) 104 | # Append loss and accuracy 105 | loss_list += [loss] 106 | acc_list += [acc] 107 | 108 | # Update the Global step 109 | self.model.global_step_assign_op.eval(session=self.sess, 110 | feed_dict={self.model.global_step_input: cur_step + 1}) 111 | 112 | self.summarizer.add_summary(cur_step, summaries_merged=summaries_merged) 113 | 114 | if cur_iteration >= num_iterations - 1: 115 | avg_loss = np.mean(loss_list) 116 | avg_acc = np.mean(acc_list) 117 | # summarize 118 | summaries_dict = dict() 119 | summaries_dict['loss'] = avg_loss 120 | summaries_dict['acc'] = avg_acc 121 | 122 | # summarize 123 | self.summarizer.add_summary(cur_step, summaries_dict=summaries_dict) 124 | 125 | # Update the Current Epoch tensor 126 | self.model.global_epoch_assign_op.eval(session=self.sess, 127 | feed_dict={self.model.global_epoch_input: cur_epoch + 1}) 128 | 129 | # Print in console 130 | tqdm_batch.close() 131 | print("Epoch-" + str(cur_epoch) + " | " + "loss: " + str(avg_loss) + " -" + " acc: " + str( 132 | avg_acc)[ 133 | :7]) 134 | # Break the loop to finalize this epoch 135 | break 136 | 137 | # Update the current iteration 138 | cur_iteration += 1 139 | 140 | # Save the current checkpoint 141 | if cur_epoch % self.args.save_model_every == 0 and cur_epoch != 0: 142 | self.save_model() 143 | 144 | # Test the model on validation or test data 145 | if cur_epoch % self.args.test_every == 0: 146 | self.test('val') 147 | 148 | def test(self, test_type='val'): 149 | num_iterations = self.args.test_data_size // self.args.batch_size 150 | tqdm_batch = tqdm(self.data.generate_batch(type=test_type), total=num_iterations, 151 | desc='Testing') 152 | # Initialize classification accuracy and loss lists 153 | loss_list = [] 154 | acc_list = [] 155 | cur_iteration = 0 156 | 157 | for X_batch, y_batch in tqdm_batch: 158 | # Feed this variables to the network 159 | feed_dict = {self.model.X: X_batch, 160 | self.model.y: y_batch, 161 | self.model.is_training: False 162 | } 163 | # Run the feed_forward 164 | loss, acc = self.sess.run( 165 | [self.model.loss, self.model.accuracy], 166 | feed_dict=feed_dict) 167 | 168 | # Append loss and accuracy 169 | loss_list += [loss] 170 | acc_list += [acc] 171 | 172 | if cur_iteration >= num_iterations - 1: 173 | avg_loss = np.mean(loss_list) 
174 | avg_acc = np.mean(acc_list) 175 | print('Test results | test_loss: ' + str(avg_loss) + ' - test_acc: ' + str(avg_acc)[:7]) 176 | break 177 | 178 | cur_iteration += 1 179 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import json 3 | import argparse 4 | import os 5 | import tensorflow as tf 6 | from pprint import pprint 7 | import sys 8 | 9 | 10 | def parse_args(): 11 | """ 12 | Parse the arguments of the program 13 | :return: (config_args) 14 | :rtype: tuple 15 | """ 16 | # Create a parser 17 | parser = argparse.ArgumentParser(description="ShuffleNet TensorFlow Implementation") 18 | parser.add_argument('--version', action='version', version='%(prog)s 1.0.0') 19 | parser.add_argument('--config', default=None, type=str, help='Configuration file') 20 | 21 | # Parse the arguments 22 | args = parser.parse_args() 23 | 24 | # Parse the configurations from the config json file provided 25 | try: 26 | if args.config is not None: 27 | with open(args.config, 'r') as config_file: 28 | config_args_dict = json.load(config_file) 29 | else: 30 | print("Add a config file using \'--config file_name.json\'", file=sys.stderr) 31 | exit(1) 32 | 33 | except FileNotFoundError: 34 | print("ERROR: Config file not found: {}".format(args.config), file=sys.stderr) 35 | exit(1) 36 | except json.decoder.JSONDecodeError: 37 | print("ERROR: Config file is not a proper JSON file!", file=sys.stderr) 38 | exit(1) 39 | 40 | config_args = edict(config_args_dict) 41 | 42 | pprint(config_args) 43 | print("\n") 44 | 45 | return config_args 46 | 47 | 48 | def create_experiment_dirs(exp_dir): 49 | """ 50 | Create Directories of a regular tensorflow experiment directory 51 | :param exp_dir: 52 | :return summary_dir, checkpoint_dir: 53 | """ 54 | experiment_dir = os.path.realpath(os.path.join(os.path.dirname(__file__))) + "/experiments/" + exp_dir + "/" 55 | summary_dir = experiment_dir + 'summaries/' 56 | checkpoint_dir = experiment_dir + 'checkpoints/' 57 | # output_dir = experiment_dir + 'output/' 58 | # test_dir = experiment_dir + 'test/' 59 | # dirs = [summary_dir, checkpoint_dir, output_dir, test_dir] 60 | dirs = [summary_dir, checkpoint_dir] 61 | try: 62 | for dir_ in dirs: 63 | if not os.path.exists(dir_): 64 | os.makedirs(dir_) 65 | print("Experiment directories created!") 66 | # return experiment_dir, summary_dir, checkpoint_dir, output_dir, test_dir 67 | return experiment_dir, summary_dir, checkpoint_dir 68 | except Exception as err: 69 | print("Creating directories error: {0}".format(err)) 70 | exit(-1) 71 | 72 | 73 | def calculate_flops(): 74 | # Print to stdout an analysis of the number of floating point operations in the 75 | # model broken down by individual operations. 76 | tf.profiler.profile( 77 | tf.get_default_graph(), 78 | options=tf.profiler.ProfileOptionBuilder.float_operation(), cmd='scope') 79 | 80 | 81 | def show_parameters(): 82 | tf.profiler.profile( 83 | tf.get_default_graph(), 84 | options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter(), cmd='scope') 85 | 86 | 87 | def load_obj(filename): 88 | import pickle 89 | with open(filename, 'rb') as file: 90 | return pickle.load(file) 91 | --------------------------------------------------------------------------------
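
Note that utils.py ships only the load_obj half of the weights.pkl round trip; the scripts that produce the file (prepare_data.py, converter.py) are gitignored. A matching save_obj would be the obvious counterpart. This is a sketch under that assumption, not part of the repo:

```python
def save_obj(obj, filename):
    """Counterpart of load_obj: pickle an object (e.g. a dict of
    variable-name -> numpy array) to disk."""
    import pickle
    with open(filename, 'wb') as file:
        pickle.dump(obj, file)
```
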