├── .gitignore
├── LICENSE
├── README.md
├── figs
│   └── ablation.png
├── ghostnet.py
└── pretrained
    └── ghostnet_1x-f97d70db.pth

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2020 Duo LI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# PyTorch Implementation of GhostNet
Reproduction of the GhostNet architecture as described in [GhostNet: More Features from Cheap Operations](https://arxiv.org/abs/1911.11907) by Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu and Chang Xu, evaluated on the ILSVRC2012 benchmark with the [PyTorch](https://pytorch.org) framework.

# Pretrained Models
| Architecture | # Parameters | MFLOPs | Top-1 / Top-5 Accuracy (%) |
| ----------------- | ------------ | ------ | -------------------------- |
| [GhostNet 1.0x](https://github.com/d-li14/ghostnet.pytorch/blob/master/pretrained/ghostnet_1x-f97d70db.pth) | 5.181M | 140.77 | 73.636 / 91.228 |

```python
import torch

from ghostnet import ghostnet

net = ghostnet()
net.load_state_dict(torch.load('pretrained/ghostnet_1x-f97d70db.pth'))
```
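
A quick smoke test of the loaded model (a minimal sketch, not part of the original repository; the random input stands in for a properly resized and ImageNet-normalized image batch):

```python
import torch

from ghostnet import ghostnet

net = ghostnet()
net.load_state_dict(torch.load('pretrained/ghostnet_1x-f97d70db.pth', map_location='cpu'))
net.eval()  # required: BatchNorm1d in the classifier rejects batch size 1 in train mode

with torch.no_grad():
    logits = net(torch.randn(1, 3, 224, 224))
print(logits.shape)          # torch.Size([1, 1000])
print(logits.argmax(dim=1))  # predicted ILSVRC2012 class index
```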

# Training Strategy
* *batch size* 1024 on 8 GPUs
* *initial learning rate* 0.4
* *weight decay* 0.00004
* *dropout rate* 0.2
* **no** weight decay on BN (see the sketch below the table)

We keep the above settings fixed and conduct experiments with the different training techniques below for ablation and reproduction. During the warmup phase, the learning rate ramps up linearly from 0.1 to 0.4.

| epoch | LR annealing | warmup | label smooth | Top-1 / Top-5 Accuracy (%) |
| :---: | :----------: | :----: | :----------: | :------------------------: |
|  240  |    linear    |   ×    |      ×       |      72.318 / 90.670       |
|  360  |    linear    |   ×    |      ×       |      72.458 / 90.780       |
|  240  |    cosine    |   √    |      ×       |      72.772 / 90.902       |
|  240  |    cosine    |   √    |      √       |      73.636 / 91.228       |
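
The BN exclusion above is commonly implemented by splitting the parameters into two optimizer groups. Below is a minimal sketch under the hyperparameters listed above, reusing `net` from the earlier snippet (the momentum value is an assumption; it is not stated in this repository):

```python
import torch

# Split parameters: 1-D tensors (BatchNorm weights/biases and Linear biases)
# get no weight decay; all other weights use the 4e-5 decay from above.
decay, no_decay = [], []
for param in net.parameters():
    (no_decay if param.dim() == 1 else decay).append(param)

optimizer = torch.optim.SGD(
    [{'params': decay, 'weight_decay': 4e-5},
     {'params': no_decay, 'weight_decay': 0.0}],
    lr=0.4,        # initial learning rate from the settings above
    momentum=0.9)  # assumed; momentum is not specified in this README
```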

# Citation
```
@inproceedings{Han_2020_CVPR,
  title={GhostNet: More Features from Cheap Operations},
  author={Han, Kai and Wang, Yunhe and Tian, Qi and Guo, Jianyuan and Xu, Chunjing and Xu, Chang},
  booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  month={June},
  year={2020}
}
```

--------------------------------------------------------------------------------
/figs/ablation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/d-li14/ghostnet.pytorch/83274b7d33580b07d6629415570e894faada7b64/figs/ablation.png

--------------------------------------------------------------------------------
/ghostnet.py:
--------------------------------------------------------------------------------
"""
Creates a GhostNet model as defined in:
GhostNet: More Features from Cheap Operations
by Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.
https://arxiv.org/abs/1911.11907
Modified from https://github.com/d-li14/mobilenetv3.pytorch
"""
import torch
import torch.nn as nn
import math


__all__ = ['ghostnet']


def _make_divisible(v, divisor, min_value=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible
    by the given divisor. It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class SELayer(nn.Module):
    def __init__(self, channel, reduction=4):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        # Clip the excitation to [0, 1], a cheap stand-in for a sigmoid gate.
        y = torch.clamp(y, 0, 1)
        return x * y


def depthwise_conv(inp, oup, kernel_size=3, stride=1, relu=False):
    return nn.Sequential(
        nn.Conv2d(inp, oup, kernel_size, stride, kernel_size // 2, groups=inp, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True) if relu else nn.Sequential(),
    )


class GhostModule(nn.Module):
    def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):
        super(GhostModule, self).__init__()
        self.oup = oup
        init_channels = math.ceil(oup / ratio)
        new_channels = init_channels * (ratio - 1)

        self.primary_conv = nn.Sequential(
            nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False),
            nn.BatchNorm2d(init_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

        self.cheap_operation = nn.Sequential(
            nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size // 2, groups=init_channels, bias=False),
            nn.BatchNorm2d(new_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        out = torch.cat([x1, x2], dim=1)
        return out[:, :self.oup, :, :]
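
# Channel arithmetic of GhostModule: the primary convolution produces
# init_channels = ceil(oup / ratio) intrinsic maps, and the cheap depthwise
# convolution derives new_channels = init_channels * (ratio - 1) ghost maps
# from them, so the concatenation carries init_channels * ratio >= oup
# channels. The slice out[:, :self.oup, :, :] trims the surplus when oup is
# not an exact multiple of ratio, e.g. oup=15, ratio=2 -> 8 + 8 = 16 maps,
# cut back to 15.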

class GhostBottleneck(nn.Module):
    def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se):
        super(GhostBottleneck, self).__init__()
        assert stride in [1, 2]

        self.conv = nn.Sequential(
            # pw
            GhostModule(inp, hidden_dim, kernel_size=1, relu=True),
            # dw
            depthwise_conv(hidden_dim, hidden_dim, kernel_size, stride, relu=False) if stride == 2 else nn.Sequential(),
            # Squeeze-and-Excite
            SELayer(hidden_dim) if use_se else nn.Sequential(),
            # pw-linear
            GhostModule(hidden_dim, oup, kernel_size=1, relu=False),
        )

        if stride == 1 and inp == oup:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                depthwise_conv(inp, inp, 3, stride, relu=True),
                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)


class GhostNet(nn.Module):
    def __init__(self, cfgs, num_classes=1000, width_mult=1.):
        super(GhostNet, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = cfgs

        # building first layer
        output_channel = _make_divisible(16 * width_mult, 4)
        layers = [nn.Sequential(
            nn.Conv2d(3, output_channel, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channel),
            nn.ReLU(inplace=True)
        )]
        input_channel = output_channel

        # building inverted residual blocks
        block = GhostBottleneck
        for k, exp_size, c, use_se, s in self.cfgs:
            output_channel = _make_divisible(c * width_mult, 4)
            hidden_channel = _make_divisible(exp_size * width_mult, 4)
            layers.append(block(input_channel, hidden_channel, output_channel, k, s, use_se))
            input_channel = output_channel
        self.features = nn.Sequential(*layers)

        # building last several layers; exp_size is still bound to the
        # expansion size of the final bottleneck in cfgs
        output_channel = _make_divisible(exp_size * width_mult, 4)
        self.squeeze = nn.Sequential(
            nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channel),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1)),
        )
        input_channel = output_channel

        output_channel = 1280
        self.classifier = nn.Sequential(
            nn.Linear(input_channel, output_channel, bias=False),
            nn.BatchNorm1d(output_channel),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(output_channel, num_classes),
        )

        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.squeeze(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


def ghostnet(**kwargs):
    """
    Constructs a GhostNet model
    """
    cfgs = [
        # k, t, c, SE, s
        [3,  16,  16, 0, 1],
        [3,  48,  24, 0, 2],
        [3,  72,  24, 0, 1],
        [5,  72,  40, 1, 2],
        [5, 120,  40, 1, 1],
        [3, 240,  80, 0, 2],
        [3, 200,  80, 0, 1],
        [3, 184,  80, 0, 1],
        [3, 184,  80, 0, 1],
        [3, 480, 112, 1, 1],
        [3, 672, 112, 1, 1],
        [5, 672, 160, 1, 2],
        [5, 960, 160, 0, 1],
        [5, 960, 160, 1, 1],
        [5, 960, 160, 0, 1],
        [5, 960, 160, 1, 1]
    ]
    return GhostNet(cfgs, **kwargs)

--------------------------------------------------------------------------------
/pretrained/ghostnet_1x-f97d70db.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/d-li14/ghostnet.pytorch/83274b7d33580b07d6629415570e894faada7b64/pretrained/ghostnet_1x-f97d70db.pth
--------------------------------------------------------------------------------