├── yolox_backbone
│   ├── utils
│   │   ├── __init__.py
│   │   ├── utils.py
│   │   └── torch_utils.py
│   ├── __init__.py
│   └── models
│       ├── __init__.py
│       ├── yolo_fpn.py
│       ├── backbone.py
│       ├── yolo_pafpn.py
│       ├── darknet.py
│       └── network_blocks.py
├── figures
│   └── out_features.png
├── requirements.txt
├── setup.py
├── example.py
├── .gitignore
├── README.md
└── LICENSE

# ---- /yolox_backbone/utils/__init__.py ----
from .torch_utils import *
from .utils import *

# ---- /yolox_backbone/__init__.py ----
# Public API of the package: model factory + model listing.
# setup.py parses __version__ out of this file with a regex.
from .models.backbone import create_model, list_models
__version__= "0.0.1.9"

# ---- /figures/out_features.png ----
https://raw.githubusercontent.com/developer0hye/YOLOX-Backbone/HEAD/figures/out_features.png

# ---- /requirements.txt ----
# TODO: Update with exact module version
numpy
torch>=1.7
torchvision
yolox

# ---- /yolox_backbone/utils/utils.py ----
from urllib import request

def download_from_url(url, filename):
    """Download `url` to the local path `filename` (blocking call)."""
    request.urlretrieve(url, filename)

# ---- /yolox_backbone/models/__init__.py ----
from .backbone import *
from .darknet import *
from .network_blocks import *
from .yolo_fpn import *
from .yolo_pafpn import *

# ---- /yolox_backbone/utils/torch_utils.py ----
def intersect_dicts(da, db, exclude=()):
    """Return the entries of `da` whose key also exists in `db` with the same
    tensor shape, skipping keys that contain any substring in `exclude`.

    Used to keep only the checkpoint weights that fit the current model.
    """
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}

# ---- /setup.py ----
import setuptools
import re

# Read the package version straight out of yolox_backbone/__init__.py so it
# is defined in exactly one place.
with open("yolox_backbone/__init__.py", "r") as f:
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
        f.read(), re.MULTILINE
    ).group(1)

with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name="yolox_backbone",
    version=version,
    license='Apache',
    python_requires=">=3.6",
    author="Yonghye Kwon",
    author_email="developer.0hye@gmail.com",
    description="yolox_backbone is a deep-learning library and is a collection of YOLOX Backbone models.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/developer0hye/YOLOX-Backbone",
    packages=setuptools.find_packages(),
    zip_safe=False,
    keywords=['yolox'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent"
    ],
)

# ---- /example.py ----
import yolox_backbone
import torch
from pprint import pprint

pprint(yolox_backbone.list_models())
model_names = yolox_backbone.list_models() 8 | for model_name in model_names: 9 | print("model_name: ", model_name) 10 | 11 | model = yolox_backbone.create_model(model_name=model_name, 12 | pretrained=True, 13 | out_features=["C3", "C4", "C5"] 14 | ) 15 | model.eval() 16 | 17 | input_tensor = torch.randn((1, 3, 640, 640)) 18 | output_tensor = model(input_tensor) 19 | 20 | c3 = output_tensor["C3"] 21 | c4 = output_tensor["C4"] 22 | c5 = output_tensor["C5"] 23 | 24 | p3 = output_tensor["P3"] 25 | p4 = output_tensor["P4"] 26 | p5 = output_tensor["P5"] 27 | 28 | print("input_tensor.shape: ", input_tensor.shape) 29 | print("c3.shape: ", c3.shape) 30 | print("c4.shape: ", c4.shape) 31 | print("c5.shape: ", c5.shape) 32 | print("p3.shape: ", p3.shape) 33 | print("p4.shape: ", p4.shape) 34 | print("p5.shape: ", p5.shape) 35 | print("-" * 50) 36 | -------------------------------------------------------------------------------- /yolox_backbone/models/yolo_fpn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import torch 6 | import torch.nn as nn 7 | 8 | from .darknet import Darknet 9 | from .network_blocks import BaseConv 10 | 11 | 12 | class YOLOFPN(nn.Module): 13 | """ 14 | YOLOFPN module. Darknet 53 is the default backbone of this model. 
15 | """ 16 | 17 | def __init__( 18 | self, 19 | input_tensor_channels=3, 20 | depth=53, 21 | in_features=["C3", "C4", "C5"], 22 | out_features=["P3", "P4", "P5"], 23 | ): 24 | super().__init__() 25 | 26 | self.backbone = Darknet(depth, in_channels=input_tensor_channels) 27 | self.in_features = in_features 28 | self.out_features = out_features 29 | self.scaling_factor = {"depth": 1.0, "width": 1.0} 30 | self.out_channels = {"P3": 128, "P4": 256, "P5": 512} 31 | 32 | # out 1 33 | self.out1_cbl = self._make_cbl(512, 256, 1) 34 | self.out1 = self._make_embedding([256, 512], 512 + 256) 35 | 36 | # out 2 37 | self.out2_cbl = self._make_cbl(256, 128, 1) 38 | self.out2 = self._make_embedding([128, 256], 256 + 128) 39 | 40 | # upsample 41 | self.upsample = nn.Upsample(scale_factor=2, mode="nearest") 42 | 43 | self.p3_exists = "P3" in self.out_features 44 | self.p4_exists = "P4" in self.out_features 45 | self.p5_exists = "P5" in self.out_features 46 | 47 | def _make_cbl(self, _in, _out, ks): 48 | return BaseConv(_in, _out, ks, stride=1, act="lrelu") 49 | 50 | def _make_embedding(self, filters_list, in_filters): 51 | m = nn.Sequential( 52 | *[ 53 | self._make_cbl(in_filters, filters_list[0], 1), 54 | self._make_cbl(filters_list[0], filters_list[1], 3), 55 | self._make_cbl(filters_list[1], filters_list[0], 1), 56 | self._make_cbl(filters_list[0], filters_list[1], 3), 57 | self._make_cbl(filters_list[1], filters_list[0], 1), 58 | ] 59 | ) 60 | return m 61 | 62 | def load_pretrained_model(self, filename="./weights/darknet53.mix.pth"): 63 | with open(filename, "rb") as f: 64 | state_dict = torch.load(f, map_location="cpu") 65 | print("loading pretrained weights...") 66 | self.backbone.load_state_dict(state_dict) 67 | 68 | def forward(self, inputs): 69 | """ 70 | Args: 71 | inputs (Tensor): input image. 72 | Returns: 73 | Tuple[Tensor]: FPN output features.. 
74 | """ 75 | # backbone 76 | out_features = self.backbone(inputs) 77 | x2, x1, x0 = [out_features[f] for f in self.in_features] 78 | 79 | if self.p3_exists or self.p4_exists or self.p5_exists: 80 | x2 = out_features["C3"] 81 | x1 = out_features["C4"] 82 | x0 = out_features["C5"] 83 | 84 | out_features["P5"] = x0 85 | 86 | if self.p3_exists or self.p4_exists: 87 | # yolo branch 1 88 | x1_in = self.out1_cbl(x0) 89 | x1_in = self.upsample(x1_in) 90 | x1_in = torch.cat([x1_in, x1], 1) 91 | out_dark4 = self.out1(x1_in) 92 | out_features["P4"] = out_dark4 93 | 94 | if self.p3_exists: 95 | # yolo branch 2 96 | x2_in = self.out2_cbl(out_dark4) 97 | x2_in = self.upsample(x2_in) 98 | x2_in = torch.cat([x2_in, x2], 1) 99 | out_dark3 = self.out2(x2_in) 100 | out_features["P3"] = out_dark3 101 | 102 | return {k:v for k, v in out_features.items() if k in self.out_features} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### Linux ### 2 | *~ 3 | 4 | # user experiments directory 5 | YOLOX_outputs/ 6 | datasets/ 7 | 8 | # temporary files which can be created if a process still has a handle open of a deleted file 9 | .fuse_hidden* 10 | 11 | # KDE directory preferences 12 | .directory 13 | 14 | # Linux trash folder which might appear on any partition or disk 15 | .Trash-* 16 | 17 | # .nfs files are created when an open file is removed but is still being accessed 18 | .nfs* 19 | 20 | ### PyCharm ### 21 | # User-specific stuff 22 | .idea 23 | 24 | # CMake 25 | cmake-build-*/ 26 | 27 | # Mongo Explorer plugin 28 | .idea/**/mongoSettings.xml 29 | 30 | # File-based project format 31 | *.iws 32 | 33 | # IntelliJ 34 | out/ 35 | 36 | # mpeltonen/sbt-idea plugin 37 | .idea_modules/ 38 | 39 | # JIRA plugin 40 | atlassian-ide-plugin.xml 41 | 42 | # Cursive Clojure plugin 43 | .idea/replstate.xml 44 | 45 | # Crashlytics plugin (for Android Studio and IntelliJ) 46 | 
com_crashlytics_export_strings.xml 47 | crashlytics.properties 48 | crashlytics-build.properties 49 | fabric.properties 50 | 51 | # Editor-based Rest Client 52 | .idea/httpRequests 53 | 54 | # Android studio 3.1+ serialized cache file 55 | .idea/caches/build_file_checksums.ser 56 | 57 | # JetBrains templates 58 | **___jb_tmp___ 59 | 60 | ### Python ### 61 | # Byte-compiled / optimized / DLL files 62 | __pycache__/ 63 | *.py[cod] 64 | *$py.class 65 | 66 | # C extensions 67 | *.so 68 | 69 | # Distribution / packaging 70 | .Python 71 | build/ 72 | develop-eggs/ 73 | dist/ 74 | downloads/ 75 | eggs/ 76 | .eggs/ 77 | lib/ 78 | lib64/ 79 | parts/ 80 | sdist/ 81 | var/ 82 | wheels/ 83 | pip-wheel-metadata/ 84 | share/python-wheels/ 85 | *.egg-info/ 86 | .installed.cfg 87 | *.egg 88 | MANIFEST 89 | 90 | # PyInstaller 91 | # Usually these files are written by a python script from a template 92 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 93 | *.manifest 94 | *.spec 95 | 96 | # Installer logs 97 | pip-log.txt 98 | pip-delete-this-directory.txt 99 | 100 | # Unit test / coverage reports 101 | htmlcov/ 102 | .tox/ 103 | .nox/ 104 | .coverage 105 | .coverage.* 106 | .cache 107 | nosetests.xml 108 | coverage.xml 109 | *.cover 110 | .hypothesis/ 111 | .pytest_cache/ 112 | 113 | # Translations 114 | *.mo 115 | *.pot 116 | 117 | # Django stuff: 118 | *.log 119 | local_settings.py 120 | db.sqlite3 121 | 122 | # Flask stuff: 123 | instance/ 124 | .webassets-cache 125 | 126 | # Scrapy stuff: 127 | .scrapy 128 | 129 | # Sphinx documentation 130 | docs/_build/ 131 | docs/build/ 132 | 133 | # PyBuilder 134 | target/ 135 | 136 | # Jupyter Notebook 137 | .ipynb_checkpoints 138 | 139 | # IPython 140 | profile_default/ 141 | ipython_config.py 142 | 143 | # pyenv 144 | .python-version 145 | 146 | # pipenv 147 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
148 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 149 | # having no cross-platform support, pipenv may install dependencies that don’t work, or not 150 | # install all needed dependencies. 151 | #Pipfile.lock 152 | 153 | # celery beat schedule file 154 | celerybeat-schedule 155 | 156 | # SageMath parsed files 157 | *.sage.py 158 | 159 | # Environments 160 | .env 161 | .venv 162 | env/ 163 | venv/ 164 | ENV/ 165 | env.bak/ 166 | venv.bak/ 167 | 168 | # Spyder project settings 169 | .spyderproject 170 | .spyproject 171 | 172 | # Rope project settings 173 | .ropeproject 174 | 175 | # mkdocs documentation 176 | /site 177 | 178 | # mypy 179 | .mypy_cache/ 180 | .dmypy.json 181 | dmypy.json 182 | 183 | # Pyre type checker 184 | .pyre/ 185 | 186 | ### Vim ### 187 | # Swap 188 | [._]*.s[a-v][a-z] 189 | [._]*.sw[a-p] 190 | [._]s[a-rt-v][a-z] 191 | [._]ss[a-gi-z] 192 | [._]sw[a-p] 193 | 194 | # Session 195 | Session.vim 196 | 197 | # Temporary 198 | .netrwhist 199 | # Auto-generated tag files 200 | tags 201 | # Persistent undo 202 | [._]*.un~ 203 | 204 | # output 205 | docs/api 206 | .code-workspace.code-workspace 207 | *.pkl 208 | *.npy 209 | *.pth 210 | *.onnx 211 | *.engine 212 | events.out.tfevents* 213 | 214 | # vscode 215 | *.code-workspace 216 | .vscode 217 | 218 | # vim 219 | .vim 220 | -------------------------------------------------------------------------------- /yolox_backbone/models/backbone.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 
4 | 5 | from .yolo_pafpn import YOLOPAFPN 6 | from .yolo_fpn import YOLOFPN 7 | from ..utils.utils import download_from_url 8 | from ..utils.torch_utils import intersect_dicts 9 | 10 | import torch 11 | import torch.nn as nn 12 | import os 13 | 14 | model_dict = {"yolox-s": {"depth": 0.33, "width": 0.50, "depthwise": False}, 15 | "yolox-m": {"depth": 0.67, "width": 0.75, "depthwise": False}, 16 | "yolox-l": {"depth": 1.0, "width": 1.0, "depthwise": False}, 17 | "yolox-x": {"depth": 1.33, "width": 1.25, "depthwise": False}, 18 | "yolox-nano": {"depth": 0.33, "width": 0.25, "depthwise": True}, 19 | "yolox-tiny": {"depth": 0.33, "width": 0.375, "depthwise": False}, 20 | "yolox-darknet53": {"depth": 53} 21 | } 22 | 23 | model_urls = {"yolox-s": "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s.pth", 24 | "yolox-m": "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m.pth", 25 | "yolox-l": "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l.pth", 26 | "yolox-x": "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x.pth", 27 | "yolox-nano": "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano.pth", 28 | "yolox-tiny": "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny.pth", 29 | "yolox-darknet53" : "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_darknet.pth" 30 | } 31 | 32 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 33 | 34 | def create_model(model_name, 35 | pretrained=False, 36 | input_tensor_channels=3, 37 | out_features=["P3", "P4", "P5"]): 38 | model_name = model_name.lower() 39 | if not model_name in model_dict.keys(): 40 | raise RuntimeError(f"Unknown model {model_name}") 41 | 42 | out_features = list(set(out_features)) 43 | if not all(out_feature in ["C3", "C4", "C5", "P3", "P4", "P5"] for out_feature in out_features): 44 | raise 
RuntimeError(f'The values in out_features must be one of ["C3", "C4", "C5", "P3", "P4", "P5"].') 45 | 46 | Backbone = YOLOFPN if model_name == "yolox-darknet53" else YOLOPAFPN 47 | 48 | model = Backbone(**model_dict[model_name], 49 | input_tensor_channels=input_tensor_channels, 50 | out_features=out_features) 51 | 52 | if pretrained: 53 | 54 | assert input_tensor_channels == 3, f"There are no pretrained weights for the model whose the number of input tensor's channel is {input_tensor_channels}" 55 | 56 | filename = os.path.join(BASE_DIR, model_name + ".pth") 57 | if not os.path.isfile(filename): 58 | download_from_url(url=model_urls[model_name], filename=filename) 59 | 60 | assert os.path.isfile(filename), f"{model_name} weights file doesn't exist" 61 | 62 | try: 63 | chkpt = torch.load(filename) 64 | except: 65 | os.remove(filename) 66 | raise RuntimeError(f'Pretrained {model_name} weights were not properly downloaded. Restart your program! Pretrained weights will be downloaded again.') 67 | 68 | state_dict = chkpt["model"] 69 | backbone_state_dict = {} 70 | for k, v in state_dict.items(): 71 | if "backbone." 
in k: 72 | # (1) k = backbone.backbone.* or (2) k = backbone.* 73 | k = k[9:] 74 | # (1) k = backbone.* or (2) k = * 75 | backbone_state_dict[k] = v 76 | pretrained_model_state_dict = intersect_dicts(backbone_state_dict, model.state_dict()) 77 | 78 | assert len(pretrained_model_state_dict) == len(model.state_dict()) 79 | model.load_state_dict(pretrained_model_state_dict, strict=True) 80 | 81 | 82 | def init_yolo(M): 83 | for m in M.modules(): 84 | if isinstance(m, nn.BatchNorm2d): 85 | m.eps = 1e-3 86 | m.momentum = 0.03 87 | 88 | model.apply(init_yolo) 89 | 90 | return model 91 | 92 | def list_models(): 93 | return [key for key in model_dict.keys()] 94 | -------------------------------------------------------------------------------- /yolox_backbone/models/yolo_pafpn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import torch 6 | import torch.nn as nn 7 | 8 | from .darknet import CSPDarknet 9 | from .network_blocks import BaseConv, CSPLayer, DWConv 10 | 11 | 12 | class YOLOPAFPN(nn.Module): 13 | """ 14 | YOLOv3 model. Darknet 53 is the default backbone of this model. 
15 | """ 16 | 17 | def __init__( 18 | self, 19 | input_tensor_channels=3, 20 | depth=1.0, 21 | width=1.0, 22 | in_features=("C3", "C4", "C5"), 23 | out_features=["P3", "P4", "P5"], 24 | in_channels=[256, 512, 1024], 25 | depthwise=False, 26 | act="silu", 27 | ): 28 | super().__init__() 29 | self.backbone = CSPDarknet(input_tensor_channels, depth, width, depthwise=depthwise, act=act) 30 | self.scaling_factor = {"depth": depth, "width": width} 31 | 32 | self.in_features = in_features 33 | self.out_features = out_features 34 | 35 | self.in_channels = in_channels 36 | self.out_channels = {} 37 | Conv = DWConv if depthwise else BaseConv 38 | 39 | self.upsample = nn.Upsample(scale_factor=2, mode="nearest") 40 | self.lateral_conv0 = BaseConv( 41 | int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act 42 | ) 43 | self.C3_p4 = CSPLayer( 44 | int(2 * in_channels[1] * width), 45 | int(in_channels[1] * width), 46 | round(3 * depth), 47 | False, 48 | depthwise=depthwise, 49 | act=act, 50 | ) # cat 51 | 52 | self.reduce_conv1 = BaseConv( 53 | int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act 54 | ) 55 | self.C3_p3 = CSPLayer( 56 | int(2 * in_channels[0] * width), 57 | int(in_channels[0] * width), 58 | round(3 * depth), 59 | False, 60 | depthwise=depthwise, 61 | act=act, 62 | ) 63 | self.out_channels["P3"] = int(in_channels[0] * width) 64 | 65 | # bottom-up conv 66 | self.bu_conv2 = Conv( 67 | int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act 68 | ) 69 | self.C3_n3 = CSPLayer( 70 | int(2 * in_channels[0] * width), 71 | int(in_channels[1] * width), 72 | round(3 * depth), 73 | False, 74 | depthwise=depthwise, 75 | act=act, 76 | ) 77 | self.out_channels["P4"] = int(in_channels[1] * width) 78 | 79 | # bottom-up conv 80 | self.bu_conv1 = Conv( 81 | int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act 82 | ) 83 | self.C3_n4 = CSPLayer( 84 | int(2 * in_channels[1] * width), 85 | int(in_channels[2] * width), 
86 | round(3 * depth), 87 | False, 88 | depthwise=depthwise, 89 | act=act, 90 | ) 91 | self.out_channels["P5"] = int(in_channels[2] * width) 92 | 93 | self.p3_exists = "P3" in self.out_features 94 | self.p4_exists = "P4" in self.out_features 95 | self.p5_exists = "P5" in self.out_features 96 | 97 | def forward(self, input): 98 | """ 99 | Args: 100 | inputs: input images. 101 | Returns: 102 | Tuple[Tensor]: FPN feature. 103 | """ 104 | 105 | # backbone 106 | out_features = self.backbone(input) 107 | 108 | if self.p3_exists or self.p4_exists or self.p5_exists: 109 | x2 = out_features["C3"] 110 | x1 = out_features["C4"] 111 | x0 = out_features["C5"] 112 | 113 | fpn_out0 = self.lateral_conv0(x0) # 1024->512/32 114 | f_out0 = self.upsample(fpn_out0) # 512/16 115 | f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16 116 | f_out0 = self.C3_p4(f_out0) # 1024->512/16 117 | 118 | fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16 119 | f_out1 = self.upsample(fpn_out1) # 256/8 120 | f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8 121 | pan_out2 = self.C3_p3(f_out1) # 512->256/8 122 | out_features["P3"] = pan_out2 123 | 124 | if self.p4_exists or self.p5_exists: 125 | p_out1 = self.bu_conv2(pan_out2) # 256->256/16 126 | p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16 127 | pan_out1 = self.C3_n3(p_out1) # 512->512/16 128 | out_features["P4"] = pan_out1 129 | 130 | if self.p5_exists: 131 | p_out0 = self.bu_conv1(pan_out1) # 512->512/32 132 | p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32 133 | pan_out0 = self.C3_n4(p_out0) # 1024->1024/32 134 | out_features["P5"] = pan_out0 135 | 136 | return {k:v for k, v in out_features.items() if k in self.out_features} 137 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # YOLOX-Backbone 2 | 3 | `yolox-backbone` is a deep-learning library and collection of 
[YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) backbone models. 4 | 5 | ## Install 6 | 7 | ``` 8 | pip install yolox-backbone 9 | ``` 10 | 11 | 12 | ## Load a Pretrained Model 13 | 14 | Pretrained models can be loaded using yolox_backbone.create_model. 15 | 16 | ```python 17 | import yolox_backbone 18 | 19 | m = yolox_backbone.create_model('yolox-s', pretrained=True) 20 | m.eval() 21 | ``` 22 | 23 | ## Query the architecture information 24 | 25 | After a feature backbone has been created, it can be queried to provide architecture information. The `.scaling_factor` attribute is a dictionary encapsulating the information about the scaling factor. 26 | 27 | ```python 28 | import yolox_backbone 29 | 30 | m = yolox_backbone.create_model('yolox-s', pretrained=True) 31 | print('Network scaling factor: ', m.scaling_factor) 32 | ``` 33 | 34 | Output: 35 | 36 | ```python 37 | Network scaling factor: {'depth': 0.33, 'width': 0.5} 38 | ``` 39 | 40 | ## Query the feature information 41 | 42 | After a feature backbone has been created, it can be queried to provide channel information to the downstream heads without requiring static config or hardcoded constants. The `.out_channels` attribute is a dictionary encapsulating the information about the feature extraction points. 
43 | 44 | ```python 45 | import yolox_backbone 46 | 47 | m = yolox_backbone.create_model('yolox-s', pretrained=True) 48 | print('Feature channels: ', m.out_channels) 49 | ``` 50 | 51 | Output: 52 | 53 | ```python 54 | Feature channels: {'P3': 128, 'P4': 256, 'P5': 512} 55 | ``` 56 | 57 | 58 | ## List Supported Models 59 | 60 | ```python 61 | import yolox_backbone 62 | from pprint import pprint 63 | 64 | model_names = yolox_backbone.list_models() 65 | pprint(model_names) 66 | ``` 67 | 68 | Output: 69 | 70 | ```python 71 | ['yolox-s', 72 | 'yolox-m', 73 | 'yolox-l', 74 | 'yolox-x', 75 | 'yolox-nano', 76 | 'yolox-tiny', 77 | 'yolox-darknet53'] 78 | ``` 79 | 80 | ## Select specific feature levels 81 | 82 | There is one creation argument impacting the output features. 83 | 84 | - `out_features` selects which FPN features to output 85 | 86 | 87 | 88 | ## Support for different number of input channels 89 | 90 | You can create the model without the constraint that the number of input channel is 3. 91 | 92 | But you have to set `pretrained` to `False`. 

```python
import yolox_backbone

model = yolox_backbone.create_model(model_name="yolox-s",
                                    pretrained=False,
                                    input_tensor_channels=4,
                                    out_features=["P3", "P4", "P5"]
                                    )
```

## Example

```python
import yolox_backbone
import torch
from pprint import pprint

pprint(yolox_backbone.list_models())

model_names = yolox_backbone.list_models()
for model_name in model_names:
    print("model_name: ", model_name)
    model = yolox_backbone.create_model(model_name=model_name,
                                        pretrained=True,
                                        out_features=["P3", "P4", "P5"]
                                        )

    input_tensor = torch.randn((1, 3, 640, 640))
    fpn_output_tensors = model(input_tensor)

    p3 = fpn_output_tensors["P3"]
    p4 = fpn_output_tensors["P4"]
    p5 = fpn_output_tensors["P5"]

    print("input_tensor.shape: ", input_tensor.shape)
    print("p3.shape: ", p3.shape)
    print("p4.shape: ", p4.shape)
    print("p5.shape: ", p5.shape)
    print("-" * 50)

```

Output:
```python
['yolox-s', 'yolox-m', 'yolox-l', 'yolox-x', 'yolox-nano', 'yolox-tiny', 'yolox-darknet53']
model_name: yolox-s
input_tensor.shape: torch.Size([1, 3, 640, 640])
p3.shape: torch.Size([1, 128, 80, 80])
p4.shape: torch.Size([1, 256, 40, 40])
p5.shape: torch.Size([1, 512, 20, 20])
--------------------------------------------------
model_name: yolox-m
input_tensor.shape: torch.Size([1, 3, 640, 640])
p3.shape: torch.Size([1, 192, 80, 80])
p4.shape: torch.Size([1, 384, 40, 40])
p5.shape: torch.Size([1, 768, 20, 20])
--------------------------------------------------
model_name: yolox-l
input_tensor.shape: torch.Size([1, 3, 640, 640])
p3.shape: torch.Size([1, 256, 80, 80])
p4.shape: torch.Size([1, 512, 40, 40])
p5.shape: torch.Size([1, 1024, 20, 20])
-------------------------------------------------- 157 | model_name: yolox-x 158 | input_tensor.shape: torch.Size([1, 3, 640, 640]) 159 | p3.shape: torch.Size([1, 320, 80, 80]) 160 | p4.shape: torch.Size([1, 640, 40, 40]) 161 | p5.shape: torch.Size([1, 1280, 20, 20]) 162 | -------------------------------------------------- 163 | model_name: yolox-nano 164 | input_tensor.shape: torch.Size([1, 3, 640, 640]) 165 | p3.shape: torch.Size([1, 64, 80, 80]) 166 | p4.shape: torch.Size([1, 128, 40, 40]) 167 | p5.shape: torch.Size([1, 256, 20, 20]) 168 | -------------------------------------------------- 169 | model_name: yolox-tiny 170 | input_tensor.shape: torch.Size([1, 3, 640, 640]) 171 | p3.shape: torch.Size([1, 96, 80, 80]) 172 | p4.shape: torch.Size([1, 192, 40, 40]) 173 | p5.shape: torch.Size([1, 384, 20, 20]) 174 | -------------------------------------------------- 175 | model_name: yolox-darknet53 176 | input_tensor.shape: torch.Size([1, 3, 640, 640]) 177 | p3.shape: torch.Size([1, 128, 80, 80]) 178 | p4.shape: torch.Size([1, 256, 40, 40]) 179 | p5.shape: torch.Size([1, 512, 20, 20]) 180 | -------------------------------------------------- 181 | ``` 182 | 183 | # Acknowledgement 184 | 185 | The docs are heavily based on [timm docs](https://rwightman.github.io/pytorch-image-models/). Thanks for their awesome works. 186 | -------------------------------------------------------------------------------- /yolox_backbone/models/darknet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | from torch import nn 6 | 7 | from .network_blocks import BaseConv, CSPLayer, DWConv, Focus, ResLayer, SPPBottleneck 8 | 9 | 10 | class Darknet(nn.Module): 11 | # number of blocks from dark2 to dark5. 
12 | depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]} 13 | 14 | def __init__( 15 | self, 16 | depth, 17 | in_channels=3, 18 | stem_out_channels=32, 19 | out_features=["C3", "C4", "C5"], 20 | ): 21 | """ 22 | Args: 23 | depth (int): depth of darknet used in model, usually use [21, 53] for this param. 24 | in_channels (int): number of input channels, for example, use 3 for RGB image. 25 | stem_out_channels (int): number of output chanels of darknet stem. 26 | It decides channels of darknet layer2 to layer5. 27 | out_features (Tuple[str]): desired output layer name. 28 | """ 29 | super().__init__() 30 | assert out_features, "please provide output features of Darknet" 31 | self.out_features = out_features 32 | self.stem = nn.Sequential( 33 | BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"), 34 | *self.make_group_layer(stem_out_channels, num_blocks=1, stride=2), 35 | ) 36 | in_channels = stem_out_channels * 2 # 64 37 | 38 | num_blocks = Darknet.depth2blocks[depth] 39 | # create darknet with `stem_out_channels` and `num_blocks` layers. 40 | # to make model structure more clear, we don't use `for` statement in python. 
41 | self.dark2 = nn.Sequential( 42 | *self.make_group_layer(in_channels, num_blocks[0], stride=2) 43 | ) 44 | in_channels *= 2 # 128 45 | self.dark3 = nn.Sequential( 46 | *self.make_group_layer(in_channels, num_blocks[1], stride=2) 47 | ) 48 | in_channels *= 2 # 256 49 | self.dark4 = nn.Sequential( 50 | *self.make_group_layer(in_channels, num_blocks[2], stride=2) 51 | ) 52 | in_channels *= 2 # 512 53 | 54 | self.dark5 = nn.Sequential( 55 | *self.make_group_layer(in_channels, num_blocks[3], stride=2), 56 | *self.make_spp_block([in_channels, in_channels * 2], in_channels * 2), 57 | ) 58 | 59 | def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1): 60 | "starts with conv layer then has `num_blocks` `ResLayer`" 61 | return [ 62 | BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"), 63 | *[(ResLayer(in_channels * 2)) for _ in range(num_blocks)], 64 | ] 65 | 66 | def make_spp_block(self, filters_list, in_filters): 67 | m = nn.Sequential( 68 | *[ 69 | BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"), 70 | BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), 71 | SPPBottleneck( 72 | in_channels=filters_list[1], 73 | out_channels=filters_list[0], 74 | activation="lrelu", 75 | ), 76 | BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), 77 | BaseConv(filters_list[1], filters_list[0], 1, stride=1, act="lrelu"), 78 | ] 79 | ) 80 | return m 81 | 82 | def forward(self, x): 83 | outputs = {} 84 | x = self.stem(x) 85 | outputs["C1"] = x 86 | x = self.dark2(x) 87 | outputs["C2"] = x 88 | x = self.dark3(x) 89 | outputs["C3"] = x 90 | x = self.dark4(x) 91 | outputs["C4"] = x 92 | x = self.dark5(x) 93 | outputs["C5"] = x 94 | return outputs 95 | 96 | 97 | class CSPDarknet(nn.Module): 98 | def __init__( 99 | self, 100 | input_tensor_channels, 101 | dep_mul, 102 | wid_mul, 103 | out_features=["C3", "C4", "C5"], 104 | depthwise=False, 105 | act="silu", 106 | ): 107 | super().__init__() 108 
| assert out_features, "please provide output features of Darknet" 109 | self.out_features = out_features 110 | Conv = DWConv if depthwise else BaseConv 111 | 112 | base_channels = int(wid_mul * 64) # 64 113 | base_depth = max(round(dep_mul * 3), 1) # 3 114 | 115 | # stem 116 | self.stem = Focus(input_tensor_channels, base_channels, ksize=3, act=act) 117 | 118 | # dark2 119 | self.dark2 = nn.Sequential( 120 | Conv(base_channels, base_channels * 2, 3, 2, act=act), 121 | CSPLayer( 122 | base_channels * 2, 123 | base_channels * 2, 124 | n=base_depth, 125 | depthwise=depthwise, 126 | act=act, 127 | ), 128 | ) 129 | 130 | # dark3 131 | self.dark3 = nn.Sequential( 132 | Conv(base_channels * 2, base_channels * 4, 3, 2, act=act), 133 | CSPLayer( 134 | base_channels * 4, 135 | base_channels * 4, 136 | n=base_depth * 3, 137 | depthwise=depthwise, 138 | act=act, 139 | ), 140 | ) 141 | 142 | # dark4 143 | self.dark4 = nn.Sequential( 144 | Conv(base_channels * 4, base_channels * 8, 3, 2, act=act), 145 | CSPLayer( 146 | base_channels * 8, 147 | base_channels * 8, 148 | n=base_depth * 3, 149 | depthwise=depthwise, 150 | act=act, 151 | ), 152 | ) 153 | 154 | # dark5 155 | self.dark5 = nn.Sequential( 156 | Conv(base_channels * 8, base_channels * 16, 3, 2, act=act), 157 | SPPBottleneck(base_channels * 16, base_channels * 16, activation=act), 158 | CSPLayer( 159 | base_channels * 16, 160 | base_channels * 16, 161 | n=base_depth, 162 | shortcut=False, 163 | depthwise=depthwise, 164 | act=act, 165 | ), 166 | ) 167 | 168 | def forward(self, x): 169 | outputs = {} 170 | x = self.stem(x) 171 | outputs["C1"] = x 172 | x = self.dark2(x) 173 | outputs["C2"] = x 174 | x = self.dark3(x) 175 | outputs["C3"] = x 176 | x = self.dark4(x) 177 | outputs["C4"] = x 178 | x = self.dark5(x) 179 | outputs["C5"] = x 180 | return outputs -------------------------------------------------------------------------------- /yolox_backbone/models/network_blocks.py: 
# --------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.

import torch
import torch.nn as nn


class SiLU(nn.Module):
    """Export-friendly drop-in for nn.SiLU(): computes x * sigmoid(x)."""

    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


def get_activation(name="silu", inplace=True):
    """Build an activation module from its short name.

    Supported names: "silu", "relu", "lrelu" (LeakyReLU with slope 0.1).
    Raises AttributeError for any other name (kept for caller compatibility).
    """
    builders = {
        "silu": lambda: nn.SiLU(inplace=inplace),
        "relu": lambda: nn.ReLU(inplace=inplace),
        "lrelu": lambda: nn.LeakyReLU(0.1, inplace=inplace),
    }
    if name not in builders:
        raise AttributeError("Unsupported act type: {}".format(name))
    return builders[name]()


class BaseConv(nn.Module):
    """A Conv2d -> BatchNorm2d -> activation block with "same" padding."""

    def __init__(
        self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu"
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=ksize,
            stride=stride,
            padding=(ksize - 1) // 2,  # "same" padding for odd kernel sizes
            groups=groups,
            bias=bias,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = get_activation(act, inplace=True)

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        # Used after conv/bn fusion: the batch-norm parameters are assumed to
        # have been folded into the conv weights, so bn is skipped.
        return self.act(self.conv(x))


class DWConv(nn.Module):
    """Depthwise 3x3-style conv followed by a 1x1 pointwise conv."""

    def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"):
        super().__init__()
        # groups == in_channels makes the first conv depthwise.
        self.dconv = BaseConv(
            in_channels,
            in_channels,
            ksize=ksize,
            stride=stride,
            groups=in_channels,
            act=act,
        )
        self.pconv = BaseConv(
            in_channels, out_channels, ksize=1, stride=1, groups=1, act=act
        )

    def forward(self, x):
        return self.pconv(self.dconv(x))
class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with an optional identity skip."""

    def __init__(
        self,
        in_channels,
        out_channels,
        shortcut=True,
        expansion=0.5,
        depthwise=False,
        act="silu",
    ):
        super().__init__()
        hidden_channels = int(out_channels * expansion)
        Conv = DWConv if depthwise else BaseConv
        self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
        self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act)
        # The identity skip is only valid when input and output shapes match.
        self.use_add = shortcut and in_channels == out_channels

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        return out + x if self.use_add else out


class ResLayer(nn.Module):
    """Residual layer: 1x1 squeeze to half the channels, 3x3 expand back, plus skip."""

    def __init__(self, in_channels: int):
        super().__init__()
        half = in_channels // 2
        self.layer1 = BaseConv(in_channels, half, ksize=1, stride=1, act="lrelu")
        self.layer2 = BaseConv(half, in_channels, ksize=3, stride=1, act="lrelu")

    def forward(self, x):
        return x + self.layer2(self.layer1(x))


class SPPBottleneck(nn.Module):
    """Spatial pyramid pooling layer used in YOLOv3-SPP."""

    def __init__(
        self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation="silu"
    ):
        super().__init__()
        hidden_channels = in_channels // 2
        self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation)
        # Parallel max-pools; stride 1 with "same" padding keeps the spatial size.
        pools = [
            nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
            for ks in kernel_sizes
        ]
        self.m = nn.ModuleList(pools)
        # conv1's output is concatenated with one pooled copy per kernel size.
        self.conv2 = BaseConv(
            hidden_channels * (len(kernel_sizes) + 1),
            out_channels,
            1,
            stride=1,
            act=activation,
        )

    def forward(self, x):
        x = self.conv1(x)
        pooled = [pool(x) for pool in self.m]
        return self.conv2(torch.cat([x, *pooled], dim=1))


class CSPLayer(nn.Module):
    """C3 in yolov5, CSP Bottleneck with 3 convolutions."""

    def __init__(
        self,
        in_channels,
        out_channels,
        n=1,
        shortcut=True,
        expansion=0.5,
        depthwise=False,
        act="silu",
    ):
        """
        Args:
            in_channels (int): input channels.
            out_channels (int): output channels.
            n (int): number of Bottlenecks. Default value: 1.
        """
        super().__init__()
        hidden_channels = int(out_channels * expansion)  # hidden channels
        # Two parallel 1x1 projections: conv1 feeds the bottleneck stack,
        # conv2 is the cross-stage branch concatenated back before conv3.
        self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
        self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
        self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)
        blocks = [
            Bottleneck(
                hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act
            )
            for _ in range(n)
        ]
        self.m = nn.Sequential(*blocks)

    def forward(self, x):
        main = self.m(self.conv1(x))
        cross = self.conv2(x)
        return self.conv3(torch.cat((main, cross), dim=1))


class Focus(nn.Module):
    """Focus width and height information into channel space."""

    def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"):
        super().__init__()
        # The space-to-depth rearrangement in forward() quadruples the channels.
        self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act)

    def forward(self, x):
        # (b, c, h, w) -> (b, 4c, h/2, w/2): sample every other pixel into four
        # shifted copies and stack them along the channel axis.
        top_left = x[..., ::2, ::2]
        top_right = x[..., ::2, 1::2]
        bot_left = x[..., 1::2, ::2]
        bot_right = x[..., 1::2, 1::2]
        stacked = torch.cat(
            (
                top_left,
                bot_left,
                top_right,
                bot_right,
            ),
            dim=1,
        )
        return self.conv(stacked)
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021 Megvii, Base Detection 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------