├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── general_questions.md │ ├── feature_request.md │ └── error-report.md ├── CONTRIBUTING.md └── CODE_OF_CONDUCT.md ├── requirements ├── optional.txt ├── build.txt ├── runtime.txt └── tests.txt ├── demo ├── demo.jpg ├── loss_curve.png ├── data_pipeline.png ├── corruptions_sev_3.png ├── inference_demo.py └── webcam_demo.py ├── highlights.png ├── docs ├── requirements.txt ├── index.rst ├── Makefile ├── make.bat └── conf.py ├── mmdet ├── models │ ├── shared_heads │ │ ├── __init__.py │ │ └── res_layer.py │ ├── roi_extractors │ │ └── __init__.py │ ├── plugins │ │ └── __init__.py │ ├── necks │ │ └── __init__.py │ ├── backbones │ │ └── __init__.py │ ├── bbox_heads │ │ └── __init__.py │ ├── registry.py │ ├── utils │ │ ├── scale.py │ │ ├── __init__.py │ │ ├── conv_ws.py │ │ ├── weight_init.py │ │ └── norm.py │ ├── mask_heads │ │ ├── __init__.py │ │ └── htc_mask_head.py │ ├── detectors │ │ ├── atss.py │ │ ├── fcos.py │ │ ├── fovea.py │ │ ├── solo.py │ │ ├── retinanet.py │ │ ├── solov2.py │ │ ├── faster_rcnn.py │ │ ├── mask_rcnn.py │ │ ├── __init__.py │ │ ├── fast_rcnn.py │ │ ├── single_stage.py │ │ └── reppoints_detector.py │ ├── losses │ │ ├── mse_loss.py │ │ ├── accuracy.py │ │ ├── __init__.py │ │ ├── smooth_l1_loss.py │ │ ├── balanced_l1_loss.py │ │ ├── focal_loss.py │ │ └── utils.py │ ├── __init__.py │ ├── builder.py │ └── anchor_heads │ │ └── __init__.py ├── ops │ ├── nms │ │ ├── __init__.py │ │ └── src │ │ │ ├── nms_cuda.cpp │ │ │ └── nms_cpu.cpp │ ├── roi_pool │ │ ├── __init__.py │ │ ├── gradcheck.py │ │ ├── roi_pool.py │ │ └── src │ │ │ └── roi_pool_cuda.cpp │ ├── roi_align │ │ ├── __init__.py │ │ ├── gradcheck.py │ │ ├── src │ │ │ └── roi_align_cuda.cpp │ │ └── roi_align.py │ ├── masked_conv │ │ ├── __init__.py │ │ └── src │ │ │ └── masked_conv2d_cuda.cpp │ ├── sigmoid_focal_loss │ │ ├── __init__.py │ │ ├── sigmoid_focal_loss.py │ │ └── src │ │ │ └── sigmoid_focal_loss.cpp │ ├── utils │ │ ├── __init__.py │ │ └── src │ │ │ └── compiling_info.cpp │ ├── dcn │ │ └── __init__.py │ └── __init__.py ├── __init__.py ├── datasets │ ├── registry.py │ ├── loader │ │ ├── __init__.py │ │ └── build_loader.py │ ├── cityscapes.py │ ├── __init__.py │ ├── voc.py │ ├── pipelines │ │ ├── __init__.py │ │ ├── compose.py │ │ ├── test_aug.py │ │ └── instaboost.py │ ├── wider_face.py │ ├── builder.py │ ├── dataset_wrappers.py │ └── xml_style.py ├── core │ ├── mask │ │ ├── __init__.py │ │ ├── utils.py │ │ └── mask_target.py │ ├── fp16 │ │ ├── __init__.py │ │ └── utils.py │ ├── utils │ │ ├── __init__.py │ │ ├── misc.py │ │ └── dist_utils.py │ ├── bbox │ │ ├── assigners │ │ │ ├── base_assigner.py │ │ │ └── __init__.py │ │ ├── samplers │ │ │ ├── combined_sampler.py │ │ │ ├── __init__.py │ │ │ ├── pseudo_sampler.py │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ ├── random_sampler.py │ │ │ └── ohem_sampler.py │ │ ├── __init__.py │ │ ├── assign_sampling.py │ │ ├── demodata.py │ │ ├── bbox_target.py │ │ └── geometry.py │ ├── __init__.py │ ├── post_processing │ │ ├── __init__.py │ │ └── bbox_nms.py │ ├── anchor │ │ ├── __init__.py │ │ └── point_generator.py │ └── evaluation │ │ ├── __init__.py │ │ └── bbox_overlaps.py ├── utils │ ├── __init__.py │ ├── profiling.py │ ├── logger.py │ └── registry.py └── apis │ └── __init__.py ├── configs ├── reppoints │ └── reppoints.png ├── empirical_attention │ └── README.md ├── pascal_voc │ └── README.md ├── atss │ └── README.md ├── scratch │ └── README.md ├── wider_face │ └── README.md ├── nas_fpn │ └── README.md ├── free_anchor │ └── 
README.md ├── ghm │ └── README.md ├── grid_rcnn │ └── README.md ├── cityscapes │ └── README.md ├── libra_rcnn │ └── README.md ├── gn │ └── README.md ├── ms_rcnn │ └── README.md ├── fcos │ └── README.md └── htc │ └── README.md ├── requirements.txt ├── .style.yapf ├── .gitmodules ├── paddlepaddle └── __init__.py ├── tools ├── dist_train.sh ├── dist_test.sh ├── slurm_test.sh ├── slurm_train.sh ├── coco_eval.py ├── publish_model.py ├── upgrade_model_version.py ├── voc_eval.py ├── get_flops.py └── collect_env.py ├── pytest.ini ├── tests ├── test_utils.py ├── test_nms.py ├── test_async.py └── async_benchmark.py ├── .isort.cfg ├── .pre-commit-config.yaml ├── docker └── Dockerfile ├── LICENSE ├── .travis.yml └── .gitignore /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /requirements/optional.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 2 | imagecorruptions 3 | -------------------------------------------------------------------------------- /demo/demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WXinlong/SOLO/HEAD/demo/demo.jpg -------------------------------------------------------------------------------- /highlights.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WXinlong/SOLO/HEAD/highlights.png -------------------------------------------------------------------------------- /demo/loss_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WXinlong/SOLO/HEAD/demo/loss_curve.png -------------------------------------------------------------------------------- /demo/data_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WXinlong/SOLO/HEAD/demo/data_pipeline.png -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx 3 | sphinx_markdown_tables 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /demo/corruptions_sev_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WXinlong/SOLO/HEAD/demo/corruptions_sev_3.png -------------------------------------------------------------------------------- /mmdet/models/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/nms/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_wrapper import nms, soft_nms 2 | 3 | __all__ = ['nms', 'soft_nms'] 4 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WXinlong/SOLO/HEAD/configs/reppoints/reppoints.png 
-------------------------------------------------------------------------------- /mmdet/ops/roi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_pool import RoIPool, roi_pool 2 | 3 | __all__ = ['roi_pool', 'RoIPool'] 4 | -------------------------------------------------------------------------------- /requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building mmdetection 2 | cython 3 | numpy 4 | torch>=1.1 5 | -------------------------------------------------------------------------------- /mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__, short_version 2 | 3 | __all__ = ['__version__', 'short_version'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/roi_align/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_align import RoIAlign, roi_align 2 | 3 | __all__ = ['roi_align', 'RoIAlign'] 4 | -------------------------------------------------------------------------------- /mmdet/models/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .single_level import SingleRoIExtractor 2 | 3 | __all__ = ['SingleRoIExtractor'] 4 | -------------------------------------------------------------------------------- /mmdet/datasets/registry.py: -------------------------------------------------------------------------------- 1 | from mmdet.utils import Registry 2 | 3 | DATASETS = Registry('dataset') 4 | PIPELINES = Registry('pipeline') 5 | -------------------------------------------------------------------------------- /mmdet/ops/masked_conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .masked_conv import MaskedConv2d, masked_conv2d 2 | 3 | __all__ = ['masked_conv2d', 'MaskedConv2d'] 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/runtime.txt 2 | -r requirements/optional.txt 3 | -r requirements/tests.txt 4 | -r requirements/build.txt 5 | -------------------------------------------------------------------------------- /.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | BASED_ON_STYLE = pep8 3 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 4 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 5 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "paddlepaddle/paddledetection"] 2 | path = paddlepaddle/paddledetection 3 | url = https://github.com/PaddlePaddle/PaddleDetection 4 | -------------------------------------------------------------------------------- /paddlepaddle/__init__.py: -------------------------------------------------------------------------------- 1 | # Author: Acer Zhang 2 | # Datetime: 2021/9/1 3 | # Copyright belongs to the author. 4 | # Please indicate the source for reprinting. 
-------------------------------------------------------------------------------- /mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | from .mask_target import mask_target 2 | from .utils import split_combined_polys 3 | 4 | __all__ = ['split_combined_polys', 'mask_target'] 5 | -------------------------------------------------------------------------------- /mmdet/ops/sigmoid_focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 2 | 3 | __all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss'] 4 | -------------------------------------------------------------------------------- /mmdet/models/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | from .generalized_attention import GeneralizedAttention 2 | from .non_local import NonLocal2D 3 | 4 | __all__ = ['NonLocal2D', 'GeneralizedAttention'] 5 | -------------------------------------------------------------------------------- /mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .bfp import BFP 2 | from .fpn import FPN 3 | from .hrfpn import HRFPN 4 | from .nas_fpn import NASFPN 5 | 6 | __all__ = ['FPN', 'BFP', 'HRFPN', 'NASFPN'] 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/general_questions.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General questions 3 | about: Ask general questions to get help 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | mmcv==0.2.16 3 | numpy 4 | scipy 5 | # need older pillow until torchvision is fixed 6 | Pillow<=6.2.2 7 | six 8 | terminaltables 9 | torch>=1.1 10 | torchvision 11 | -------------------------------------------------------------------------------- /mmdet/core/fp16/__init__.py: -------------------------------------------------------------------------------- 1 | from .decorators import auto_fp16, force_fp32 2 | from .hooks import Fp16OptimizerHook, wrap_fp16_model 3 | 4 | __all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model'] 5 | -------------------------------------------------------------------------------- /mmdet/datasets/loader/__init__.py: -------------------------------------------------------------------------------- 1 | from .build_loader import build_dataloader 2 | from .sampler import DistributedGroupSampler, GroupSampler 3 | 4 | __all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader'] 5 | -------------------------------------------------------------------------------- /mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .hrnet import HRNet 2 | from .resnet import ResNet, make_res_layer 3 | from .resnext import ResNeXt 4 | from .ssd_vgg import SSDVGG 5 | 6 | __all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet'] 7 | -------------------------------------------------------------------------------- /requirements/tests.txt: -------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 
3 | flake8 4 | isort 5 | pytest 6 | pytest-cov 7 | pytest-runner 8 | xdoctest >= 0.10.0 9 | yapf 10 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 11 | kwarray 12 | -------------------------------------------------------------------------------- /mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dist_utils import DistOptimizerHook, allreduce_grads 2 | from .misc import multi_apply, tensor2imgs, unmap 3 | 4 | __all__ = [ 5 | 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'unmap', 6 | 'multi_apply' 7 | ] 8 | -------------------------------------------------------------------------------- /mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | 6 | @abstractmethod 7 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 8 | pass 9 | -------------------------------------------------------------------------------- /mmdet/models/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_head import BBoxHead 2 | from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead 3 | from .double_bbox_head import DoubleConvFCBBoxHead 4 | 5 | __all__ = [ 6 | 'BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead', 'DoubleConvFCBBoxHead' 7 | ] 8 | -------------------------------------------------------------------------------- /tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | CONFIG=$1 6 | GPUS=$2 7 | PORT=${PORT:-29500} 8 | 9 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 10 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 11 | -------------------------------------------------------------------------------- /mmdet/datasets/cityscapes.py: -------------------------------------------------------------------------------- 1 | from .coco import CocoDataset 2 | from .registry import DATASETS 3 | 4 | 5 | @DATASETS.register_module 6 | class CityscapesDataset(CocoDataset): 7 | 8 | CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 9 | 'bicycle') 10 | -------------------------------------------------------------------------------- /mmdet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .flops_counter import get_model_complexity_info 2 | from .logger import get_root_logger, print_log 3 | from .registry import Registry, build_from_cfg 4 | 5 | __all__ = [ 6 | 'Registry', 'build_from_cfg', 'get_model_complexity_info', 7 | 'get_root_logger', 'print_log' 8 | ] 9 | -------------------------------------------------------------------------------- /mmdet/models/registry.py: -------------------------------------------------------------------------------- 1 | from mmdet.utils import Registry 2 | 3 | BACKBONES = Registry('backbone') 4 | NECKS = Registry('neck') 5 | ROI_EXTRACTORS = Registry('roi_extractor') 6 | SHARED_HEADS = Registry('shared_head') 7 | HEADS = Registry('head') 8 | LOSSES = Registry('loss') 9 | DETECTORS = Registry('detector') 10 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | 
[pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | CONFIG=$1 6 | CHECKPOINT=$2 7 | GPUS=$3 8 | PORT=${PORT:-29500} 9 | 10 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 11 | $(dirname "$0")/test_ins.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 12 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import numpy.testing as npt 2 | 3 | from mmdet.utils.flops_counter import params_to_string 4 | 5 | 6 | def test_params_to_string(): 7 | npt.assert_equal(params_to_string(1e9), '1000.0 M') 8 | npt.assert_equal(params_to_string(2e5), '200.0 k') 9 | npt.assert_equal(params_to_string(3e-9), '3e-09') 10 | -------------------------------------------------------------------------------- /mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .evaluation import * # noqa: F401, F403 4 | from .fp16 import * # noqa: F401, F403 5 | from .mask import * # noqa: F401, F403 6 | from .post_processing import * # noqa: F401, F403 7 | from .utils import * # noqa: F401, F403 8 | -------------------------------------------------------------------------------- /mmdet/ops/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # from . import compiling_info 2 | from .compiling_info import get_compiler_version, get_compiling_cuda_version 3 | 4 | # get_compiler_version = compiling_info.get_compiler_version 5 | # get_compiling_cuda_version = compiling_info.get_compiling_cuda_version 6 | 7 | __all__ = ['get_compiler_version', 'get_compiling_cuda_version'] 8 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length = 79 3 | multi_line_output = 0 4 | known_standard_library = setuptools 5 | known_first_party = mmdet 6 | known_third_party = Cython,asynctest,cv2,matplotlib,mmcv,numpy,pycocotools,robustness_eval,roi_align,roi_pool,seaborn,six,terminaltables,torch,torchvision 7 | no_lines_before = STDLIB,LOCALFOLDER 8 | default_section = THIRDPARTY 9 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to MMDetection's documentation! 2 | ======================================= 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | 7 | INSTALL.md 8 | GETTING_STARTED.md 9 | MODEL_ZOO.md 10 | TECHNICAL_DETAILS.md 11 | CHANGELOG.md 12 | 13 | 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`search` 20 | -------------------------------------------------------------------------------- /mmdet/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_nms import multiclass_nms 2 | from .matrix_nms import matrix_nms 3 | from .merge_augs import (merge_aug_bboxes, merge_aug_masks, 4 | merge_aug_proposals, merge_aug_scores) 5 | 6 | __all__ = [ 7 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 8 | 'merge_aug_scores', 'merge_aug_masks', 'matrix_nms' 9 | ] 10 | -------------------------------------------------------------------------------- /mmdet/models/utils/scale.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class Scale(nn.Module): 6 | """ 7 | A learnable scale parameter 8 | """ 9 | 10 | def __init__(self, scale=1.0): 11 | super(Scale, self).__init__() 12 | self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) 13 | 14 | def forward(self, x): 15 | return x * self.scale 16 | -------------------------------------------------------------------------------- /mmdet/models/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .fcn_mask_head import FCNMaskHead 2 | from .fused_semantic_head import FusedSemanticHead 3 | from .grid_head import GridHead 4 | from .htc_mask_head import HTCMaskHead 5 | from .maskiou_head import MaskIoUHead 6 | from .mask_feat_head import MaskFeatHead 7 | 8 | __all__ = [ 9 | 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 10 | 'MaskIoUHead', 'MaskFeatHead' 11 | ] 12 | -------------------------------------------------------------------------------- /mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner 2 | from .assign_result import AssignResult 3 | from .atss_assigner import ATSSAssigner 4 | from .base_assigner import BaseAssigner 5 | from .max_iou_assigner import MaxIoUAssigner 6 | from .point_assigner import PointAssigner 7 | 8 | __all__ = [ 9 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', 10 | 'PointAssigner', 'ATSSAssigner' 11 | ] 12 | -------------------------------------------------------------------------------- /mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import (async_inference_detector, inference_detector, 2 | init_detector, show_result, show_result_pyplot, show_result_ins) 3 | from .train import get_root_logger, set_random_seed, train_detector 4 | 5 | __all__ = [ 6 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 7 | 'async_inference_detector', 'inference_detector', 'show_result', 8 | 'show_result_pyplot', 'show_result_ins' 9 | ] 10 | -------------------------------------------------------------------------------- /mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .conv_module import ConvModule, build_conv_layer 2 | from .conv_ws import ConvWS2d, conv_ws_2d 3 | from .norm import build_norm_layer 4 | from .scale import Scale 5 | from 
.weight_init import (bias_init_with_prob, kaiming_init, normal_init, 6 | uniform_init, xavier_init) 7 | 8 | __all__ = [ 9 | 'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule', 10 | 'build_norm_layer', 'xavier_init', 'normal_init', 'uniform_init', 11 | 'kaiming_init', 'bias_init_with_prob', 'Scale' 12 | ] 13 | -------------------------------------------------------------------------------- /mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import AnchorGenerator 2 | from .anchor_target import (anchor_inside_flags, anchor_target, 3 | images_to_levels, unmap) 4 | from .guided_anchor_target import ga_loc_target, ga_shape_target 5 | from .point_generator import PointGenerator 6 | from .point_target import point_target 7 | 8 | __all__ = [ 9 | 'AnchorGenerator', 'anchor_target', 'anchor_inside_flags', 'ga_loc_target', 10 | 'ga_shape_target', 'PointGenerator', 'point_target', 'images_to_levels', 11 | 'unmap' 12 | ] 13 | -------------------------------------------------------------------------------- /mmdet/models/detectors/atss.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class ATSS(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdet/models/detectors/fcos.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class FCOS(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdet/models/detectors/fovea.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class FOVEA(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdet/models/detectors/solo.py: -------------------------------------------------------------------------------- 1 | from .single_stage_ins import SingleStageInsDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class SOLO(SingleStageInsDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(SOLO, self).__init__(backbone, neck, bbox_head, None, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdet/models/detectors/retinanet.py: 
-------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class RetinaNet(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdet/ops/roi_pool/gradcheck.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | import torch 5 | from torch.autograd import gradcheck 6 | 7 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 8 | from roi_pool import RoIPool # noqa: E402, isort:skip 9 | 10 | feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda() 11 | rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55], 12 | [1, 67, 40, 110, 120]]).cuda() 13 | inputs = (feat, rois) 14 | print('Gradcheck for roi pooling...') 15 | test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3) 16 | print(test) 17 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/asottile/seed-isort-config 3 | rev: v1.9.3 4 | hooks: 5 | - id: seed-isort-config 6 | - repo: https://github.com/pre-commit/mirrors-isort 7 | rev: v4.3.21 8 | hooks: 9 | - id: isort 10 | - repo: https://github.com/pre-commit/mirrors-yapf 11 | rev: v0.29.0 12 | hooks: 13 | - id: yapf 14 | - repo: https://github.com/pre-commit/pre-commit-hooks 15 | rev: v2.4.0 16 | hooks: 17 | - id: flake8 18 | - id: trailing-whitespace 19 | - id: check-yaml 20 | - id: end-of-file-fixer 21 | - id: requirements-txt-fixer 22 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from ..assign_sampling import build_sampler 2 | from .base_sampler import BaseSampler 3 | 4 | 5 | class CombinedSampler(BaseSampler): 6 | 7 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 8 | super(CombinedSampler, self).__init__(**kwargs) 9 | self.pos_sampler = build_sampler(pos_sampler, **kwargs) 10 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | -------------------------------------------------------------------------------- /mmdet/models/detectors/solov2.py: -------------------------------------------------------------------------------- 1 | from .single_stage_ins import SingleStageInsDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class SOLOv2(SingleStageInsDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | mask_feat_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(SOLOv2, self).__init__(backbone, neck, bbox_head, mask_feat_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /tools/slurm_test.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 24 | -------------------------------------------------------------------------------- /mmdet/ops/dcn/__init__.py: -------------------------------------------------------------------------------- 1 | from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, 2 | ModulatedDeformConvPack, deform_conv, 3 | modulated_deform_conv) 4 | from .deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, 5 | ModulatedDeformRoIPoolingPack, deform_roi_pooling) 6 | 7 | __all__ = [ 8 | 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 9 | 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 10 | 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv', 11 | 'deform_roi_pooling' 12 | ] 13 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .combined_sampler import CombinedSampler 3 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 4 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 5 | from .ohem_sampler import OHEMSampler 6 | from .pseudo_sampler import PseudoSampler 7 | from .random_sampler import RandomSampler 8 | from .sampling_result import SamplingResult 9 | 10 | __all__ = [ 11 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 12 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 13 | 'OHEMSampler', 'SamplingResult' 14 | ] 15 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PYTORCH="1.3" 2 | ARG CUDA="10.1" 3 | ARG CUDNN="7" 4 | 5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel 6 | 7 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" 8 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" 9 | ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" 10 | 11 | RUN apt-get update && apt-get install -y libglib2.0-0 libsm6 libxrender-dev libxext6 \ 12 | && apt-get clean \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 | # Install SOLO 16 | RUN conda install cython -y && conda clean --all 17 | RUN git clone https://github.com/WXinlong/SOLO.git /SOLO 18 | WORKDIR /SOLO 19 | RUN pip install --no-cache-dir -e . 20 | -------------------------------------------------------------------------------- /mmdet/ops/nms/src/nms_cuda.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #include <torch/extension.h> 3 | 4 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x, " must be a CUDAtensor ") 5 | 6 | at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); 7 | 8 | at::Tensor nms(const at::Tensor& dets, const float threshold) { 9 | CHECK_CUDA(dets); 10 | if (dets.numel() == 0) 11 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); 12 | return nms_cuda(dets, threshold); 13 | } 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("nms", &nms, "non-maximum suppression"); 17 | } -------------------------------------------------------------------------------- /tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${5:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | PY_ARGS=${PY_ARGS:-"--validate"} 14 | 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 24 | -------------------------------------------------------------------------------- /demo/inference_demo.py: -------------------------------------------------------------------------------- 1 | from mmdet.apis import init_detector, inference_detector, show_result_pyplot, show_result_ins 2 | import mmcv 3 | 4 | 5 | config_file = '../configs/solo/decoupled_solo_r50_fpn_8gpu_3x.py' 6 | # download the checkpoint from model zoo and put it in `checkpoints/` 7 | checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R50_3x.pth' 8 | 9 | # build the model from a config file and a checkpoint file 10 | model = init_detector(config_file, checkpoint_file, device='cuda:0') 11 | 12 | # test a single image 13 | img = 'demo.jpg' 14 | result = inference_detector(model, img) 15 | 16 | show_result_ins(img, result, model.CLASSES, score_thr=0.25, out_file="demo_out.jpg") 17 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /mmdet/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_dataset 2 | from .cityscapes import CityscapesDataset 3 | from .coco import CocoDataset 4 | from .custom import CustomDataset 5 | from .dataset_wrappers import ConcatDataset, RepeatDataset 6 | from .loader import DistributedGroupSampler, GroupSampler, build_dataloader 7 | from .registry import DATASETS 8 | from .voc import VOCDataset 9 | from .wider_face import WIDERFaceDataset 10 | from .xml_style import XMLDataset 11 | 12 | __all__ = [ 13 | 'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 14 | 'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler', 15 | 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'WIDERFaceDataset', 16 | 'DATASETS', 'build_dataset' 17 | ] 18 | -------------------------------------------------------------------------------- /mmdet/models/losses/mse_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | from ..registry import LOSSES 5 | from .utils import weighted_loss 6 | 7 | mse_loss = weighted_loss(F.mse_loss) 8 | 9 | 10 | @LOSSES.register_module 11 | class MSELoss(nn.Module): 12 | 13 | def __init__(self, reduction='mean', loss_weight=1.0): 14 | super().__init__() 15 | self.reduction = reduction 16 | self.loss_weight = loss_weight 17 | 18 | def forward(self, pred, target, weight=None, avg_factor=None): 19 | loss = self.loss_weight * mse_loss( 20 | pred, 21 | target, 22 | weight, 23 | reduction=self.reduction, 24 | avg_factor=avg_factor) 25 | return loss 26 | -------------------------------------------------------------------------------- /mmdet/core/fp16/utils.py: -------------------------------------------------------------------------------- 1 | from collections import abc 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def cast_tensor_type(inputs, src_type, dst_type): 8 | if isinstance(inputs, torch.Tensor): 9 | return inputs.to(dst_type) 10 | elif isinstance(inputs, str): 11 | return inputs 12 | elif isinstance(inputs, np.ndarray): 13 | return inputs 14 | elif isinstance(inputs, abc.Mapping): 15 | return type(inputs)({ 16 | k: cast_tensor_type(v, src_type, dst_type) 17 | for k, v in inputs.items() 18 | }) 19 | elif isinstance(inputs, abc.Iterable): 20 | return type(inputs)( 21 | cast_tensor_type(item, src_type, dst_type) for item in inputs) 22 | else: 23 | return inputs 24 | -------------------------------------------------------------------------------- /mmdet/datasets/voc.py: -------------------------------------------------------------------------------- 1 | from .registry import DATASETS 2 | from .xml_style import XMLDataset 3 | 4 | 5 | @DATASETS.register_module 6 | class VOCDataset(XMLDataset): 7 | 8 | CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 9 | 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 10 | 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 11 | 'tvmonitor') 12 | 13 | def __init__(self, **kwargs): 14 | super(VOCDataset, self).__init__(**kwargs) 15 | if 'VOC2007' in self.img_prefix: 16 | self.year = 2007 17 | elif 'VOC2012' in self.img_prefix: 18 | self.year = 2012 19 | else: 20 | raise ValueError('Cannot infer dataset year from img_prefix') 21 | 
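The dataset classes above (`CityscapesDataset`, `VOCDataset`) all hang off the same registry pattern: `mmdet/datasets/registry.py` creates a `Registry`, each class registers itself under its class name via the bare `@DATASETS.register_module` decorator, and `build_from_cfg` instantiates whichever class a config dict names in its `type` key. A minimal, self-contained sketch of that pattern (a simplified stand-in for `mmdet.utils.Registry`/`build_from_cfg`, not the actual implementation):

```python
# Simplified sketch of the registry pattern used throughout mmdet.
# Stand-in for mmdet.utils.Registry / build_from_cfg, not the real source.

class Registry:

    def __init__(self, name):
        self.name = name
        self.module_dict = {}

    def register_module(self, cls):
        # used as a bare decorator, e.g. @DATASETS.register_module
        self.module_dict[cls.__name__] = cls
        return cls


def build_from_cfg(cfg, registry, default_args=None):
    # cfg is a dict whose 'type' key names a registered class;
    # the remaining keys become constructor kwargs.
    args = dict(cfg)
    cls = registry.module_dict[args.pop('type')]
    if default_args is not None:
        for k, v in default_args.items():
            args.setdefault(k, v)
    return cls(**args)


DATASETS = Registry('dataset')


@DATASETS.register_module
class ToyDataset:

    def __init__(self, ann_file, img_prefix=''):
        self.ann_file = ann_file
        self.img_prefix = img_prefix


# Config files describe modules as plain dicts; the builder turns them
# into instances by looking the 'type' up in the registry.
dataset = build_from_cfg(dict(type='ToyDataset', ann_file='train.json'), DATASETS)
print(type(dataset).__name__, dataset.ann_file)  # ToyDataset train.json
```

This is why adding a new dataset (or head, loss, detector) only requires defining the class, decorating it, and referencing its name in the config.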
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the feature** 11 | 12 | **Motivation** 13 | A clear and concise description of the motivation of the feature. 14 | Ex1. It is inconvenient when [....]. 15 | Ex2. There is a recent paper [....], which is very helpful for [....]. 16 | 17 | **Related resources** 18 | If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful. 19 | 20 | **Additional context** 21 | Add any other context or screenshots about the feature request here. 22 | If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated. 23 | -------------------------------------------------------------------------------- /mmdet/models/detectors/faster_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class FasterRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | rpn_head, 11 | bbox_roi_extractor, 12 | bbox_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | shared_head=None, 17 | pretrained=None): 18 | super(FasterRCNN, self).__init__( 19 | backbone=backbone, 20 | neck=neck, 21 | shared_head=shared_head, 22 | rpn_head=rpn_head, 23 | bbox_roi_extractor=bbox_roi_extractor, 24 | bbox_head=bbox_head, 25 | train_cfg=train_cfg, 26 | test_cfg=test_cfg, 27 | pretrained=pretrained) 28 | -------------------------------------------------------------------------------- /mmdet/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from .compose import Compose 2 | from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, 3 | Transpose, to_tensor) 4 | from .instaboost import InstaBoost 5 | from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals 6 | from .test_aug import MultiScaleFlipAug 7 | from .transforms import (Albu, Expand, MinIoURandomCrop, Normalize, Pad, 8 | PhotoMetricDistortion, RandomCrop, RandomFlip, Resize, 9 | SegRescale) 10 | 11 | __all__ = [ 12 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 13 | 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', 14 | 'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 15 | 'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand', 16 | 'PhotoMetricDistortion', 'Albu', 'InstaBoost' 17 | ] 18 | -------------------------------------------------------------------------------- /configs/empirical_attention/README.md: -------------------------------------------------------------------------------- 1 | # An Empirical Study of Spatial Attention Mechanisms in Deep Networks 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @article{zhu2019empirical, 7 | title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks}, 8 | author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng}, 9 | journal={arXiv preprint arXiv:1904.05873}, 10 | year={2019} 11 | } 12 | ``` 13 | 14 | 15 | ## Results and Models 16 | 17 | | Backbone | Attention Component 
| DCN | Lr schd | box AP | Download | 18 | |:---------:|:-------------------:|:----:|:-------:|:------:|:--------:| 19 | | R-50 | 1111 | N | 1x | 38.6 | - | 20 | | R-50 | 0010 | N | 1x | 38.2 | - | 21 | | R-50 | 1111 | Y | 1x | 41.0 | - | 22 | | R-50 | 0010 | Y | 1x | 40.8 | - | 23 | 24 | -------------------------------------------------------------------------------- /configs/pascal_voc/README.md: -------------------------------------------------------------------------------- 1 | ### SSD 2 | 3 | | Backbone | Size | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 4 | | :------: | :---: | :---: | :-----: | :------: | :-----------------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------: | 5 | | VGG16 | 300 | caffe | 240e | 2.5 | 0.159 | 35.7 / 53.6 | 77.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd300_voc_vgg16_caffe_240e_20190501-7160d09a.pth) | 6 | | VGG16 | 512 | caffe | 240e | 4.3 | 0.214 | 27.5 / 35.9 | 80.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd512_voc_vgg16_caffe_240e_20190501-ff194be1.pth) | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /mmdet/models/losses/accuracy.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def accuracy(pred, target, topk=1): 5 | assert isinstance(topk, (int, tuple)) 6 | if isinstance(topk, int): 7 | topk = (topk, ) 8 | return_single = True 9 | else: 10 | return_single = False 11 | 12 | maxk = max(topk) 13 | _, pred_label = pred.topk(maxk, dim=1) 14 | pred_label = pred_label.t() 15 | correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) 16 | 17 | res = [] 18 | for k in topk: 19 | correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) 20 | res.append(correct_k.mul_(100.0 / pred.size(0))) 21 | return res[0] if return_single else res 22 | 23 | 24 | class Accuracy(nn.Module): 25 | 26 | def __init__(self, topk=(1, )): 27 | super().__init__() 28 | self.topk = topk 29 | 30 | def forward(self, pred, target): 31 | return accuracy(pred, target, self.topk) 32 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/pseudo_sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .base_sampler import BaseSampler 4 | from .sampling_result import SamplingResult 5 | 6 | 7 | class PseudoSampler(BaseSampler): 8 | 9 | def __init__(self, **kwargs): 10 | pass 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | 18 | def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): 19 | pos_inds = torch.nonzero( 20 | assign_result.gt_inds > 0).squeeze(-1).unique() 21 | neg_inds = torch.nonzero( 22 | assign_result.gt_inds == 0).squeeze(-1).unique() 23 | gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) 24 | sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, 25 | assign_result, gt_flags) 26 | return sampling_result 27 | -------------------------------------------------------------------------------- /configs/atss/README.md: -------------------------------------------------------------------------------- 1 | # Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection 2 | 3 | 4 | ## Introduction 5 | 6 | ``` 7 | @article{zhang2019bridging, 8 | title = {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection}, 9 | author = {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.}, 10 | journal = {arXiv preprint arXiv:1912.02424}, 11 | year = {2019} 12 | } 13 | ``` 14 | 15 | 16 | ## Results and Models 17 | 18 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 19 | |:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 20 | | R-50 | pytorch | 1x | 3.6 | 0.357 | 12.8 | 39.2 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/atss/atss_r50_fpn_1x_20200113-a7aa251e.pth)| 21 | -------------------------------------------------------------------------------- 
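The `accuracy` helper shown above returns top-k accuracy as a percentage: a single tensor when `topk` is an int, a list of tensors when it is a tuple. A quick usage sketch; the function is pure PyTorch, so it can also be copied out and run standalone, while the import path below assumes the compiled package is installed:

```python
import torch

from mmdet.models.losses.accuracy import accuracy

# 4 samples, 3 classes; logits need not be normalized.
pred = torch.tensor([[0.1, 0.8, 0.1],
                     [0.7, 0.2, 0.1],
                     [0.3, 0.3, 0.4],
                     [0.4, 0.5, 0.1]])
target = torch.tensor([1, 0, 2, 0])

print(accuracy(pred, target, topk=1))       # tensor([75.]) -- 3 of 4 top-1 hits
print(accuracy(pred, target, topk=(1, 2)))  # [tensor([75.]), tensor([100.])]
```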
/mmdet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_heads import * # noqa: F401,F403 2 | from .backbones import * # noqa: F401,F403 3 | from .bbox_heads import * # noqa: F401,F403 4 | from .builder import (build_backbone, build_detector, build_head, build_loss, 5 | build_neck, build_roi_extractor, build_shared_head) 6 | from .detectors import * # noqa: F401,F403 7 | from .losses import * # noqa: F401,F403 8 | from .mask_heads import * # noqa: F401,F403 9 | from .necks import * # noqa: F401,F403 10 | from .registry import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, 11 | ROI_EXTRACTORS, SHARED_HEADS) 12 | from .roi_extractors import * # noqa: F401,F403 13 | from .shared_heads import * # noqa: F401,F403 14 | 15 | __all__ = [ 16 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 17 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', 18 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector' 19 | ] 20 | -------------------------------------------------------------------------------- /configs/scratch/README.md: -------------------------------------------------------------------------------- 1 | # Rethinking ImageNet Pre-training 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @article{he2018rethinking, 7 | title={Rethinking imagenet pre-training}, 8 | author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr}, 9 | journal={arXiv preprint arXiv:1811.08883}, 10 | year={2018} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Model | Backbone | Style | Lr schd | box AP | mask AP | Download | 17 | |:------------:|:---------:|:-------:|:-------:|:------:|:-------:|:--------:| 18 | | Faster R-CNN | R-50-FPN | pytorch | 6x | 40.1 | - | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/scratch/scratch_faster_rcnn_r50_fpn_gn_6x_20190515-ff554978.pth) | 19 | | Mask R-CNN | R-50-FPN | pytorch | 6x | 41.0 | 37.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/scratch/scratch_mask_rcnn_r50_fpn_gn_6x_20190515-96743f5e.pth) | 20 | 21 | Note: 22 | - The above models are trained with 16 GPUs. 
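`build_detector`, exported above, is the entry point that turns a config's `model` dict into a live network: it resolves `model['type']` (e.g. `'SOLO'`) in the `DETECTORS` registry, and the detector's constructor in turn builds the backbone, neck and heads the same way. A short usage sketch, assuming the repo is installed and run from its root; the config path is the one referenced by `demo/inference_demo.py`:

```python
from mmcv import Config

from mmdet.models import build_detector

cfg = Config.fromfile('configs/solo/decoupled_solo_r50_fpn_8gpu_3x.py')

# train_cfg/test_cfg are passed through as default args so the
# registered detector class receives them in its constructor.
model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
print(type(model).__name__)
```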
-------------------------------------------------------------------------------- /mmdet/ops/roi_align/gradcheck.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | import numpy as np 5 | import torch 6 | from torch.autograd import gradcheck 7 | 8 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 9 | from roi_align import RoIAlign # noqa: E402, isort:skip 10 | 11 | feat_size = 15 12 | spatial_scale = 1.0 / 8 13 | img_size = feat_size / spatial_scale 14 | num_imgs = 2 15 | num_rois = 20 16 | 17 | batch_ind = np.random.randint(num_imgs, size=(num_rois, 1)) 18 | rois = np.random.rand(num_rois, 4) * img_size * 0.5 19 | rois[:, 2:] += img_size * 0.5 20 | rois = np.hstack((batch_ind, rois)) 21 | 22 | feat = torch.randn( 23 | num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0') 24 | rois = torch.from_numpy(rois).float().cuda() 25 | inputs = (feat, rois) 26 | print('Gradcheck for roi align...') 27 | test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3) 28 | print(test) 29 | test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3) 30 | print(test) 31 | -------------------------------------------------------------------------------- /tools/coco_eval.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | from mmdet.core import coco_eval 4 | 5 | 6 | def main(): 7 | parser = ArgumentParser(description='COCO Evaluation') 8 | parser.add_argument('result', help='result file path') 9 | parser.add_argument('--ann', help='annotation file path') 10 | parser.add_argument( 11 | '--types', 12 | type=str, 13 | nargs='+', 14 | choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'], 15 | default=['bbox'], 16 | help='result types') 17 | parser.add_argument( 18 | '--max-dets', 19 | type=int, 20 | nargs='+', 21 | default=[100, 300, 1000], 22 | help='proposal numbers, only used for recall evaluation') 23 | parser.add_argument( 24 | '--classwise', action='store_true', help='whether eval class wise ap') 25 | args = parser.parse_args() 26 | coco_eval(args.result, args.types, args.ann, args.max_dets, args.classwise) 27 | 28 | 29 | if __name__ == '__main__': 30 | main() 31 | -------------------------------------------------------------------------------- /mmdet/models/detectors/mask_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class MaskRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | rpn_head, 11 | bbox_roi_extractor, 12 | bbox_head, 13 | mask_roi_extractor, 14 | mask_head, 15 | train_cfg, 16 | test_cfg, 17 | neck=None, 18 | shared_head=None, 19 | pretrained=None): 20 | super(MaskRCNN, self).__init__( 21 | backbone=backbone, 22 | neck=neck, 23 | shared_head=shared_head, 24 | rpn_head=rpn_head, 25 | bbox_roi_extractor=bbox_roi_extractor, 26 | bbox_head=bbox_head, 27 | mask_roi_extractor=mask_roi_extractor, 28 | mask_head=mask_head, 29 | train_cfg=train_cfg, 30 | test_cfg=test_cfg, 31 | pretrained=pretrained) 32 | -------------------------------------------------------------------------------- /mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import (coco_classes, dataset_aliases, get_classes, 2 | 
imagenet_det_classes, imagenet_vid_classes, 3 | voc_classes) 4 | from .coco_utils import coco_eval, fast_eval_recall, results2json, results2json_segm 5 | from .eval_hooks import (CocoDistEvalmAPHook, CocoDistEvalRecallHook, 6 | DistEvalHook, DistEvalmAPHook) 7 | from .mean_ap import average_precision, eval_map, print_map_summary 8 | from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, 9 | print_recall_summary) 10 | 11 | __all__ = [ 12 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 13 | 'coco_classes', 'dataset_aliases', 'get_classes', 'coco_eval', 14 | 'fast_eval_recall', 'results2json', 'DistEvalHook', 'DistEvalmAPHook', 15 | 'CocoDistEvalRecallHook', 'CocoDistEvalmAPHook', 'average_precision', 16 | 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 17 | 'plot_num_recall', 'plot_iou_recall', 'results2json_segm' 18 | ] 19 | -------------------------------------------------------------------------------- /mmdet/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from .accuracy import Accuracy, accuracy 2 | from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss 3 | from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, 4 | cross_entropy, mask_cross_entropy) 5 | from .focal_loss import FocalLoss, sigmoid_focal_loss 6 | from .ghm_loss import GHMC, GHMR 7 | from .iou_loss import (BoundedIoULoss, GIoULoss, IoULoss, bounded_iou_loss, 8 | iou_loss) 9 | from .mse_loss import MSELoss, mse_loss 10 | from .smooth_l1_loss import SmoothL1Loss, smooth_l1_loss 11 | from .utils import reduce_loss, weight_reduce_loss, weighted_loss 12 | 13 | __all__ = [ 14 | 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', 15 | 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', 16 | 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', 17 | 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', 18 | 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 19 | 'weight_reduce_loss', 'weighted_loss' 20 | ] 21 | -------------------------------------------------------------------------------- /mmdet/models/detectors/__init__.py: -------------------------------------------------------------------------------- 1 | from .atss import ATSS 2 | from .base import BaseDetector 3 | from .cascade_rcnn import CascadeRCNN 4 | from .double_head_rcnn import DoubleHeadRCNN 5 | from .fast_rcnn import FastRCNN 6 | from .faster_rcnn import FasterRCNN 7 | from .fcos import FCOS 8 | from .fovea import FOVEA 9 | from .grid_rcnn import GridRCNN 10 | from .htc import HybridTaskCascade 11 | from .mask_rcnn import MaskRCNN 12 | from .mask_scoring_rcnn import MaskScoringRCNN 13 | from .reppoints_detector import RepPointsDetector 14 | from .retinanet import RetinaNet 15 | from .rpn import RPN 16 | from .single_stage import SingleStageDetector 17 | from .single_stage_ins import SingleStageInsDetector 18 | from .two_stage import TwoStageDetector 19 | from .solo import SOLO 20 | from .solov2 import SOLOv2 21 | 22 | __all__ = [ 23 | 'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', 24 | 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 25 | 'DoubleHeadRCNN', 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 26 | 'RepPointsDetector', 'FOVEA', 'SingleStageInsDetector', 'SOLO', 'SOLOv2' 27 | ] 28 | -------------------------------------------------------------------------------- 
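`weighted_loss`, exported above, is the decorator that turns a plain element-wise loss into one accepting `weight`, `reduction` and `avg_factor` — exactly how `mse_loss = weighted_loss(F.mse_loss)` was defined earlier. A simplified sketch of its contract (illustrative, not the exact `losses/utils.py` source):

```python
# Simplified sketch of the weighted_loss decorator's contract.
import functools

import torch
import torch.nn.functional as F


def weighted_loss(loss_func):
    """Wrap an element-wise loss so it supports weight/reduction/avg_factor."""

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
                avg_factor=None, **kwargs):
        # compute the raw element-wise loss, same shape as pred
        loss = loss_func(pred, target, reduction='none', **kwargs)
        if weight is not None:
            loss = loss * weight
        if avg_factor is None:
            if reduction == 'mean':
                return loss.mean()
            if reduction == 'sum':
                return loss.sum()
            return loss
        # with avg_factor, 'mean' divides by the given factor instead of numel
        if reduction == 'mean':
            return loss.sum() / avg_factor
        if reduction == 'none':
            return loss
        raise ValueError('avg_factor cannot be used with reduction="sum"')

    return wrapper


mse_loss = weighted_loss(F.mse_loss)

pred = torch.zeros(4)
target = torch.ones(4)
w = torch.tensor([1., 1., 0., 0.])  # mask out the last two elements
print(mse_loss(pred, target))                          # tensor(1.)
print(mse_loss(pred, target, weight=w, avg_factor=2))  # tensor(1.)
```

The `avg_factor` path is what lets detection losses average over the number of positive samples rather than over every anchor.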
/mmdet/ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .context_block import ContextBlock 2 | from .dcn import (DeformConv, DeformConvPack, DeformRoIPooling, 3 | DeformRoIPoolingPack, ModulatedDeformConv, 4 | ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack, 5 | deform_conv, deform_roi_pooling, modulated_deform_conv) 6 | from .masked_conv import MaskedConv2d 7 | from .nms import nms, soft_nms 8 | from .roi_align import RoIAlign, roi_align 9 | from .roi_pool import RoIPool, roi_pool 10 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 11 | from .utils import get_compiler_version, get_compiling_cuda_version 12 | 13 | __all__ = [ 14 | 'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 15 | 'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 16 | 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv', 17 | 'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv', 18 | 'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss', 19 | 'MaskedConv2d', 'ContextBlock', 'get_compiler_version', 20 | 'get_compiling_cuda_version' 21 | ] 22 | -------------------------------------------------------------------------------- /mmdet/models/builder.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | from mmdet.utils import build_from_cfg 4 | from .registry import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, 5 | ROI_EXTRACTORS, SHARED_HEADS) 6 | 7 | 8 | def build(cfg, registry, default_args=None): 9 | if isinstance(cfg, list): 10 | modules = [ 11 | build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg 12 | ] 13 | return nn.Sequential(*modules) 14 | else: 15 | return build_from_cfg(cfg, registry, default_args) 16 | 17 | 18 | def build_backbone(cfg): 19 | return build(cfg, BACKBONES) 20 | 21 | 22 | def build_neck(cfg): 23 | return build(cfg, NECKS) 24 | 25 | 26 | def build_roi_extractor(cfg): 27 | return build(cfg, ROI_EXTRACTORS) 28 | 29 | 30 | def build_shared_head(cfg): 31 | return build(cfg, SHARED_HEADS) 32 | 33 | 34 | def build_head(cfg): 35 | return build(cfg, HEADS) 36 | 37 | 38 | def build_loss(cfg): 39 | return build(cfg, LOSSES) 40 | 41 | 42 | def build_detector(cfg, train_cfg=None, test_cfg=None): 43 | return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg)) 44 | -------------------------------------------------------------------------------- /tools/publish_model.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import subprocess 3 | 4 | import torch 5 | 6 | 7 | def parse_args(): 8 | parser = argparse.ArgumentParser( 9 | description='Process a checkpoint to be published') 10 | parser.add_argument('in_file', help='input checkpoint filename') 11 | parser.add_argument('out_file', help='output checkpoint filename') 12 | args = parser.parse_args() 13 | return args 14 | 15 | 16 | def process_checkpoint(in_file, out_file): 17 | checkpoint = torch.load(in_file, map_location='cpu') 18 | # remove optimizer for smaller file size 19 | if 'optimizer' in checkpoint: 20 | del checkpoint['optimizer'] 21 | # if it is necessary to remove some sensitive data in checkpoint['meta'], 22 | # add the code here. 
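    # For example (hypothetical meta keys -- inspect checkpoint['meta'] first):
    #     checkpoint.get('meta', {}).pop('env_info', None)
    #     checkpoint.get('meta', {}).pop('config', None)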
23 |     torch.save(checkpoint, out_file)
24 |     sha = subprocess.check_output(['sha256sum', out_file]).decode()
25 |     final_file = out_file[:-4] + '-{}.pth'.format(sha[:8])  # slice off the '.pth' suffix; rstrip('.pth') strips characters, not a suffix
26 |     subprocess.Popen(['mv', out_file, final_file])
27 | 
28 | 
29 | def main():
30 |     args = parse_args()
31 |     process_checkpoint(args.in_file, args.out_file)
32 | 
33 | 
34 | if __name__ == '__main__':
35 |     main()
36 | 
--------------------------------------------------------------------------------
/mmdet/core/bbox/__init__.py:
--------------------------------------------------------------------------------
 1 | from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner
 2 | from .bbox_target import bbox_target
 3 | from .geometry import bbox_overlaps
 4 | from .samplers import (BaseSampler, CombinedSampler,
 5 |                        InstanceBalancedPosSampler, IoUBalancedNegSampler,
 6 |                        PseudoSampler, RandomSampler, SamplingResult)
 7 | from .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,
 8 |                          bbox_mapping, bbox_mapping_back, delta2bbox,
 9 |                          distance2bbox, roi2bbox)
10 | 
11 | from .assign_sampling import (  # isort:skip, avoid recursive imports
12 |     assign_and_sample, build_assigner, build_sampler)
13 | 
14 | __all__ = [
15 |     'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
16 |     'BaseSampler', 'PseudoSampler', 'RandomSampler',
17 |     'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
18 |     'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',
19 |     'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',
20 |     'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',
21 |     'distance2bbox', 'bbox_target'
22 | ]
23 | 
--------------------------------------------------------------------------------
/mmdet/models/anchor_heads/__init__.py:
--------------------------------------------------------------------------------
 1 | from .anchor_head import AnchorHead
 2 | from .atss_head import ATSSHead
 3 | from .fcos_head import FCOSHead
 4 | from .fovea_head import FoveaHead
 5 | from .free_anchor_retina_head import FreeAnchorRetinaHead
 6 | from .ga_retina_head import GARetinaHead
 7 | from .ga_rpn_head import GARPNHead
 8 | from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
 9 | from .reppoints_head import RepPointsHead
10 | from .retina_head import RetinaHead
11 | from .retina_sepbn_head import RetinaSepBNHead
12 | from .rpn_head import RPNHead
13 | from .ssd_head import SSDHead
14 | from .solo_head import SOLOHead
15 | from .solov2_head import SOLOv2Head
16 | from .solov2_light_head import SOLOv2LightHead
17 | from .decoupled_solo_head import DecoupledSOLOHead
18 | from .decoupled_solo_light_head import DecoupledSOLOLightHead
19 | 
20 | __all__ = [
21 |     'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead',
22 |     'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', 'SSDHead',
23 |     'FCOSHead', 'RepPointsHead', 'FoveaHead', 'FreeAnchorRetinaHead',
24 |     'ATSSHead', 'SOLOHead', 'SOLOv2Head', 'SOLOv2LightHead',
25 |     'DecoupledSOLOHead', 'DecoupledSOLOLightHead'
26 | ]
27 | 
--------------------------------------------------------------------------------
/mmdet/datasets/pipelines/compose.py:
--------------------------------------------------------------------------------
 1 | import collections
 2 | 
 3 | from mmdet.utils import build_from_cfg
 4 | from ..registry import PIPELINES
 5 | 
 6 | 
 7 | @PIPELINES.register_module
 8 | class Compose(object):
 9 | 
10 |     def __init__(self, transforms):
11 |         assert isinstance(transforms, collections.abc.Sequence)
12 |         self.transforms =
[]
13 |         for transform in transforms:
14 |             if isinstance(transform, dict):
15 |                 transform = build_from_cfg(transform, PIPELINES)
16 |                 self.transforms.append(transform)
17 |             elif callable(transform):
18 |                 self.transforms.append(transform)
19 |             else:
20 |                 raise TypeError('transform must be callable or a dict')
21 | 
22 |     def __call__(self, data):
23 |         for t in self.transforms:
24 |             data = t(data)
25 |             if data is None:
26 |                 return None
27 |         return data
28 | 
29 |     def __repr__(self):
30 |         format_string = self.__class__.__name__ + '('
31 |         for t in self.transforms:
32 |             format_string += '\n'
33 |             format_string += '    {0}'.format(t)
34 |         format_string += '\n)'
35 |         return format_string
36 | 
--------------------------------------------------------------------------------
/configs/wider_face/README.md:
--------------------------------------------------------------------------------
 1 | ## WIDER Face Dataset
 2 | 
 3 | To use the WIDER Face dataset you need to download it
 4 | and extract it to the `data/WIDERFace` folder. Annotations in the VOC format
 5 | can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git).
 6 | You should move the annotation files from the `WIDER_train_annotations` and `WIDER_val_annotations` folders
 7 | to the `Annotations` folders inside the corresponding directories `WIDER_train` and `WIDER_val`.
 8 | The annotation lists `val.txt` and `train.txt` should also be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`.
 9 | The directory structure should look like this:
10 | 
11 | ```
12 | mmdetection
13 | ├── mmdet
14 | ├── tools
15 | ├── configs
16 | ├── data
17 | │   ├── WIDERFace
18 | │   │   ├── WIDER_train
19 | │   │   │   ├── 0--Parade
20 | │   │   │   ├── ...
21 | │   │   │   ├── Annotations
22 | │   │   ├── WIDER_val
23 | │   │   │   ├── 0--Parade
24 | │   │   │   ├── ...
25 | │   │   │   ├── Annotations
26 | │   │   ├── val.txt
27 | │   │   ├── train.txt
28 | 
29 | ```
30 | 
31 | After that you can train the SSD300 on WIDER Face by launching training with the `ssd300_wider_face.py` config or
32 | create your own config based on the presented one.
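For reference, a minimal dataset fragment wired to the layout above is sketched below. The keys follow the usual mmdetection dataset config pattern (`WIDERFaceDataset` is registered in `mmdet/datasets/wider_face.py`); verify the exact values against the shipped `ssd300_wider_face.py` config.

```python
# Illustrative sketch only -- train pipeline and test split omitted.
data = dict(
    train=dict(
        type='WIDERFaceDataset',
        ann_file='data/WIDERFace/train.txt',
        img_prefix='data/WIDERFace/WIDER_train/'),
    val=dict(
        type='WIDERFaceDataset',
        ann_file='data/WIDERFace/val.txt',
        img_prefix='data/WIDERFace/WIDER_val/'))
```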
33 | -------------------------------------------------------------------------------- /mmdet/core/utils/misc.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | 3 | import mmcv 4 | import numpy as np 5 | from six.moves import map, zip 6 | 7 | 8 | def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): 9 | num_imgs = tensor.size(0) 10 | mean = np.array(mean, dtype=np.float32) 11 | std = np.array(std, dtype=np.float32) 12 | imgs = [] 13 | for img_id in range(num_imgs): 14 | img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) 15 | img = mmcv.imdenormalize( 16 | img, mean, std, to_bgr=to_rgb).astype(np.uint8) 17 | imgs.append(np.ascontiguousarray(img)) 18 | return imgs 19 | 20 | 21 | def multi_apply(func, *args, **kwargs): 22 | pfunc = partial(func, **kwargs) if kwargs else func 23 | map_results = map(pfunc, *args) 24 | return tuple(map(list, zip(*map_results))) 25 | 26 | 27 | def unmap(data, count, inds, fill=0): 28 | """ Unmap a subset of item (data) back to the original set of items (of 29 | size count) """ 30 | if data.dim() == 1: 31 | ret = data.new_full((count, ), fill) 32 | ret[inds] = data 33 | else: 34 | new_size = (count, ) + data.size()[1:] 35 | ret = data.new_full(new_size, fill) 36 | ret[inds, :] = data 37 | return ret 38 | -------------------------------------------------------------------------------- /mmdet/core/mask/utils.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | 4 | def split_combined_polys(polys, poly_lens, polys_per_mask): 5 | """Split the combined 1-D polys into masks. 6 | 7 | A mask is represented as a list of polys, and a poly is represented as 8 | a 1-D array. In dataset, all masks are concatenated into a single 1-D 9 | tensor. Here we need to split the tensor into original representations. 10 | 11 | Args: 12 | polys (list): a list (length = image num) of 1-D tensors 13 | poly_lens (list): a list (length = image num) of poly length 14 | polys_per_mask (list): a list (length = image num) of poly number 15 | of each mask 16 | 17 | Returns: 18 | list: a list (length = image num) of list (length = mask num) of 19 | list (length = poly num) of numpy array 20 | """ 21 | mask_polys_list = [] 22 | for img_id in range(len(polys)): 23 | polys_single = polys[img_id] 24 | polys_lens_single = poly_lens[img_id].tolist() 25 | polys_per_mask_single = polys_per_mask[img_id].tolist() 26 | 27 | split_polys = mmcv.slice_list(polys_single, polys_lens_single) 28 | mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) 29 | mask_polys_list.append(mask_polys) 30 | return mask_polys_list 31 | -------------------------------------------------------------------------------- /mmdet/core/bbox/assign_sampling.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from . 
import assigners, samplers
 4 | 
 5 | 
 6 | def build_assigner(cfg, **kwargs):
 7 |     if isinstance(cfg, assigners.BaseAssigner):
 8 |         return cfg
 9 |     elif isinstance(cfg, dict):
10 |         return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs)
11 |     else:
12 |         raise TypeError('Invalid type {} for building an assigner'.format(
13 |             type(cfg)))
14 | 
15 | 
16 | def build_sampler(cfg, **kwargs):
17 |     if isinstance(cfg, samplers.BaseSampler):
18 |         return cfg
19 |     elif isinstance(cfg, dict):
20 |         return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
21 |     else:
22 |         raise TypeError('Invalid type {} for building a sampler'.format(
23 |             type(cfg)))
24 | 
25 | 
26 | def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg):
27 |     bbox_assigner = build_assigner(cfg.assigner)
28 |     bbox_sampler = build_sampler(cfg.sampler)
29 |     assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore,
30 |                                          gt_labels)
31 |     sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes,
32 |                                           gt_labels)
33 |     return assign_result, sampling_result
34 | 
--------------------------------------------------------------------------------
/mmdet/models/mask_heads/htc_mask_head.py:
--------------------------------------------------------------------------------
 1 | from ..registry import HEADS
 2 | from ..utils import ConvModule
 3 | from .fcn_mask_head import FCNMaskHead
 4 | 
 5 | 
 6 | @HEADS.register_module
 7 | class HTCMaskHead(FCNMaskHead):
 8 | 
 9 |     def __init__(self, *args, **kwargs):
10 |         super(HTCMaskHead, self).__init__(*args, **kwargs)
11 |         self.conv_res = ConvModule(
12 |             self.conv_out_channels,
13 |             self.conv_out_channels,
14 |             1,
15 |             conv_cfg=self.conv_cfg,
16 |             norm_cfg=self.norm_cfg)
17 | 
18 |     def init_weights(self):
19 |         super(HTCMaskHead, self).init_weights()
20 |         self.conv_res.init_weights()
21 | 
22 |     def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
23 |         if res_feat is not None:
24 |             res_feat = self.conv_res(res_feat)
25 |             x = x + res_feat
26 |         for conv in self.convs:
27 |             x = conv(x)
28 |         res_feat = x
29 |         outs = []
30 |         if return_logits:
31 |             x = self.upsample(x)
32 |             if self.upsample_method == 'deconv':
33 |                 x = self.relu(x)
34 |             mask_pred = self.conv_logits(x)
35 |             outs.append(mask_pred)
36 |         if return_feat:
37 |             outs.append(res_feat)
38 |         return outs if len(outs) > 1 else outs[0]
39 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | SOLO for non-commercial purposes
 2 | 
 3 | Copyright (c) 2019 the authors
 4 | All rights reserved.
 5 | 
 6 | Redistribution and use in source and binary forms, with or without
 7 | modification, are permitted provided that the following conditions are met:
 8 | 
 9 | * Redistributions of source code must retain the above copyright notice, this
10 |   list of conditions and the following disclaimer.
11 | 
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 |   this list of conditions and the following disclaimer in the documentation
14 |   and/or other materials provided with the distribution.
15 | 
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /mmdet/core/anchor/point_generator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class PointGenerator(object): 5 | 6 | def _meshgrid(self, x, y, row_major=True): 7 | xx = x.repeat(len(y)) 8 | yy = y.view(-1, 1).repeat(1, len(x)).view(-1) 9 | if row_major: 10 | return xx, yy 11 | else: 12 | return yy, xx 13 | 14 | def grid_points(self, featmap_size, stride=16, device='cuda'): 15 | feat_h, feat_w = featmap_size 16 | shift_x = torch.arange(0., feat_w, device=device) * stride 17 | shift_y = torch.arange(0., feat_h, device=device) * stride 18 | shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) 19 | stride = shift_x.new_full((shift_xx.shape[0], ), stride) 20 | shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) 21 | all_points = shifts.to(device) 22 | return all_points 23 | 24 | def valid_flags(self, featmap_size, valid_size, device='cuda'): 25 | feat_h, feat_w = featmap_size 26 | valid_h, valid_w = valid_size 27 | assert valid_h <= feat_h and valid_w <= feat_w 28 | valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device) 29 | valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device) 30 | valid_x[:valid_w] = 1 31 | valid_y[:valid_h] = 1 32 | valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) 33 | valid = valid_xx & valid_yy 34 | return valid 35 | -------------------------------------------------------------------------------- /configs/nas_fpn/README.md: -------------------------------------------------------------------------------- 1 | # NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{ghiasi2019fpn, 7 | title={Nas-fpn: Learning scalable feature pyramid architecture for object detection}, 8 | author={Ghiasi, Golnaz and Lin, Tsung-Yi and Le, Quoc V}, 9 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, 10 | pages={7036--7045}, 11 | year={2019} 12 | } 13 | ``` 14 | 15 | ## Results and Models 16 | 17 | We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. RetinaNet is used in the paper. 18 | 19 | | Backbone | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 20 | |:-----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 21 | | R-50-FPN | 50e | 12.8 | 0.513 | 15.3 | 37.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/nas_fpn/retinanet_crop640_r50_fpn_50e_190824-4d75bfa0.pth) | 22 | | R-50-NASFPN | 50e | 14.8 | 0.662 | 13.1 | 39.8 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/nas_fpn/retinanet_crop640_r50_nasfpn_50e_20191225-b82d3a86.pth) | 23 | 24 | 25 | **Note**: We find that it is unstable to train NAS-FPN and there is a small chance that results can be 3% mAP lower. 
26 | 
--------------------------------------------------------------------------------
/demo/webcam_demo.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | 
 3 | import cv2
 4 | import torch
 5 | 
 6 | from mmdet.apis import inference_detector, init_detector, show_result_ins
 7 | 
 8 | 
 9 | def parse_args():
10 |     parser = argparse.ArgumentParser(description='MMDetection webcam demo')
11 |     parser.add_argument('config', help='test config file path')
12 |     parser.add_argument('checkpoint', help='checkpoint file')
13 |     parser.add_argument('--device', type=int, default=0, help='CUDA device id')
14 |     parser.add_argument(
15 |         '--camera-id', type=int, default=0, help='camera device id')
16 |     parser.add_argument(
17 |         '--score-thr', type=float, default=0.5, help='bbox score threshold')
18 |     args = parser.parse_args()
19 |     return args
20 | 
21 | 
22 | def main():
23 |     args = parse_args()
24 | 
25 |     model = init_detector(
26 |         args.config, args.checkpoint, device=torch.device('cuda', args.device))
27 | 
28 |     camera = cv2.VideoCapture(args.camera_id)
29 | 
30 |     print('Press "Esc", "q" or "Q" to exit.')
31 |     while True:
32 |         ret_val, img = camera.read()
33 |         result = inference_detector(model, img)
34 | 
35 |         # honor the --score-thr argument instead of a hard-coded threshold
36 |         img_show = show_result_ins(img, result, model.CLASSES, score_thr=args.score_thr)
37 |         cv2.imshow('Demo', img_show)
38 | 
39 |         ch = cv2.waitKey(1)
40 |         if ch == 27 or ch == ord('q') or ch == ord('Q'):
41 |             break
42 | 
43 | 
44 | if __name__ == '__main__':
45 |     main()
46 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/error-report.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | name: Error report
 3 | about: Create a report to help us improve
 4 | title: ''
 5 | labels: ''
 6 | assignees: ''
 7 | 
 8 | ---
 9 | 
10 | Thanks for your error report; we appreciate it a lot.
11 | 
12 | **Checklist**
13 | 1. I have searched related issues but cannot get the expected help.
14 | 2. The bug has not been fixed in the latest version.
15 | 
16 | **Describe the bug**
17 | A clear and concise description of what the bug is.
18 | 
19 | **Reproduction**
20 | 1. What command or script did you run?
21 | ```
22 | A placeholder for the command.
23 | ```
24 | 2. Did you make any modifications on the code or config? Do you understand what you have modified?
25 | 3. What dataset did you use?
26 | 
27 | **Environment**
28 | 
29 | 1. Please run `python tools/collect_env.py` to collect the necessary environment information and paste it here.
30 | 2. You may add additional information that may be helpful for locating the problem, such as
31 |     - How you installed PyTorch [e.g., pip, conda, source]
32 |     - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
33 | 
34 | **Error traceback**
35 | If applicable, paste the error traceback here.
36 | ```
37 | A placeholder for the traceback.
38 | ```
39 | 
40 | **Bug fix**
41 | If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
42 | -------------------------------------------------------------------------------- /mmdet/datasets/pipelines/test_aug.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from ..registry import PIPELINES 4 | from .compose import Compose 5 | 6 | 7 | @PIPELINES.register_module 8 | class MultiScaleFlipAug(object): 9 | 10 | def __init__(self, transforms, img_scale, flip=False): 11 | self.transforms = Compose(transforms) 12 | self.img_scale = img_scale if isinstance(img_scale, 13 | list) else [img_scale] 14 | assert mmcv.is_list_of(self.img_scale, tuple) 15 | self.flip = flip 16 | 17 | def __call__(self, results): 18 | aug_data = [] 19 | flip_aug = [False, True] if self.flip else [False] 20 | for scale in self.img_scale: 21 | for flip in flip_aug: 22 | _results = results.copy() 23 | _results['scale'] = scale 24 | _results['flip'] = flip 25 | data = self.transforms(_results) 26 | aug_data.append(data) 27 | # list of dict to dict of list 28 | aug_data_dict = {key: [] for key in aug_data[0]} 29 | for data in aug_data: 30 | for key, val in data.items(): 31 | aug_data_dict[key].append(val) 32 | return aug_data_dict 33 | 34 | def __repr__(self): 35 | repr_str = self.__class__.__name__ 36 | repr_str += '(transforms={}, img_scale={}, flip={})'.format( 37 | self.transforms, self.img_scale, self.flip) 38 | return repr_str 39 | -------------------------------------------------------------------------------- /mmdet/datasets/wider_face.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import xml.etree.ElementTree as ET 3 | 4 | import mmcv 5 | 6 | from .registry import DATASETS 7 | from .xml_style import XMLDataset 8 | 9 | 10 | @DATASETS.register_module 11 | class WIDERFaceDataset(XMLDataset): 12 | """ 13 | Reader for the WIDER Face dataset in PASCAL VOC format. 
14 | Conversion scripts can be found in 15 | https://github.com/sovrasov/wider-face-pascal-voc-annotations 16 | """ 17 | CLASSES = ('face', ) 18 | 19 | def __init__(self, **kwargs): 20 | super(WIDERFaceDataset, self).__init__(**kwargs) 21 | 22 | def load_annotations(self, ann_file): 23 | img_infos = [] 24 | img_ids = mmcv.list_from_file(ann_file) 25 | for img_id in img_ids: 26 | filename = '{}.jpg'.format(img_id) 27 | xml_path = osp.join(self.img_prefix, 'Annotations', 28 | '{}.xml'.format(img_id)) 29 | tree = ET.parse(xml_path) 30 | root = tree.getroot() 31 | size = root.find('size') 32 | width = int(size.find('width').text) 33 | height = int(size.find('height').text) 34 | folder = root.find('folder').text 35 | img_infos.append( 36 | dict( 37 | id=img_id, 38 | filename=osp.join(folder, filename), 39 | width=width, 40 | height=height)) 41 | 42 | return img_infos 43 | -------------------------------------------------------------------------------- /mmdet/models/losses/smooth_l1_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from ..registry import LOSSES 5 | from .utils import weighted_loss 6 | 7 | 8 | @weighted_loss 9 | def smooth_l1_loss(pred, target, beta=1.0): 10 | assert beta > 0 11 | assert pred.size() == target.size() and target.numel() > 0 12 | diff = torch.abs(pred - target) 13 | loss = torch.where(diff < beta, 0.5 * diff * diff / beta, 14 | diff - 0.5 * beta) 15 | return loss 16 | 17 | 18 | @LOSSES.register_module 19 | class SmoothL1Loss(nn.Module): 20 | 21 | def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): 22 | super(SmoothL1Loss, self).__init__() 23 | self.beta = beta 24 | self.reduction = reduction 25 | self.loss_weight = loss_weight 26 | 27 | def forward(self, 28 | pred, 29 | target, 30 | weight=None, 31 | avg_factor=None, 32 | reduction_override=None, 33 | **kwargs): 34 | assert reduction_override in (None, 'none', 'mean', 'sum') 35 | reduction = ( 36 | reduction_override if reduction_override else self.reduction) 37 | loss_bbox = self.loss_weight * smooth_l1_loss( 38 | pred, 39 | target, 40 | weight, 41 | beta=self.beta, 42 | reduction=reduction, 43 | avg_factor=avg_factor, 44 | **kwargs) 45 | return loss_bbox 46 | -------------------------------------------------------------------------------- /tools/upgrade_model_version.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import re 3 | from collections import OrderedDict 4 | 5 | import torch 6 | 7 | 8 | def convert(in_file, out_file): 9 | """Convert keys in checkpoints. 10 | 11 | There can be some breaking changes during the development of mmdetection, 12 | and this tool is used for upgrading checkpoints trained with old versions 13 | to the latest one. 
14 | """ 15 | checkpoint = torch.load(in_file) 16 | in_state_dict = checkpoint.pop('state_dict') 17 | out_state_dict = OrderedDict() 18 | for key, val in in_state_dict.items(): 19 | # Use ConvModule instead of nn.Conv2d in RetinaNet 20 | # cls_convs.0.weight -> cls_convs.0.conv.weight 21 | m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) 22 | if m is not None: 23 | param = m.groups()[1] 24 | new_key = key.replace(param, 'conv.{}'.format(param)) 25 | out_state_dict[new_key] = val 26 | continue 27 | 28 | out_state_dict[key] = val 29 | checkpoint['state_dict'] = out_state_dict 30 | torch.save(checkpoint, out_file) 31 | 32 | 33 | def main(): 34 | parser = argparse.ArgumentParser(description='Upgrade model version') 35 | parser.add_argument('in_file', help='input checkpoint file') 36 | parser.add_argument('out_file', help='output checkpoint file') 37 | args = parser.parse_args() 38 | convert(args.in_file, args.out_file) 39 | 40 | 41 | if __name__ == '__main__': 42 | main() 43 | -------------------------------------------------------------------------------- /tools/voc_eval.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | import mmcv 4 | 5 | from mmdet import datasets 6 | from mmdet.core import eval_map 7 | 8 | 9 | def voc_eval(result_file, dataset, iou_thr=0.5, nproc=4): 10 | det_results = mmcv.load(result_file) 11 | annotations = [dataset.get_ann_info(i) for i in range(len(dataset))] 12 | if hasattr(dataset, 'year') and dataset.year == 2007: 13 | dataset_name = 'voc07' 14 | else: 15 | dataset_name = dataset.CLASSES 16 | eval_map( 17 | det_results, 18 | annotations, 19 | scale_ranges=None, 20 | iou_thr=iou_thr, 21 | dataset=dataset_name, 22 | logger='print', 23 | nproc=nproc) 24 | 25 | 26 | def main(): 27 | parser = ArgumentParser(description='VOC Evaluation') 28 | parser.add_argument('result', help='result file path') 29 | parser.add_argument('config', help='config file path') 30 | parser.add_argument( 31 | '--iou-thr', 32 | type=float, 33 | default=0.5, 34 | help='IoU threshold for evaluation') 35 | parser.add_argument( 36 | '--nproc', 37 | type=int, 38 | default=4, 39 | help='Processes to be used for computing mAP') 40 | args = parser.parse_args() 41 | cfg = mmcv.Config.fromfile(args.config) 42 | test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets) 43 | voc_eval(args.result, test_dataset, args.iou_thr, args.nproc) 44 | 45 | 46 | if __name__ == '__main__': 47 | main() 48 | -------------------------------------------------------------------------------- /configs/free_anchor/README.md: -------------------------------------------------------------------------------- 1 | # FreeAnchor: Learning to Match Anchors for Visual Object Detection 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{zhang2019freeanchor, 7 | title = {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection}, 8 | author = {Zhang, Xiaosong and Wan, Fang and Liu, Chang and Ji, Rongrong and Ye, Qixiang}, 9 | booktitle = {Neural Information Processing Systems}, 10 | year = {2019} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 17 | |:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 18 | | R-50 | pytorch | 1x | 4.7 | 0.322 | 12.0 | 38.4 | 
[model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/free_anchor/retinanet_free_anchor_r50_fpn_1x_20190914-84db6585.pth) | 19 | | R-101 | pytorch | 1x | 6.6 | 0.437 | 9.7 | 40.3 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/free_anchor/retinanet_free_anchor_r101_fpn_1x_20190914-c4e4db81.pth) | 20 | | X-101-32x4d | pytorch | 1x | 7.8 | 0.640 | 8.4 | 42.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/free_anchor/retinanet_free_anchor_x101-32x4d_fpn_1x_20190914-eb73b804.pth) | 21 | 22 | **Notes:** 23 | - We use 8 GPUs with 2 images/GPU. 24 | - For more settings and models, please refer to the [official repo](https://github.com/zhangxiaosong18/FreeAnchor). 25 | -------------------------------------------------------------------------------- /configs/ghm/README.md: -------------------------------------------------------------------------------- 1 | # Gradient Harmonized Single-stage Detector 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{li2019gradient, 7 | title={Gradient Harmonized Single-stage Detector}, 8 | author={Li, Buyu and Liu, Yu and Wang, Xiaogang}, 9 | booktitle={AAAI Conference on Artificial Intelligence}, 10 | year={2019} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 17 | | :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :------: | 18 | | R-50-FPN | pytorch | 1x | 3.9 | 0.500 | 9.4 | 36.9 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_r50_fpn_1x_20190608-b9aa5862.pth) | 19 | | R-101-FPN | pytorch | 1x | 5.8 | 0.625 | 8.5 | 39.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_r101_fpn_1x_20190608-b885b74a.pth) | 20 | | X-101-32x4d-FPN | pytorch | 1x | 7.0 | 0.818 | 7.6 | 40.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_x101_32x4d_fpn_1x_20190608-ed295d22.pth) | 21 | | X-101-64x4d-FPN | pytorch | 1x | 9.9 | 1.191 | 6.1 | 41.6 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_x101_64x4d_fpn_1x_20190608-7f2037ce.pth) | -------------------------------------------------------------------------------- /mmdet/models/utils/conv_ws.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | def conv_ws_2d(input, 6 | weight, 7 | bias=None, 8 | stride=1, 9 | padding=0, 10 | dilation=1, 11 | groups=1, 12 | eps=1e-5): 13 | c_in = weight.size(0) 14 | weight_flat = weight.view(c_in, -1) 15 | mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) 16 | std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) 17 | weight = (weight - mean) / (std + eps) 18 | return F.conv2d(input, weight, bias, stride, padding, dilation, groups) 19 | 20 | 21 | class ConvWS2d(nn.Conv2d): 22 | 23 | def __init__(self, 24 | in_channels, 25 | out_channels, 26 | kernel_size, 27 | stride=1, 28 | padding=0, 29 | dilation=1, 30 | groups=1, 31 | bias=True, 32 | eps=1e-5): 33 | super(ConvWS2d, self).__init__( 34 | in_channels, 35 | out_channels, 36 | kernel_size, 37 | stride=stride, 38 | padding=padding, 39 | dilation=dilation, 40 | groups=groups, 41 | bias=bias) 42 | self.eps = eps 43 | 44 | def forward(self, x): 45 | return conv_ws_2d(x, self.weight, 
self.bias, self.stride, self.padding, 46 | self.dilation, self.groups, self.eps) 47 | -------------------------------------------------------------------------------- /mmdet/utils/profiling.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import sys 3 | import time 4 | 5 | import torch 6 | 7 | if sys.version_info >= (3, 7): 8 | 9 | @contextlib.contextmanager 10 | def profile_time(trace_name, 11 | name, 12 | enabled=True, 13 | stream=None, 14 | end_stream=None): 15 | """Print time spent by CPU and GPU. 16 | 17 | Useful as a temporary context manager to find sweet spots of 18 | code suitable for async implementation. 19 | 20 | """ 21 | if (not enabled) or not torch.cuda.is_available(): 22 | yield 23 | return 24 | stream = stream if stream else torch.cuda.current_stream() 25 | end_stream = end_stream if end_stream else stream 26 | start = torch.cuda.Event(enable_timing=True) 27 | end = torch.cuda.Event(enable_timing=True) 28 | stream.record_event(start) 29 | try: 30 | cpu_start = time.monotonic() 31 | yield 32 | finally: 33 | cpu_end = time.monotonic() 34 | end_stream.record_event(end) 35 | end.synchronize() 36 | cpu_time = (cpu_end - cpu_start) * 1000 37 | gpu_time = start.elapsed_time(end) 38 | msg = "{} {} cpu_time {:.2f} ms ".format(trace_name, name, 39 | cpu_time) 40 | msg += "gpu_time {:.2f} ms stream {}".format(gpu_time, stream) 41 | print(msg, end_stream) 42 | -------------------------------------------------------------------------------- /mmdet/datasets/builder.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | from mmdet.utils import build_from_cfg 4 | from .dataset_wrappers import ConcatDataset, RepeatDataset 5 | from .registry import DATASETS 6 | 7 | 8 | def _concat_dataset(cfg, default_args=None): 9 | ann_files = cfg['ann_file'] 10 | img_prefixes = cfg.get('img_prefix', None) 11 | seg_prefixes = cfg.get('seg_prefix', None) 12 | proposal_files = cfg.get('proposal_file', None) 13 | 14 | datasets = [] 15 | num_dset = len(ann_files) 16 | for i in range(num_dset): 17 | data_cfg = copy.deepcopy(cfg) 18 | data_cfg['ann_file'] = ann_files[i] 19 | if isinstance(img_prefixes, (list, tuple)): 20 | data_cfg['img_prefix'] = img_prefixes[i] 21 | if isinstance(seg_prefixes, (list, tuple)): 22 | data_cfg['seg_prefix'] = seg_prefixes[i] 23 | if isinstance(proposal_files, (list, tuple)): 24 | data_cfg['proposal_file'] = proposal_files[i] 25 | datasets.append(build_dataset(data_cfg, default_args)) 26 | 27 | return ConcatDataset(datasets) 28 | 29 | 30 | def build_dataset(cfg, default_args=None): 31 | if isinstance(cfg, (list, tuple)): 32 | dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) 33 | elif cfg['type'] == 'RepeatDataset': 34 | dataset = RepeatDataset( 35 | build_dataset(cfg['dataset'], default_args), cfg['times']) 36 | elif isinstance(cfg['ann_file'], (list, tuple)): 37 | dataset = _concat_dataset(cfg, default_args) 38 | else: 39 | dataset = build_from_cfg(cfg, DATASETS, default_args) 40 | 41 | return dataset 42 | -------------------------------------------------------------------------------- /mmdet/models/utils/weight_init.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | 4 | 5 | def xavier_init(module, gain=1, bias=0, distribution='normal'): 6 | assert distribution in ['uniform', 'normal'] 7 | if distribution == 'uniform': 8 | 
nn.init.xavier_uniform_(module.weight, gain=gain)
 9 |     else:
10 |         nn.init.xavier_normal_(module.weight, gain=gain)
11 |     if hasattr(module, 'bias') and module.bias is not None:
12 |         nn.init.constant_(module.bias, bias)
13 | 
14 | 
15 | def normal_init(module, mean=0, std=1, bias=0):
16 |     nn.init.normal_(module.weight, mean, std)
17 |     if hasattr(module, 'bias') and module.bias is not None:
18 |         nn.init.constant_(module.bias, bias)
19 | 
20 | 
21 | def uniform_init(module, a=0, b=1, bias=0):
22 |     nn.init.uniform_(module.weight, a, b)
23 |     if hasattr(module, 'bias') and module.bias is not None:
24 |         nn.init.constant_(module.bias, bias)
25 | 
26 | 
27 | def kaiming_init(module,
28 |                  mode='fan_out',
29 |                  nonlinearity='relu',
30 |                  bias=0,
31 |                  distribution='normal'):
32 |     assert distribution in ['uniform', 'normal']
33 |     if distribution == 'uniform':
34 |         nn.init.kaiming_uniform_(
35 |             module.weight, mode=mode, nonlinearity=nonlinearity)
36 |     else:
37 |         nn.init.kaiming_normal_(
38 |             module.weight, mode=mode, nonlinearity=nonlinearity)
39 |     if hasattr(module, 'bias') and module.bias is not None:
40 |         nn.init.constant_(module.bias, bias)
41 | 
42 | 
43 | def bias_init_with_prob(prior_prob):
44 |     """Initialize conv/fc bias value according to a given probability."""
45 |     bias_init = float(-np.log((1 - prior_prob) / prior_prob))
46 |     return bias_init
47 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
 1 | dist: bionic  # ubuntu 18.04
 2 | language: python
 3 | 
 4 | python:
 5 |   - "3.5"
 6 |   - "3.6"
 7 |   - "3.7"
 8 | 
 9 | env: CUDA=10.1.105-1 CUDA_SHORT=10.1 UBUNTU_VERSION=ubuntu1804 FORCE_CUDA=1
10 | cache: pip
11 | 
12 | # Ref to CUDA installation in Travis: https://github.com/jeremad/cuda-travis
13 | before_install:
14 |   - INSTALLER=cuda-repo-${UBUNTU_VERSION}_${CUDA}_amd64.deb
15 |   - wget http://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/${INSTALLER}
16 |   - sudo dpkg -i ${INSTALLER}
17 |   - wget https://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/7fa2af80.pub
18 |   - sudo apt-key add 7fa2af80.pub
19 |   - sudo apt update -qq
20 |   - sudo apt install -y cuda-${CUDA_SHORT/./-} cuda-cufft-dev-${CUDA_SHORT/./-}
21 |   - sudo apt clean
22 |   - CUDA_HOME=/usr/local/cuda-${CUDA_SHORT}
23 |   - LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${CUDA_HOME}/include:${LD_LIBRARY_PATH}
24 |   - PATH=${CUDA_HOME}/bin:${PATH}
25 | 
26 | install:
27 |   - pip install Pillow==6.2.2  # remove this line when torchvision>=0.5
28 |   - pip install Cython torch==1.2 torchvision==0.4.0  # TODO: fix CI for pytorch>1.2
29 |   - pip install "git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI"
30 |   - pip install -r requirements.txt
31 | 
32 | before_script:
33 |   - flake8 .
34 |   - isort -rc --check-only --diff mmdet/ tools/ tests/
35 |   - yapf -r -d --style .style.yapf mmdet/ tools/ tests/ configs/
36 | 
37 | script:
38 |   - python setup.py check -m -s
39 |   - python setup.py build_ext --inplace
40 |   - coverage run --source mmdet -m py.test -v --xdoctest-modules tests mmdet
41 | 
42 | after_success:
43 |   - coverage report
44 | 
--------------------------------------------------------------------------------
/mmdet/ops/utils/src/compiling_info.cpp:
--------------------------------------------------------------------------------
 1 | // modified from
 2 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/vision.cpp
 3 | #include <cuda_runtime_api.h>
 4 | #include <torch/extension.h>
 5 | 
 6 | #ifdef WITH_CUDA
 7 | int get_cudart_version() { return CUDART_VERSION; }
 8 | #endif
 9 | 
10 | std::string get_compiling_cuda_version() {
11 | #ifdef WITH_CUDA
12 |   std::ostringstream oss;
13 | 
14 |   // copied from
15 |   // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231
16 |   auto printCudaStyleVersion = [&](int v) {
17 |     oss << (v / 1000) << "." << (v / 10 % 100);
18 |     if (v % 10 != 0) {
19 |       oss << "." << (v % 10);
20 |     }
21 |   };
22 |   printCudaStyleVersion(get_cudart_version());
23 |   return oss.str();
24 | #else
25 |   return std::string("not available");
26 | #endif
27 | }
28 | 
29 | // similar to
30 | // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp
31 | std::string get_compiler_version() {
32 |   std::ostringstream ss;
33 | #if defined(__GNUC__)
34 | #ifndef __clang__
35 |   { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; }
36 | #endif
37 | #endif
38 | 
39 | #if defined(__clang_major__)
40 |   {
41 |     ss << "clang " << __clang_major__ << "." << __clang_minor__ << "."
42 |        << __clang_patchlevel__;
43 |   }
44 | #endif
45 | 
46 | #if defined(_MSC_VER)
47 |   { ss << "MSVC " << _MSC_FULL_VER; }
48 | #endif
49 |   return ss.str();
50 | }
51 | 
52 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
53 |   m.def("get_compiler_version", &get_compiler_version, "get_compiler_version");
54 |   m.def("get_compiling_cuda_version", &get_compiling_cuda_version,
55 |         "get_compiling_cuda_version");
56 | }
57 | 
--------------------------------------------------------------------------------
/tools/get_flops.py:
--------------------------------------------------------------------------------
 1 | import argparse
 2 | 
 3 | from mmcv import Config
 4 | 
 5 | from mmdet.models import build_detector
 6 | from mmdet.utils import get_model_complexity_info
 7 | 
 8 | 
 9 | def parse_args():
10 |     parser = argparse.ArgumentParser(description='Get the FLOPs of a detector')
11 |     parser.add_argument('config', help='train config file path')
12 |     parser.add_argument(
13 |         '--shape',
14 |         type=int,
15 |         nargs='+',
16 |         default=[1280, 800],
17 |         help='input image size')
18 |     args = parser.parse_args()
19 |     return args
20 | 
21 | 
22 | def main():
23 | 
24 |     args = parse_args()
25 | 
26 |     if len(args.shape) == 1:
27 |         input_shape = (3, args.shape[0], args.shape[0])
28 |     elif len(args.shape) == 2:
29 |         input_shape = (3, ) + tuple(args.shape)
30 |     else:
31 |         raise ValueError('invalid input shape')
32 | 
33 |     cfg = Config.fromfile(args.config)
34 |     model = build_detector(
35 |         cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
36 |     model.eval()
37 | 
38 |     if hasattr(model, 'forward_dummy'):
39 |         model.forward = model.forward_dummy
40 |     else:
41 |         raise NotImplementedError(
42 |             'FLOPs counter is not currently supported with {}'.
43 | format(model.__class__.__name__)) 44 | 45 | flops, params = get_model_complexity_info(model, input_shape) 46 | split_line = '=' * 30 47 | print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format( 48 | split_line, input_shape, flops, params)) 49 | print('!!!Please be cautious if you use the results in papers. ' 50 | 'You may need to check if all ops are supported and verify that the ' 51 | 'flops computation is correct.') 52 | 53 | 54 | if __name__ == '__main__': 55 | main() 56 | -------------------------------------------------------------------------------- /mmdet/core/evaluation/bbox_overlaps.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def bbox_overlaps(bboxes1, bboxes2, mode='iou'): 5 | """Calculate the ious between each bbox of bboxes1 and bboxes2. 6 | 7 | Args: 8 | bboxes1(ndarray): shape (n, 4) 9 | bboxes2(ndarray): shape (k, 4) 10 | mode(str): iou (intersection over union) or iof (intersection 11 | over foreground) 12 | 13 | Returns: 14 | ious(ndarray): shape (n, k) 15 | """ 16 | 17 | assert mode in ['iou', 'iof'] 18 | 19 | bboxes1 = bboxes1.astype(np.float32) 20 | bboxes2 = bboxes2.astype(np.float32) 21 | rows = bboxes1.shape[0] 22 | cols = bboxes2.shape[0] 23 | ious = np.zeros((rows, cols), dtype=np.float32) 24 | if rows * cols == 0: 25 | return ious 26 | exchange = False 27 | if bboxes1.shape[0] > bboxes2.shape[0]: 28 | bboxes1, bboxes2 = bboxes2, bboxes1 29 | ious = np.zeros((cols, rows), dtype=np.float32) 30 | exchange = True 31 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 32 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 33 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 34 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 35 | for i in range(bboxes1.shape[0]): 36 | x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) 37 | y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) 38 | x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) 39 | y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) 40 | overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum( 41 | y_end - y_start + 1, 0) 42 | if mode == 'iou': 43 | union = area1[i] + area2 - overlap 44 | else: 45 | union = area1[i] if not exchange else area2 46 | ious[i, :] = overlap / union 47 | if exchange: 48 | ious = ious.T 49 | return ious 50 | -------------------------------------------------------------------------------- /mmdet/core/mask/mask_target.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | import numpy as np 3 | import torch 4 | from torch.nn.modules.utils import _pair 5 | 6 | 7 | def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, 8 | cfg): 9 | cfg_list = [cfg for _ in range(len(pos_proposals_list))] 10 | mask_targets = map(mask_target_single, pos_proposals_list, 11 | pos_assigned_gt_inds_list, gt_masks_list, cfg_list) 12 | mask_targets = torch.cat(list(mask_targets)) 13 | return mask_targets 14 | 15 | 16 | def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): 17 | mask_size = _pair(cfg.mask_size) 18 | num_pos = pos_proposals.size(0) 19 | mask_targets = [] 20 | if num_pos > 0: 21 | proposals_np = pos_proposals.cpu().numpy() 22 | _, maxh, maxw = gt_masks.shape 23 | proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw - 1) 24 | proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh - 1) 25 | pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() 26 | for i in range(num_pos): 27 | gt_mask = gt_masks[pos_assigned_gt_inds[i]] 28 | 
bbox = proposals_np[i, :].astype(np.int32) 29 | x1, y1, x2, y2 = bbox 30 | w = np.maximum(x2 - x1 + 1, 1) 31 | h = np.maximum(y2 - y1 + 1, 1) 32 | # mask is uint8 both before and after resizing 33 | # mask_size (h, w) to (w, h) 34 | target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w], 35 | mask_size[::-1]) 36 | mask_targets.append(target) 37 | mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to( 38 | pos_proposals.device) 39 | else: 40 | mask_targets = pos_proposals.new_zeros((0, ) + mask_size) 41 | return mask_targets 42 | -------------------------------------------------------------------------------- /mmdet/datasets/dataset_wrappers.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from torch.utils.data.dataset import ConcatDataset as _ConcatDataset 3 | 4 | from .registry import DATASETS 5 | 6 | 7 | @DATASETS.register_module 8 | class ConcatDataset(_ConcatDataset): 9 | """A wrapper of concatenated dataset. 10 | 11 | Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but 12 | concat the group flag for image aspect ratio. 13 | 14 | Args: 15 | datasets (list[:obj:`Dataset`]): A list of datasets. 16 | """ 17 | 18 | def __init__(self, datasets): 19 | super(ConcatDataset, self).__init__(datasets) 20 | self.CLASSES = datasets[0].CLASSES 21 | if hasattr(datasets[0], 'flag'): 22 | flags = [] 23 | for i in range(0, len(datasets)): 24 | flags.append(datasets[i].flag) 25 | self.flag = np.concatenate(flags) 26 | 27 | 28 | @DATASETS.register_module 29 | class RepeatDataset(object): 30 | """A wrapper of repeated dataset. 31 | 32 | The length of repeated dataset will be `times` larger than the original 33 | dataset. This is useful when the data loading time is long but the dataset 34 | is small. Using RepeatDataset can reduce the data loading time between 35 | epochs. 36 | 37 | Args: 38 | dataset (:obj:`Dataset`): The dataset to be repeated. 39 | times (int): Repeat times. 40 | """ 41 | 42 | def __init__(self, dataset, times): 43 | self.dataset = dataset 44 | self.times = times 45 | self.CLASSES = dataset.CLASSES 46 | if hasattr(self.dataset, 'flag'): 47 | self.flag = np.tile(self.dataset.flag, times) 48 | 49 | self._ori_len = len(self.dataset) 50 | 51 | def __getitem__(self, idx): 52 | return self.dataset[idx % self._ori_len] 53 | 54 | def __len__(self): 55 | return self.times * self._ori_len 56 | -------------------------------------------------------------------------------- /mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from torch.autograd import Function 3 | from torch.autograd.function import once_differentiable 4 | 5 | from . 
import sigmoid_focal_loss_cuda 6 | 7 | 8 | class SigmoidFocalLossFunction(Function): 9 | 10 | @staticmethod 11 | def forward(ctx, input, target, gamma=2.0, alpha=0.25): 12 | ctx.save_for_backward(input, target) 13 | num_classes = input.shape[1] 14 | ctx.num_classes = num_classes 15 | ctx.gamma = gamma 16 | ctx.alpha = alpha 17 | 18 | loss = sigmoid_focal_loss_cuda.forward(input, target, num_classes, 19 | gamma, alpha) 20 | return loss 21 | 22 | @staticmethod 23 | @once_differentiable 24 | def backward(ctx, d_loss): 25 | input, target = ctx.saved_tensors 26 | num_classes = ctx.num_classes 27 | gamma = ctx.gamma 28 | alpha = ctx.alpha 29 | d_loss = d_loss.contiguous() 30 | d_input = sigmoid_focal_loss_cuda.backward(input, target, d_loss, 31 | num_classes, gamma, alpha) 32 | return d_input, None, None, None, None 33 | 34 | 35 | sigmoid_focal_loss = SigmoidFocalLossFunction.apply 36 | 37 | 38 | # TODO: remove this module 39 | class SigmoidFocalLoss(nn.Module): 40 | 41 | def __init__(self, gamma, alpha): 42 | super(SigmoidFocalLoss, self).__init__() 43 | self.gamma = gamma 44 | self.alpha = alpha 45 | 46 | def forward(self, logits, targets): 47 | assert logits.is_cuda 48 | loss = sigmoid_focal_loss(logits, targets, self.gamma, self.alpha) 49 | return loss.sum() 50 | 51 | def __repr__(self): 52 | tmpstr = self.__class__.__name__ + '(gamma={}, alpha={})'.format( 53 | self.gamma, self.alpha) 54 | return tmpstr 55 | -------------------------------------------------------------------------------- /configs/grid_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Grid R-CNN 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{lu2019grid, 7 | title={Grid r-cnn}, 8 | author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, 9 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, 10 | year={2019} 11 | } 12 | 13 | @article{lu2019grid, 14 | title={Grid R-CNN Plus: Faster and Better}, 15 | author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, 16 | journal={arXiv preprint arXiv:1906.05688}, 17 | year={2019} 18 | } 19 | ``` 20 | 21 | ## Results and Models 22 | 23 | | Backbone | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 24 | |:-----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 25 | | R-50 | 2x | 4.8 | 1.172 | 10.9 | 40.3 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x_20190619-5b29cf9d.pth) | 26 | | R-101 | 2x | 6.7 | 1.214 | 10.0 | 41.7 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_r101_fpn_2x_20190619-a4b61645.pth) | 27 | | X-101-32x4d | 2x | 8.0 | 1.335 | 8.5 | 43.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_x101_32x4d_fpn_2x_20190619-0bbfd87a.pth) | 28 | | X-101-64x4d | 2x | 10.9 | 1.753 | 6.4 | 43.1 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/grid_rcnn/grid_rcnn_gn_head_x101_64x4d_fpn_2x_20190619-8f4e20bb.pth) | 29 | 30 | **Notes:** 31 | - All models are trained with 8 GPUs instead of 32 GPUs in the original paper. 32 | - The warming up lasts for 1 epoch and `2x` here indicates 25 epochs. 
33 | 
--------------------------------------------------------------------------------
/configs/cityscapes/README.md:
--------------------------------------------------------------------------------
 1 | ## Common settings
 2 | 
 3 | - All baselines were trained using 8 GPUs with a batch size of 8 (1 image per GPU), using the [linear scaling rule](https://arxiv.org/abs/1706.02677) to scale the learning rate.
 4 | - All models were trained on `cityscapes_train` and tested on `cityscapes_val`.
 5 | - The 1x training schedule indicates 64 epochs, which corresponds to slightly less than the 24k iterations reported in the original schedule from the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870).
 6 | - All pytorch-style pretrained backbones on ImageNet are from the PyTorch model zoo.
 7 | 
 8 | 
 9 | ## Baselines
10 | 
11 | Download links and more models with different backbones and training schemes will be added to the model zoo.
12 | 
13 | 
14 | ### Faster R-CNN
15 | 
16 | | Backbone | Style | Lr schd | Scale | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download |
17 | | :-------------: | :-----: | :-----: | :---: | :------: | :-----------------: | :------------: | :----: | :------: |
18 | | R-50-FPN | pytorch | 1x | 800-1024 | 4.9 | 0.345 | 8.8 | 36.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/cityscapes/faster_rcnn_r50_fpn_1x_city_20190727-7b9c0534.pth) |
19 | 
20 | ### Mask R-CNN
21 | 
22 | | Backbone | Style | Lr schd | Scale | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download |
23 | | :-------------: | :-----: | :-----: | :------: | :------: | :-----------------: | :------------: | :----: | :-----: | :------: |
24 | | R-50-FPN | pytorch | 1x | 800-1024 | 4.9 | 0.609 | 2.5 | 37.4 | 32.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/cityscapes/mask_rcnn_r50_fpn_1x_city_20190727-9b3c56a5.pth) |
25 | 
26 | **Notes:**
27 | - In the original paper, the mask AP of Mask R-CNN R-50-FPN is 31.5.
28 | 
29 | 
--------------------------------------------------------------------------------
/mmdet/models/utils/norm.py:
--------------------------------------------------------------------------------
 1 | import torch.nn as nn
 2 | 
 3 | norm_cfg = {
 4 |     # format: layer_type: (abbreviation, module)
 5 |     'BN': ('bn', nn.BatchNorm2d),
 6 |     'SyncBN': ('bn', nn.SyncBatchNorm),
 7 |     'GN': ('gn', nn.GroupNorm),
 8 |     # and potentially 'SN'
 9 | }
10 | 
11 | 
12 | def build_norm_layer(cfg, num_features, postfix=''):
13 |     """Build a normalization layer.
14 | 
15 |     Args:
16 |         cfg (dict): cfg should contain:
17 |             type (str): identify norm layer type.
18 |             layer args: args needed to instantiate a norm layer.
19 |             requires_grad (bool): [optional] whether to stop gradient updates
20 |         num_features (int): number of channels from input.
21 |         postfix (int, str): appended into norm abbreviation to
22 |             create named layer.
23 | 24 | Returns: 25 | name (str): abbreviation + postfix 26 | layer (nn.Module): created norm layer 27 | """ 28 | assert isinstance(cfg, dict) and 'type' in cfg 29 | cfg_ = cfg.copy() 30 | 31 | layer_type = cfg_.pop('type') 32 | if layer_type not in norm_cfg: 33 | raise KeyError('Unrecognized norm type {}'.format(layer_type)) 34 | else: 35 | abbr, norm_layer = norm_cfg[layer_type] 36 | if norm_layer is None: 37 | raise NotImplementedError 38 | 39 | assert isinstance(postfix, (int, str)) 40 | name = abbr + str(postfix) 41 | 42 | requires_grad = cfg_.pop('requires_grad', True) 43 | cfg_.setdefault('eps', 1e-5) 44 | if layer_type != 'GN': 45 | layer = norm_layer(num_features, **cfg_) 46 | if layer_type == 'SyncBN': 47 | layer._specify_ddp_gpu_num(1) 48 | else: 49 | assert 'num_groups' in cfg_ 50 | layer = norm_layer(num_channels=num_features, **cfg_) 51 | 52 | for param in layer.parameters(): 53 | param.requires_grad = requires_grad 54 | 55 | return name, layer 56 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from .random_sampler import RandomSampler 5 | 6 | 7 | class InstanceBalancedPosSampler(RandomSampler): 8 | 9 | def _sample_pos(self, assign_result, num_expected, **kwargs): 10 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 11 | if pos_inds.numel() != 0: 12 | pos_inds = pos_inds.squeeze(1) 13 | if pos_inds.numel() <= num_expected: 14 | return pos_inds 15 | else: 16 | unique_gt_inds = assign_result.gt_inds[pos_inds].unique() 17 | num_gts = len(unique_gt_inds) 18 | num_per_gt = int(round(num_expected / float(num_gts)) + 1) 19 | sampled_inds = [] 20 | for i in unique_gt_inds: 21 | inds = torch.nonzero(assign_result.gt_inds == i.item()) 22 | if inds.numel() != 0: 23 | inds = inds.squeeze(1) 24 | else: 25 | continue 26 | if len(inds) > num_per_gt: 27 | inds = self.random_choice(inds, num_per_gt) 28 | sampled_inds.append(inds) 29 | sampled_inds = torch.cat(sampled_inds) 30 | if len(sampled_inds) < num_expected: 31 | num_extra = num_expected - len(sampled_inds) 32 | extra_inds = np.array( 33 | list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) 34 | if len(extra_inds) > num_extra: 35 | extra_inds = self.random_choice(extra_inds, num_extra) 36 | extra_inds = torch.from_numpy(extra_inds).to( 37 | assign_result.gt_inds.device).long() 38 | sampled_inds = torch.cat([sampled_inds, extra_inds]) 39 | elif len(sampled_inds) > num_expected: 40 | sampled_inds = self.random_choice(sampled_inds, num_expected) 41 | return sampled_inds 42 | -------------------------------------------------------------------------------- /mmdet/core/bbox/demodata.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | def ensure_rng(rng=None): 6 | """ 7 | Simple version of the ``kwarray.ensure_rng`` 8 | 9 | Args: 10 | rng (int | numpy.random.RandomState | None): 11 | if None, then defaults to the global rng. 
Otherwise this can be an 12 | integer or a RandomState class 13 | Returns: 14 | (numpy.random.RandomState) : rng - 15 | a numpy random number generator 16 | 17 | References: 18 | https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 19 | """ 20 | 21 | if rng is None: 22 | rng = np.random.mtrand._rand 23 | elif isinstance(rng, int): 24 | rng = np.random.RandomState(rng) 25 | else: 26 | rng = rng 27 | return rng 28 | 29 | 30 | def random_boxes(num=1, scale=1, rng=None): 31 | """ 32 | Simple version of ``kwimage.Boxes.random`` 33 | 34 | Returns: 35 | Tensor: shape (n, 4) in x1, y1, x2, y2 format. 36 | 37 | References: 38 | https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 39 | 40 | Example: 41 | >>> num = 3 42 | >>> scale = 512 43 | >>> rng = 0 44 | >>> boxes = random_boxes(num, scale, rng) 45 | >>> print(boxes) 46 | tensor([[280.9925, 278.9802, 308.6148, 366.1769], 47 | [216.9113, 330.6978, 224.0446, 456.5878], 48 | [405.3632, 196.3221, 493.3953, 270.7942]]) 49 | """ 50 | rng = ensure_rng(rng) 51 | 52 | tlbr = rng.rand(num, 4).astype(np.float32) 53 | 54 | tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) 55 | tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) 56 | br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) 57 | br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) 58 | 59 | tlbr[:, 0] = tl_x * scale 60 | tlbr[:, 1] = tl_y * scale 61 | tlbr[:, 2] = br_x * scale 62 | tlbr[:, 3] = br_y * scale 63 | 64 | boxes = torch.from_numpy(tlbr) 65 | return boxes 66 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | # cython generated cpp 107 | mmdet/ops/nms/src/soft_nms_cpu.cpp 108 | mmdet/version.py 109 | data 110 | .vscode 111 | .idea 112 | 113 | # custom 114 | *.pkl 115 | *.pkl.json 116 | *.segm.json 117 | *.log.json 118 | work_dirs/ 119 | 120 | # Pytorch 121 | *.pth 122 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to mmdetection 2 | 3 | All kinds of contributions are welcome, including but not limited to the following. 4 | 5 | - Fixes (typos, bugs) 6 | - New features and components 7 | 8 | ## Workflow 9 | 10 | 1. fork and pull the latest mmdetection 11 | 2. checkout a new branch (do not use the master branch for PRs) 12 | 3. commit your changes 13 | 4. create a PR 14 | 15 | Note 16 | - If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first. 17 | - If you are the author of some papers and would like to include your method in mmdetection, 18 | please contact Kai Chen (chenkaidev[at]gmail[dot]com). We would greatly appreciate your contribution. 19 | 20 | ## Code style 21 | 22 | ### Python 23 | We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style. 24 | 25 | We use the following tools for linting and formatting: 26 | - [flake8](http://flake8.pycqa.org/en/latest/): linter 27 | - [yapf](https://github.com/google/yapf): formatter 28 | - [isort](https://github.com/timothycrosley/isort): sort imports 29 | 30 | Style configurations of yapf and isort can be found in [.style.yapf](../.style.yapf) and [.isort.cfg](../.isort.cfg). 31 | 32 | We use a [pre-commit hook](https://pre-commit.com/) that runs `flake8`, `yapf` and `isort`, trims trailing whitespace, 33 | fixes end-of-file newlines, and sorts `requirements.txt` automatically on every commit. 34 | The config for the pre-commit hook is stored in [.pre-commit-config](../.pre-commit-config.yaml). 35 | 36 | After you clone the repository, you will need to install and initialize the pre-commit hook. 37 | 38 | ``` 39 | pip install -U pre-commit 40 | ``` 41 | 42 | Then, from the repository folder, run 43 | ``` 44 | pre-commit install 45 | ``` 46 | 47 | After this, the linters and formatter will be enforced on every commit.
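To check the hooks without creating a commit, you can also run them against all files (standard pre-commit usage, not specific to this repository):

```
pre-commit run --all-files
```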
48 | 49 | 50 | >Before you create a PR, make sure that your code passes the linters and is formatted by yapf. 51 | 52 | ### C++ and CUDA 53 | We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). 54 | -------------------------------------------------------------------------------- /configs/libra_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Libra R-CNN: Towards Balanced Learning for Object Detection 2 | 3 | ## Introduction 4 | 5 | We provide config files to reproduce the results in the CVPR 2019 paper [Libra R-CNN](https://arxiv.org/pdf/1904.02701.pdf). 6 | 7 | ``` 8 | @inproceedings{pang2019libra, 9 | title={Libra R-CNN: Towards Balanced Learning for Object Detection}, 10 | author={Pang, Jiangmiao and Chen, Kai and Shi, Jianping and Feng, Huajun and Ouyang, Wanli and Lin, Dahua}, 11 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, 12 | year={2019} 13 | } 14 | ``` 15 | 16 | ## Results and models 17 | 18 | The results on COCO 2017val are shown in the table below. (Results on test-dev are usually slightly higher than on val.) 19 | 20 | | Architecture | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 21 | |:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| 22 | | Faster R-CNN | R-50-FPN | pytorch | 1x | 4.2 | 0.375 | 12.0 | 38.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_20190610-bf0ea559.pth) | 23 | | Fast R-CNN | R-50-FPN | pytorch | 1x | 3.7 | 0.272 | 16.3 | 38.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_20190525-a43f88b5.pth) | 24 | | Faster R-CNN | R-101-FPN | pytorch | 1x | 6.0 | 0.495 | 10.4 | 40.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_20190525-94e94051.pth) | 25 | | Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 10.1 | 1.050 | 6.8 | 42.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_20190525-359c134a.pth) | 26 | | RetinaNet | R-50-FPN | pytorch | 1x | 3.7 | 0.328 | 11.8 | 37.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/libra_rcnn/libra_retinanet_r50_fpn_1x_20190525-ead2a6bb.pth) | 27 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/random_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from .base_sampler import BaseSampler 5 | 6 | 7 | class RandomSampler(BaseSampler): 8 | 9 | def __init__(self, 10 | num, 11 | pos_fraction, 12 | neg_pos_ub=-1, 13 | add_gt_as_proposals=True, 14 | **kwargs): 15 | from mmdet.core.bbox import demodata 16 | super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, 17 | add_gt_as_proposals) 18 | self.rng = demodata.ensure_rng(kwargs.get('rng', None)) 19 | 20 | def random_choice(self, gallery, num): 21 | """Randomly select some elements from the gallery. 22 | 23 | It seems that PyTorch's implementation is slower than NumPy's, so we 24 | use NumPy to randomly permute the indices.
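        Example (an illustrative call added for clarity; any 1-D gallery works):
            >>> import torch
            >>> sampler = RandomSampler(num=8, pos_fraction=0.5)
            >>> inds = sampler.random_choice(torch.arange(10), 4)
            >>> assert len(inds) == 4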
25 | """ 26 | assert len(gallery) >= num 27 | if isinstance(gallery, list): 28 | gallery = np.array(gallery) 29 | cands = np.arange(len(gallery)) 30 | self.rng.shuffle(cands) 31 | rand_inds = cands[:num] 32 | if not isinstance(gallery, np.ndarray): 33 | rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device) 34 | return gallery[rand_inds] 35 | 36 | def _sample_pos(self, assign_result, num_expected, **kwargs): 37 | """Randomly sample some positive samples.""" 38 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 39 | if pos_inds.numel() != 0: 40 | pos_inds = pos_inds.squeeze(1) 41 | if pos_inds.numel() <= num_expected: 42 | return pos_inds 43 | else: 44 | return self.random_choice(pos_inds, num_expected) 45 | 46 | def _sample_neg(self, assign_result, num_expected, **kwargs): 47 | """Randomly sample some negative samples.""" 48 | neg_inds = torch.nonzero(assign_result.gt_inds == 0) 49 | if neg_inds.numel() != 0: 50 | neg_inds = neg_inds.squeeze(1) 51 | if len(neg_inds) <= num_expected: 52 | return neg_inds 53 | else: 54 | return self.random_choice(neg_inds, num_expected) 55 | -------------------------------------------------------------------------------- /mmdet/models/losses/balanced_l1_loss.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | 5 | from ..registry import LOSSES 6 | from .utils import weighted_loss 7 | 8 | 9 | @weighted_loss 10 | def balanced_l1_loss(pred, 11 | target, 12 | beta=1.0, 13 | alpha=0.5, 14 | gamma=1.5, 15 | reduction='mean'): 16 | assert beta > 0 17 | assert pred.size() == target.size() and target.numel() > 0 18 | 19 | diff = torch.abs(pred - target) 20 | b = np.e**(gamma / alpha) - 1 21 | loss = torch.where( 22 | diff < beta, alpha / b * 23 | (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff, 24 | gamma * diff + gamma / b - alpha * beta) 25 | 26 | return loss 27 | 28 | 29 | @LOSSES.register_module 30 | class BalancedL1Loss(nn.Module): 31 | """Balanced L1 Loss 32 | 33 | arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) 34 | """ 35 | 36 | def __init__(self, 37 | alpha=0.5, 38 | gamma=1.5, 39 | beta=1.0, 40 | reduction='mean', 41 | loss_weight=1.0): 42 | super(BalancedL1Loss, self).__init__() 43 | self.alpha = alpha 44 | self.gamma = gamma 45 | self.beta = beta 46 | self.reduction = reduction 47 | self.loss_weight = loss_weight 48 | 49 | def forward(self, 50 | pred, 51 | target, 52 | weight=None, 53 | avg_factor=None, 54 | reduction_override=None, 55 | **kwargs): 56 | assert reduction_override in (None, 'none', 'mean', 'sum') 57 | reduction = ( 58 | reduction_override if reduction_override else self.reduction) 59 | loss_bbox = self.loss_weight * balanced_l1_loss( 60 | pred, 61 | target, 62 | weight, 63 | alpha=self.alpha, 64 | gamma=self.gamma, 65 | beta=self.beta, 66 | reduction=reduction, 67 | avg_factor=avg_factor, 68 | **kwargs) 69 | return loss_bbox 70 | -------------------------------------------------------------------------------- /mmdet/core/utils/dist_utils.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch.distributed as dist 4 | from mmcv.runner import OptimizerHook 5 | from torch._utils import (_flatten_dense_tensors, _take_tensors, 6 | _unflatten_dense_tensors) 7 | 8 | 9 | def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): 10 | if bucket_size_mb > 0: 11 | bucket_size_bytes = bucket_size_mb * 1024 * 1024 12 | 
buckets = _take_tensors(tensors, bucket_size_bytes) 13 | else: 14 | buckets = OrderedDict() 15 | for tensor in tensors: 16 | tp = tensor.type() 17 | if tp not in buckets: 18 | buckets[tp] = [] 19 | buckets[tp].append(tensor) 20 | buckets = buckets.values() 21 | 22 | for bucket in buckets: 23 | flat_tensors = _flatten_dense_tensors(bucket) 24 | dist.all_reduce(flat_tensors) 25 | flat_tensors.div_(world_size) 26 | for tensor, synced in zip( 27 | bucket, _unflatten_dense_tensors(flat_tensors, bucket)): 28 | tensor.copy_(synced) 29 | 30 | 31 | def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): 32 | grads = [ 33 | param.grad.data for param in params 34 | if param.requires_grad and param.grad is not None 35 | ] 36 | world_size = dist.get_world_size() 37 | if coalesce: 38 | _allreduce_coalesced(grads, world_size, bucket_size_mb) 39 | else: 40 | for tensor in grads: 41 | dist.all_reduce(tensor.div_(world_size)) 42 | 43 | 44 | class DistOptimizerHook(OptimizerHook): 45 | 46 | def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1): 47 | self.grad_clip = grad_clip 48 | self.coalesce = coalesce 49 | self.bucket_size_mb = bucket_size_mb 50 | 51 | def after_train_iter(self, runner): 52 | runner.optimizer.zero_grad() 53 | runner.outputs['loss'].backward() 54 | allreduce_grads(runner.model.parameters(), self.coalesce, 55 | self.bucket_size_mb) 56 | if self.grad_clip is not None: 57 | self.clip_grads(runner.model.parameters()) 58 | runner.optimizer.step() 59 | -------------------------------------------------------------------------------- /tools/collect_env.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import subprocess 3 | import sys 4 | from collections import defaultdict 5 | 6 | import cv2 7 | import mmcv 8 | import torch 9 | import torchvision 10 | 11 | import mmdet 12 | from mmdet.ops import get_compiler_version, get_compiling_cuda_version 13 | 14 | 15 | def collect_env(): 16 | env_info = {} 17 | env_info['sys.platform'] = sys.platform 18 | env_info['Python'] = sys.version.replace('\n', '') 19 | 20 | cuda_available = torch.cuda.is_available() 21 | env_info['CUDA available'] = cuda_available 22 | 23 | if cuda_available: 24 | from torch.utils.cpp_extension import CUDA_HOME 25 | env_info['CUDA_HOME'] = CUDA_HOME 26 | 27 | if CUDA_HOME is not None and osp.isdir(CUDA_HOME): 28 | try: 29 | nvcc = osp.join(CUDA_HOME, 'bin/nvcc') 30 | nvcc = subprocess.check_output( 31 | '"{}" -V | tail -n1'.format(nvcc), shell=True) 32 | nvcc = nvcc.decode('utf-8').strip() 33 | except subprocess.SubprocessError: 34 | nvcc = 'Not Available' 35 | env_info['NVCC'] = nvcc 36 | 37 | devices = defaultdict(list) 38 | for k in range(torch.cuda.device_count()): 39 | devices[torch.cuda.get_device_name(k)].append(str(k)) 40 | for name, devids in devices.items(): 41 | env_info['GPU ' + ','.join(devids)] = name 42 | 43 | gcc = subprocess.check_output('gcc --version | head -n1', shell=True) 44 | gcc = gcc.decode('utf-8').strip() 45 | env_info['GCC'] = gcc 46 | 47 | env_info['PyTorch'] = torch.__version__ 48 | env_info['PyTorch compiling details'] = torch.__config__.show() 49 | 50 | env_info['TorchVision'] = torchvision.__version__ 51 | 52 | env_info['OpenCV'] = cv2.__version__ 53 | 54 | env_info['MMCV'] = mmcv.__version__ 55 | env_info['MMDetection'] = mmdet.__version__ 56 | env_info['MMDetection Compiler'] = get_compiler_version() 57 | env_info['MMDetection CUDA Compiler'] = get_compiling_cuda_version() 58 | 59 | for name, val in 
env_info.items(): 60 | print('{}: {}'.format(name, val)) 61 | 62 | 63 | if __name__ == "__main__": 64 | collect_env() 65 | -------------------------------------------------------------------------------- /configs/gn/README.md: -------------------------------------------------------------------------------- 1 | # Group Normalization 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{wu2018group, 7 | title={Group Normalization}, 8 | author={Wu, Yuxin and He, Kaiming}, 9 | booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, 10 | year={2018} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Model | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | 17 | |:-------------:|:----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| 18 | | R-50-FPN (d) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.8 | 36.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_2x_20180113-86832cf2.pth) | 19 | | R-50-FPN (d) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.1 | 36.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_3x_20180113-8e82f48d.pth) | 20 | | R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 0.970 | 4.8 | 41.5 | 37.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_2x_20180113-9598649c.pth) | 21 | | R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | 0.970 | 4.8 | 41.6 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_3x_20180113-a14ffb96.pth) | 22 | | R-50-FPN (c) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.7 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_2x_20180113-ec93305c.pth) | 23 | | R-50-FPN (c) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.0 | 36.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_3x_20180113-9d230cab.pth) | 24 | 25 | **Notes:** 26 | - (d) means a pretrained model converted from Detectron, and (c) means a contributed model pretrained by [@thangvubk](https://github.com/thangvubk). 27 | - The `3x` schedule is epoch [28, 34, 36], i.e., the learning rate decays at epochs 28 and 34 and training runs for 36 epochs in total.
28 | - **The memory and train/inf time figures are outdated.** -------------------------------------------------------------------------------- /mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp: -------------------------------------------------------------------------------- 1 | // modified from 2 | // https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/SigmoidFocalLoss.h 3 | #include <torch/extension.h> 4 | 5 | at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits, 6 | const at::Tensor &targets, 7 | const int num_classes, 8 | const float gamma, const float alpha); 9 | 10 | at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits, 11 | const at::Tensor &targets, 12 | const at::Tensor &d_losses, 13 | const int num_classes, 14 | const float gamma, const float alpha); 15 | 16 | // Interface for Python 17 | at::Tensor SigmoidFocalLoss_forward(const at::Tensor &logits, 18 | const at::Tensor &targets, 19 | const int num_classes, const float gamma, 20 | const float alpha) { 21 | if (logits.is_cuda()) { 22 | return SigmoidFocalLoss_forward_cuda(logits, targets, num_classes, gamma, 23 | alpha); 24 | } 25 | AT_ERROR("SigmoidFocalLoss is not implemented on the CPU"); 26 | } 27 | 28 | at::Tensor SigmoidFocalLoss_backward(const at::Tensor &logits, 29 | const at::Tensor &targets, 30 | const at::Tensor &d_losses, 31 | const int num_classes, const float gamma, 32 | const float alpha) { 33 | if (logits.is_cuda()) { 34 | return SigmoidFocalLoss_backward_cuda(logits, targets, d_losses, 35 | num_classes, gamma, alpha); 36 | } 37 | AT_ERROR("SigmoidFocalLoss is not implemented on the CPU"); 38 | } 39 | 40 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 41 | m.def("forward", &SigmoidFocalLoss_forward, 42 | "SigmoidFocalLoss forward (CUDA)"); 43 | m.def("backward", &SigmoidFocalLoss_backward, 44 | "SigmoidFocalLoss backward (CUDA)"); 45 | } 46 | -------------------------------------------------------------------------------- /configs/ms_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Mask Scoring R-CNN 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{huang2019msrcnn, 7 | title={Mask Scoring R-CNN}, 8 | author={Zhaojin Huang and Lichao Huang and Yongchao Gong and Chang Huang and Xinggang Wang}, 9 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, 10 | year={2019}, 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | 17 | |:-------------:|:----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| 18 | | R-50-FPN | caffe | 1x | 4.3 | 0.537 | 10.1 | 37.4 | 35.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_r50_caffe_fpn_1x_20190624-619934b5.pth) | 19 | | R-50-FPN | caffe | 2x | - | - | - | 38.2 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ms-rcnn/ms_rcnn_r50_caffe_fpn_2x_20190525-a07be31e.pth) | 20 | | R-101-FPN | caffe | 1x | 6.2 | 0.682 | 9.1 | 39.8 | 37.2 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_r101_caffe_fpn_1x_20190624-677a5548.pth) | 21 | | R-101-FPN | caffe | 2x | - | - | - | 40.7 | 37.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ms-rcnn/ms_rcnn_r101_caffe_fpn_2x_20190525-4aee1528.pth) | 22 | | R-X101-32x4d | pytorch | 2x | 7.6 | 0.844 | 8.0 | 41.7 |
38.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_x101_32x4d_fpn_2x_20190628-ab454d07.pth) | 23 | | R-X101-64x4d | pytorch | 1x | 10.5 | 1.214 | 6.4 | 42.0 | 39.1 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ms-rcnn/ms_rcnn_x101_64x4d_fpn_1x_20190628-dec32bda.pth) | 24 | | R-X101-64x4d | pytorch | 2x | - | - | - | 42.2 | 38.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ms-rcnn/ms_rcnn_x101_64x4d_fpn_2x_20190525-c044c25a.pth) | 25 | -------------------------------------------------------------------------------- /mmdet/models/shared_heads/res_layer.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from mmcv.cnn import constant_init, kaiming_init 3 | from mmcv.runner import load_checkpoint 4 | 5 | from mmdet.core import auto_fp16 6 | from mmdet.utils import get_root_logger 7 | from ..backbones import ResNet, make_res_layer 8 | from ..registry import SHARED_HEADS 9 | 10 | 11 | @SHARED_HEADS.register_module 12 | class ResLayer(nn.Module): 13 | 14 | def __init__(self, 15 | depth, 16 | stage=3, 17 | stride=2, 18 | dilation=1, 19 | style='pytorch', 20 | norm_cfg=dict(type='BN', requires_grad=True), 21 | norm_eval=True, 22 | with_cp=False, 23 | dcn=None): 24 | super(ResLayer, self).__init__() 25 | self.norm_eval = norm_eval 26 | self.norm_cfg = norm_cfg 27 | self.stage = stage 28 | self.fp16_enabled = False 29 | block, stage_blocks = ResNet.arch_settings[depth] 30 | stage_block = stage_blocks[stage] 31 | planes = 64 * 2**stage 32 | inplanes = 64 * 2**(stage - 1) * block.expansion 33 | 34 | res_layer = make_res_layer( 35 | block, 36 | inplanes, 37 | planes, 38 | stage_block, 39 | stride=stride, 40 | dilation=dilation, 41 | style=style, 42 | with_cp=with_cp, 43 | norm_cfg=self.norm_cfg, 44 | dcn=dcn) 45 | self.add_module('layer{}'.format(stage + 1), res_layer) 46 | 47 | def init_weights(self, pretrained=None): 48 | if isinstance(pretrained, str): 49 | logger = get_root_logger() 50 | load_checkpoint(self, pretrained, strict=False, logger=logger) 51 | elif pretrained is None: 52 | for m in self.modules(): 53 | if isinstance(m, nn.Conv2d): 54 | kaiming_init(m) 55 | elif isinstance(m, nn.BatchNorm2d): 56 | constant_init(m, 1) 57 | else: 58 | raise TypeError('pretrained must be a str or None') 59 | 60 | @auto_fp16() 61 | def forward(self, x): 62 | res_layer = getattr(self, 'layer{}'.format(self.stage + 1)) 63 | out = res_layer(x) 64 | return out 65 | 66 | def train(self, mode=True): 67 | super(ResLayer, self).train(mode) 68 | if self.norm_eval: 69 | for m in self.modules(): 70 | if isinstance(m, nn.BatchNorm2d): 71 | m.eval() 72 | -------------------------------------------------------------------------------- /mmdet/models/detectors/fast_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class FastRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | bbox_roi_extractor, 11 | bbox_head, 12 | train_cfg, 13 | test_cfg, 14 | neck=None, 15 | shared_head=None, 16 | mask_roi_extractor=None, 17 | mask_head=None, 18 | pretrained=None): 19 | super(FastRCNN, self).__init__( 20 | backbone=backbone, 21 | neck=neck, 22 | shared_head=shared_head, 23 | bbox_roi_extractor=bbox_roi_extractor, 24 | bbox_head=bbox_head, 25 | train_cfg=train_cfg, 26 | 
test_cfg=test_cfg, 27 | mask_roi_extractor=mask_roi_extractor, 28 | mask_head=mask_head, 29 | pretrained=pretrained) 30 | 31 | def forward_test(self, imgs, img_metas, proposals, **kwargs): 32 | """ 33 | Args: 34 | imgs (List[Tensor]): the outer list indicates test-time 35 | augmentations and the inner Tensor should have a shape NxCxHxW, 36 | which contains all images in the batch. 37 | img_metas (List[List[dict]]): the outer list indicates test-time 38 | augs (multiscale, flip, etc.) and the inner list indicates 39 | images in a batch 40 | proposals (List[List[Tensor | None]]): predefined proposals for 41 | each test-time augmentation and each image. 42 | """ 43 | for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: 44 | if not isinstance(var, list): 45 | raise TypeError('{} must be a list, but got {}'.format( 46 | name, type(var))) 47 | 48 | num_augs = len(imgs) 49 | if num_augs != len(img_metas): 50 | raise ValueError( 51 | 'num of augmentations ({}) != num of image meta ({})'.format( 52 | len(imgs), len(img_metas))) 53 | # TODO: remove the restriction of imgs_per_gpu == 1 when prepared 54 | imgs_per_gpu = imgs[0].size(0) 55 | assert imgs_per_gpu == 1 56 | 57 | if num_augs == 1: 58 | return self.simple_test(imgs[0], img_metas[0], proposals[0], 59 | **kwargs) 60 | else: 61 | return self.aug_test(imgs, img_metas, proposals, **kwargs) 62 | -------------------------------------------------------------------------------- /tests/test_nms.py: -------------------------------------------------------------------------------- 1 | """ 2 | CommandLine: 3 | pytest tests/test_nms.py 4 | """ 5 | import numpy as np 6 | import torch 7 | 8 | from mmdet.ops.nms.nms_wrapper import nms 9 | 10 | 11 | def test_nms_device_and_dtypes_cpu(): 12 | """ 13 | CommandLine: 14 | xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_cpu 15 | """ 16 | iou_thr = 0.7 17 | base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9], 18 | [49.3, 32.9, 51.0, 35.3, 0.9], 19 | [35.3, 11.5, 39.9, 14.5, 0.4], 20 | [35.2, 11.7, 39.7, 15.7, 0.3]]) 21 | 22 | # CPU can handle float32 and float64 23 | dets = base_dets.astype(np.float32) 24 | suppressed, inds = nms(dets, iou_thr) 25 | assert dets.dtype == suppressed.dtype 26 | assert len(inds) == len(suppressed) == 3 27 | 28 | dets = torch.FloatTensor(base_dets) 29 | suppressed, inds = nms(dets, iou_thr) 30 | assert dets.dtype == suppressed.dtype 31 | assert len(inds) == len(suppressed) == 3 32 | 33 | dets = base_dets.astype(np.float64) 34 | suppressed, inds = nms(dets, iou_thr) 35 | assert dets.dtype == suppressed.dtype 36 | assert len(inds) == len(suppressed) == 3 37 | 38 | dets = torch.DoubleTensor(base_dets) 39 | suppressed, inds = nms(dets, iou_thr) 40 | assert dets.dtype == suppressed.dtype 41 | assert len(inds) == len(suppressed) == 3 42 | 43 | 44 | def test_nms_device_and_dtypes_gpu(): 45 | """ 46 | CommandLine: 47 | xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_gpu 48 | """ 49 | if not torch.cuda.is_available(): 50 | import pytest 51 | pytest.skip('test requires GPU and torch+cuda') 52 | 53 | iou_thr = 0.7 54 | base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9], 55 | [49.3, 32.9, 51.0, 35.3, 0.9], 56 | [35.3, 11.5, 39.9, 14.5, 0.4], 57 | [35.2, 11.7, 39.7, 15.7, 0.3]]) 58 | 59 | for device_id in range(torch.cuda.device_count()): 60 | print('Run NMS on device_id = {!r}'.format(device_id)) 61 | # GPU can handle float32 but not float64 62 | dets = base_dets.astype(np.float32) 63 | suppressed, inds = nms(dets, iou_thr, device_id) 64 | assert dets.dtype == suppressed.dtype 65 |
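        # Note (added): the first two boxes overlap with IoU just above 0.7, so
        # one of them is suppressed and 3 of the 4 detections remain.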
assert len(inds) == len(suppressed) == 3 66 | 67 | dets = torch.FloatTensor(base_dets).to(device_id) 68 | suppressed, inds = nms(dets, iou_thr) 69 | assert dets.dtype == suppressed.dtype 70 | assert len(inds) == len(suppressed) == 3 71 | -------------------------------------------------------------------------------- /mmdet/ops/nms/src/nms_cpu.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | #include <torch/extension.h> 3 | 4 | template <typename scalar_t> 5 | at::Tensor nms_cpu_kernel(const at::Tensor& dets, const float threshold) { 6 | AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor"); 7 | 8 | if (dets.numel() == 0) { 9 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); 10 | } 11 | 12 | auto x1_t = dets.select(1, 0).contiguous(); 13 | auto y1_t = dets.select(1, 1).contiguous(); 14 | auto x2_t = dets.select(1, 2).contiguous(); 15 | auto y2_t = dets.select(1, 3).contiguous(); 16 | auto scores = dets.select(1, 4).contiguous(); 17 | 18 | at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1); 19 | 20 | auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); 21 | 22 | auto ndets = dets.size(0); 23 | at::Tensor suppressed_t = 24 | at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU)); 25 | 26 | auto suppressed = suppressed_t.data<uint8_t>(); 27 | auto order = order_t.data<int64_t>(); 28 | auto x1 = x1_t.data<scalar_t>(); 29 | auto y1 = y1_t.data<scalar_t>(); 30 | auto x2 = x2_t.data<scalar_t>(); 31 | auto y2 = y2_t.data<scalar_t>(); 32 | auto areas = areas_t.data<scalar_t>(); 33 | 34 | for (int64_t _i = 0; _i < ndets; _i++) { 35 | auto i = order[_i]; 36 | if (suppressed[i] == 1) continue; 37 | auto ix1 = x1[i]; 38 | auto iy1 = y1[i]; 39 | auto ix2 = x2[i]; 40 | auto iy2 = y2[i]; 41 | auto iarea = areas[i]; 42 | 43 | for (int64_t _j = _i + 1; _j < ndets; _j++) { 44 | auto j = order[_j]; 45 | if (suppressed[j] == 1) continue; 46 | auto xx1 = std::max(ix1, x1[j]); 47 | auto yy1 = std::max(iy1, y1[j]); 48 | auto xx2 = std::min(ix2, x2[j]); 49 | auto yy2 = std::min(iy2, y2[j]); 50 | 51 | auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1 + 1); 52 | auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1 + 1); 53 | auto inter = w * h; 54 | auto ovr = inter / (iarea + areas[j] - inter); 55 | if (ovr >= threshold) suppressed[j] = 1; 56 | } 57 | } 58 | return at::nonzero(suppressed_t == 0).squeeze(1); 59 | } 60 | 61 | at::Tensor nms(const at::Tensor& dets, const float threshold) { 62 | at::Tensor result; 63 | AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms", [&] { 64 | result = nms_cpu_kernel<scalar_t>(dets, threshold); 65 | }); 66 | return result; 67 | } 68 | 69 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 70 | m.def("nms", &nms, "non-maximum suppression"); 71 | } -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | # 13 | # import os 14 | # import sys 15 | # sys.path.insert(0, os.path.abspath('.')) 16 | 17 | # -- Project information ----------------------------------------------------- 18 | 19 | project = 'MMDetection' 20 | copyright = '2018-2020, OpenMMLab' 21 | author = 'OpenMMLab' 22 | 23 | # The full version, including alpha/beta/rc tags 24 | release = '1.0.0' 25 | 26 | # -- General configuration --------------------------------------------------- 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = [ 32 | 'sphinx.ext.autodoc', 33 | 'sphinx.ext.napoleon', 34 | 'sphinx.ext.viewcode', 35 | 'recommonmark', 36 | 'sphinx_markdown_tables', 37 | ] 38 | 39 | autodoc_mock_imports = ['torch', 'torchvision', 'mmcv'] 40 | 41 | # Add any paths that contain templates here, relative to this directory. 42 | templates_path = ['_templates'] 43 | 44 | # The suffix(es) of source filenames. 45 | # You can specify multiple suffix as a list of string: 46 | # 47 | source_suffix = { 48 | '.rst': 'restructuredtext', 49 | '.md': 'markdown', 50 | } 51 | 52 | # The master toctree document. 53 | master_doc = 'index' 54 | 55 | # List of patterns, relative to source directory, that match files and 56 | # directories to ignore when looking for source files. 57 | # This pattern also affects html_static_path and html_extra_path. 58 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 59 | 60 | # -- Options for HTML output ------------------------------------------------- 61 | 62 | # The theme to use for HTML and HTML Help pages. See the documentation for 63 | # a list of builtin themes. 64 | # 65 | html_theme = 'sphinx_rtd_theme' 66 | 67 | # Add any paths that contain custom static files (such as style sheets) here, 68 | # relative to this directory. They are copied after the builtin static files, 69 | # so a file named "default.css" will overwrite the builtin "default.css". 
70 | html_static_path = ['_static'] 71 | -------------------------------------------------------------------------------- /tests/test_async.py: -------------------------------------------------------------------------------- 1 | """Tests for async interface.""" 2 | 3 | import asyncio 4 | import os 5 | import sys 6 | 7 | import asynctest 8 | import mmcv 9 | import torch 10 | 11 | from mmdet.apis import async_inference_detector, init_detector 12 | 13 | if sys.version_info >= (3, 7): 14 | from mmdet.utils.contextmanagers import concurrent 15 | 16 | 17 | class AsyncTestCase(asynctest.TestCase): 18 | use_default_loop = False 19 | forbid_get_event_loop = True 20 | 21 | TEST_TIMEOUT = int(os.getenv("ASYNCIO_TEST_TIMEOUT", "30")) 22 | 23 | def _run_test_method(self, method): 24 | result = method() 25 | if asyncio.iscoroutine(result): 26 | self.loop.run_until_complete( 27 | asyncio.wait_for(result, timeout=self.TEST_TIMEOUT)) 28 | 29 | 30 | class MaskRCNNDetector: 31 | 32 | def __init__(self, 33 | model_config, 34 | checkpoint=None, 35 | streamqueue_size=3, 36 | device="cuda:0"): 37 | 38 | self.streamqueue_size = streamqueue_size 39 | self.device = device 40 | # build the model and load the checkpoint (if one is given) 41 | self.model = init_detector( 42 | model_config, checkpoint=checkpoint, device=self.device) 43 | self.streamqueue = None 44 | 45 | async def init(self): 46 | self.streamqueue = asyncio.Queue() 47 | for _ in range(self.streamqueue_size): 48 | stream = torch.cuda.Stream(device=self.device) 49 | self.streamqueue.put_nowait(stream) 50 | 51 | if sys.version_info >= (3, 7): 52 | 53 | async def apredict(self, img): 54 | if isinstance(img, str): 55 | img = mmcv.imread(img) 56 | async with concurrent(self.streamqueue): 57 | result = await async_inference_detector(self.model, img) 58 | return result 59 | 60 | 61 | class AsyncInferenceTestCase(AsyncTestCase): 62 | 63 | if sys.version_info >= (3, 7): 64 | 65 | async def test_simple_inference(self): 66 | if not torch.cuda.is_available(): 67 | import pytest 68 | 69 | pytest.skip("test requires GPU and torch+cuda") 70 | 71 | root_dir = os.path.dirname(os.path.dirname(__file__)) 72 | model_config = os.path.join(root_dir, 73 | "configs/mask_rcnn_r50_fpn_1x.py") 74 | detector = MaskRCNNDetector(model_config) 75 | await detector.init() 76 | img_path = os.path.join(root_dir, "demo/demo.jpg") 77 | bboxes, _ = await detector.apredict(img_path) 78 | self.assertTrue(bboxes) 79 | -------------------------------------------------------------------------------- /mmdet/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from mmcv.runner import get_dist_info 4 | 5 | 6 | def get_root_logger(log_file=None, log_level=logging.INFO): 7 | """Get the root logger. 8 | 9 | The logger will be initialized if it has not been initialized. By default a 10 | StreamHandler will be added. If `log_file` is specified, a FileHandler will 11 | also be added. The name of the root logger is the top-level package name, 12 | e.g., "mmdet". 13 | 14 | Args: 15 | log_file (str | None): The log filename. If specified, a FileHandler 16 | will be added to the root logger. 17 | log_level (int): The root logger level. Note that only the process of 18 | rank 0 is affected, while other processes will set the level to 19 | "ERROR" and be silent most of the time. 20 | 21 | Returns: 22 | logging.Logger: The root logger.
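    Example (an illustrative call):
        >>> logger = get_root_logger(log_level=logging.INFO)
        >>> logger.info('training started')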
23 | """ 24 | logger = logging.getLogger(__name__.split('.')[0]) # i.e., mmdet 25 | # if the logger has been initialized, just return it 26 | if logger.hasHandlers(): 27 | return logger 28 | 29 | format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' 30 | logging.basicConfig(format=format_str, level=log_level) 31 | rank, _ = get_dist_info() 32 | if rank != 0: 33 | logger.setLevel('ERROR') 34 | elif log_file is not None: 35 | file_handler = logging.FileHandler(log_file, 'w') 36 | file_handler.setFormatter(logging.Formatter(format_str)) 37 | file_handler.setLevel(log_level) 38 | logger.addHandler(file_handler) 39 | 40 | return logger 41 | 42 | 43 | def print_log(msg, logger=None, level=logging.INFO): 44 | """Print a log message. 45 | 46 | Args: 47 | msg (str): The message to be logged. 48 | logger (logging.Logger | str | None): The logger to be used. Some 49 | special loggers are: 50 | - "root": the root logger obtained with `get_root_logger()`. 51 | - "silent": no message will be printed. 52 | - None: The `print()` method will be used to print log messages. 53 | level (int): Logging level. Only available when `logger` is a Logger 54 | object or "root". 55 | """ 56 | if logger is None: 57 | print(msg) 58 | elif logger == 'root': 59 | _logger = get_root_logger() 60 | _logger.log(level, msg) 61 | elif isinstance(logger, logging.Logger): 62 | logger.log(level, msg) 63 | elif logger != 'silent': 64 | raise TypeError( 65 | 'logger should be either a logging.Logger object, "root", ' 66 | '"silent" or None, but got {}'.format(logger)) 67 | -------------------------------------------------------------------------------- /mmdet/core/post_processing/bbox_nms.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from mmdet.ops.nms import nms_wrapper 4 | 5 | 6 | def multiclass_nms(multi_bboxes, 7 | multi_scores, 8 | score_thr, 9 | nms_cfg, 10 | max_num=-1, 11 | score_factors=None): 12 | """NMS for multi-class bboxes. 13 | 14 | Args: 15 | multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) 16 | multi_scores (Tensor): shape (n, #class), where the 0th column 17 | contains scores of the background class, but this will be ignored. 18 | score_thr (float): bbox threshold, bboxes with scores lower than it 19 | will not be considered. 20 | nms_thr (float): NMS IoU threshold 21 | max_num (int): if there are more than max_num bboxes after NMS, 22 | only top max_num will be kept. 23 | score_factors (Tensor): The factors multiplied to scores before 24 | applying NMS 25 | 26 | Returns: 27 | tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels 28 | are 0-based. 
29 | """ 30 | num_classes = multi_scores.shape[1] 31 | bboxes, labels = [], [] 32 | nms_cfg_ = nms_cfg.copy() 33 | nms_type = nms_cfg_.pop('type', 'nms') 34 | nms_op = getattr(nms_wrapper, nms_type) 35 | for i in range(1, num_classes): 36 | cls_inds = multi_scores[:, i] > score_thr 37 | if not cls_inds.any(): 38 | continue 39 | # get bboxes and scores of this class 40 | if multi_bboxes.shape[1] == 4: 41 | _bboxes = multi_bboxes[cls_inds, :] 42 | else: 43 | _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4] 44 | _scores = multi_scores[cls_inds, i] 45 | if score_factors is not None: 46 | _scores *= score_factors[cls_inds] 47 | cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1) 48 | cls_dets, _ = nms_op(cls_dets, **nms_cfg_) 49 | cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ), 50 | i - 1, 51 | dtype=torch.long) 52 | bboxes.append(cls_dets) 53 | labels.append(cls_labels) 54 | if bboxes: 55 | bboxes = torch.cat(bboxes) 56 | labels = torch.cat(labels) 57 | if bboxes.shape[0] > max_num: 58 | _, inds = bboxes[:, -1].sort(descending=True) 59 | inds = inds[:max_num] 60 | bboxes = bboxes[inds] 61 | labels = labels[inds] 62 | else: 63 | bboxes = multi_bboxes.new_zeros((0, 5)) 64 | labels = multi_bboxes.new_zeros((0, ), dtype=torch.long) 65 | 66 | return bboxes, labels 67 | -------------------------------------------------------------------------------- /mmdet/datasets/loader/build_loader.py: -------------------------------------------------------------------------------- 1 | import platform 2 | from functools import partial 3 | 4 | from mmcv.parallel import collate 5 | from mmcv.runner import get_dist_info 6 | from torch.utils.data import DataLoader 7 | 8 | from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler 9 | 10 | if platform.system() != 'Windows': 11 | # https://github.com/pytorch/pytorch/issues/973 12 | import resource 13 | rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) 14 | resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) 15 | 16 | 17 | def build_dataloader(dataset, 18 | imgs_per_gpu, 19 | workers_per_gpu, 20 | num_gpus=1, 21 | dist=True, 22 | shuffle=True, 23 | **kwargs): 24 | """Build PyTorch DataLoader. 25 | 26 | In distributed training, each GPU/process has a dataloader. 27 | In non-distributed training, there is only one dataloader for all GPUs. 28 | 29 | Args: 30 | dataset (Dataset): A PyTorch dataset. 31 | imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of 32 | each GPU. 33 | workers_per_gpu (int): How many subprocesses to use for data loading 34 | for each GPU. 35 | num_gpus (int): Number of GPUs. Only used in non-distributed training. 36 | dist (bool): Distributed training/test or not. Default: True. 37 | shuffle (bool): Whether to shuffle the data at every epoch. 38 | Default: True. 39 | kwargs: any keyword argument to be used to initialize DataLoader 40 | 41 | Returns: 42 | DataLoader: A PyTorch dataloader. 
43 | """ 44 | if dist: 45 | rank, world_size = get_dist_info() 46 | # DistributedGroupSampler will definitely shuffle the data to satisfy 47 | # that images on each GPU are in the same group 48 | if shuffle: 49 | sampler = DistributedGroupSampler(dataset, imgs_per_gpu, 50 | world_size, rank) 51 | else: 52 | sampler = DistributedSampler( 53 | dataset, world_size, rank, shuffle=False) 54 | batch_size = imgs_per_gpu 55 | num_workers = workers_per_gpu 56 | else: 57 | sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None 58 | batch_size = num_gpus * imgs_per_gpu 59 | num_workers = num_gpus * workers_per_gpu 60 | 61 | data_loader = DataLoader( 62 | dataset, 63 | batch_size=batch_size, 64 | sampler=sampler, 65 | num_workers=num_workers, 66 | collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu), 67 | pin_memory=False, 68 | **kwargs) 69 | 70 | return data_loader 71 | -------------------------------------------------------------------------------- /mmdet/utils/registry.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | from functools import partial 3 | 4 | import mmcv 5 | 6 | 7 | class Registry(object): 8 | 9 | def __init__(self, name): 10 | self._name = name 11 | self._module_dict = dict() 12 | 13 | def __repr__(self): 14 | format_str = self.__class__.__name__ + '(name={}, items={})'.format( 15 | self._name, list(self._module_dict.keys())) 16 | return format_str 17 | 18 | @property 19 | def name(self): 20 | return self._name 21 | 22 | @property 23 | def module_dict(self): 24 | return self._module_dict 25 | 26 | def get(self, key): 27 | return self._module_dict.get(key, None) 28 | 29 | def _register_module(self, module_class, force=False): 30 | """Register a module. 31 | 32 | Args: 33 | module (:obj:`nn.Module`): Module to be registered. 34 | """ 35 | if not inspect.isclass(module_class): 36 | raise TypeError('module must be a class, but got {}'.format( 37 | type(module_class))) 38 | module_name = module_class.__name__ 39 | if not force and module_name in self._module_dict: 40 | raise KeyError('{} is already registered in {}'.format( 41 | module_name, self.name)) 42 | self._module_dict[module_name] = module_class 43 | 44 | def register_module(self, cls=None, force=False): 45 | if cls is None: 46 | return partial(self.register_module, force=force) 47 | self._register_module(cls, force=force) 48 | return cls 49 | 50 | 51 | def build_from_cfg(cfg, registry, default_args=None): 52 | """Build a module from config dict. 53 | 54 | Args: 55 | cfg (dict): Config dict. It should at least contain the key "type". 56 | registry (:obj:`Registry`): The registry to search the type from. 57 | default_args (dict, optional): Default initialization arguments. 58 | 59 | Returns: 60 | obj: The constructed object. 
61 | """ 62 | assert isinstance(cfg, dict) and 'type' in cfg 63 | assert isinstance(default_args, dict) or default_args is None 64 | args = cfg.copy() 65 | obj_type = args.pop('type') 66 | if mmcv.is_str(obj_type): 67 | obj_cls = registry.get(obj_type) 68 | if obj_cls is None: 69 | raise KeyError('{} is not in the {} registry'.format( 70 | obj_type, registry.name)) 71 | elif inspect.isclass(obj_type): 72 | obj_cls = obj_type 73 | else: 74 | raise TypeError('type must be a str or valid type, but got {}'.format( 75 | type(obj_type))) 76 | if default_args is not None: 77 | for name, value in default_args.items(): 78 | args.setdefault(name, value) 79 | return obj_cls(**args) 80 | -------------------------------------------------------------------------------- /mmdet/ops/roi_pool/roi_pool.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Function 4 | from torch.autograd.function import once_differentiable 5 | from torch.nn.modules.utils import _pair 6 | 7 | from . import roi_pool_cuda 8 | 9 | 10 | class RoIPoolFunction(Function): 11 | 12 | @staticmethod 13 | def forward(ctx, features, rois, out_size, spatial_scale): 14 | assert features.is_cuda 15 | out_h, out_w = _pair(out_size) 16 | assert isinstance(out_h, int) and isinstance(out_w, int) 17 | ctx.save_for_backward(rois) 18 | num_channels = features.size(1) 19 | num_rois = rois.size(0) 20 | out_size = (num_rois, num_channels, out_h, out_w) 21 | output = features.new_zeros(out_size) 22 | argmax = features.new_zeros(out_size, dtype=torch.int) 23 | roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale, 24 | output, argmax) 25 | ctx.spatial_scale = spatial_scale 26 | ctx.feature_size = features.size() 27 | ctx.argmax = argmax 28 | 29 | return output 30 | 31 | @staticmethod 32 | @once_differentiable 33 | def backward(ctx, grad_output): 34 | assert grad_output.is_cuda 35 | spatial_scale = ctx.spatial_scale 36 | feature_size = ctx.feature_size 37 | argmax = ctx.argmax 38 | rois = ctx.saved_tensors[0] 39 | assert feature_size is not None 40 | 41 | grad_input = grad_rois = None 42 | if ctx.needs_input_grad[0]: 43 | grad_input = grad_output.new_zeros(feature_size) 44 | roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax, 45 | spatial_scale, grad_input) 46 | 47 | return grad_input, grad_rois, None, None 48 | 49 | 50 | roi_pool = RoIPoolFunction.apply 51 | 52 | 53 | class RoIPool(nn.Module): 54 | 55 | def __init__(self, out_size, spatial_scale, use_torchvision=False): 56 | super(RoIPool, self).__init__() 57 | 58 | self.out_size = _pair(out_size) 59 | self.spatial_scale = float(spatial_scale) 60 | self.use_torchvision = use_torchvision 61 | 62 | def forward(self, features, rois): 63 | if self.use_torchvision: 64 | from torchvision.ops import roi_pool as tv_roi_pool 65 | return tv_roi_pool(features, rois, self.out_size, 66 | self.spatial_scale) 67 | else: 68 | return roi_pool(features, rois, self.out_size, self.spatial_scale) 69 | 70 | def __repr__(self): 71 | format_str = self.__class__.__name__ 72 | format_str += '(out_size={}, spatial_scale={}'.format( 73 | self.out_size, self.spatial_scale) 74 | format_str += ', use_torchvision={})'.format(self.use_torchvision) 75 | return format_str 76 | -------------------------------------------------------------------------------- /mmdet/core/bbox/bbox_target.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from ..utils import 
multi_apply 4 | from .transforms import bbox2delta 5 | 6 | 7 | def bbox_target(pos_bboxes_list, 8 | neg_bboxes_list, 9 | pos_gt_bboxes_list, 10 | pos_gt_labels_list, 11 | cfg, 12 | reg_classes=1, 13 | target_means=[.0, .0, .0, .0], 14 | target_stds=[1.0, 1.0, 1.0, 1.0], 15 | concat=True): 16 | labels, label_weights, bbox_targets, bbox_weights = multi_apply( 17 | bbox_target_single, 18 | pos_bboxes_list, 19 | neg_bboxes_list, 20 | pos_gt_bboxes_list, 21 | pos_gt_labels_list, 22 | cfg=cfg, 23 | reg_classes=reg_classes, 24 | target_means=target_means, 25 | target_stds=target_stds) 26 | 27 | if concat: 28 | labels = torch.cat(labels, 0) 29 | label_weights = torch.cat(label_weights, 0) 30 | bbox_targets = torch.cat(bbox_targets, 0) 31 | bbox_weights = torch.cat(bbox_weights, 0) 32 | return labels, label_weights, bbox_targets, bbox_weights 33 | 34 | 35 | def bbox_target_single(pos_bboxes, 36 | neg_bboxes, 37 | pos_gt_bboxes, 38 | pos_gt_labels, 39 | cfg, 40 | reg_classes=1, 41 | target_means=[.0, .0, .0, .0], 42 | target_stds=[1.0, 1.0, 1.0, 1.0]): 43 | num_pos = pos_bboxes.size(0) 44 | num_neg = neg_bboxes.size(0) 45 | num_samples = num_pos + num_neg 46 | labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long) 47 | label_weights = pos_bboxes.new_zeros(num_samples) 48 | bbox_targets = pos_bboxes.new_zeros(num_samples, 4) 49 | bbox_weights = pos_bboxes.new_zeros(num_samples, 4) 50 | if num_pos > 0: 51 | labels[:num_pos] = pos_gt_labels 52 | pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight 53 | label_weights[:num_pos] = pos_weight 54 | pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means, 55 | target_stds) 56 | bbox_targets[:num_pos, :] = pos_bbox_targets 57 | bbox_weights[:num_pos, :] = 1 58 | if num_neg > 0: 59 | label_weights[-num_neg:] = 1.0 60 | 61 | return labels, label_weights, bbox_targets, bbox_weights 62 | 63 | 64 | def expand_target(bbox_targets, bbox_weights, labels, num_classes): 65 | bbox_targets_expand = bbox_targets.new_zeros( 66 | (bbox_targets.size(0), 4 * num_classes)) 67 | bbox_weights_expand = bbox_weights.new_zeros( 68 | (bbox_weights.size(0), 4 * num_classes)) 69 | for i in torch.nonzero(labels > 0).squeeze(-1): 70 | start, end = labels[i] * 4, (labels[i] + 1) * 4 71 | bbox_targets_expand[i, start:end] = bbox_targets[i, :] 72 | bbox_weights_expand[i, start:end] = bbox_weights[i, :] 73 | return bbox_targets_expand, bbox_weights_expand 74 | -------------------------------------------------------------------------------- /mmdet/ops/masked_conv/src/masked_conv2d_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | #include <cmath> 4 | #include <vector> 5 | 6 | int MaskedIm2colForwardLaucher(const at::Tensor im, const int height, 7 | const int width, const int channels, 8 | const int kernel_h, const int kernel_w, 9 | const int pad_h, const int pad_w, 10 | const at::Tensor mask_h_idx, 11 | const at::Tensor mask_w_idx, const int mask_cnt, 12 | at::Tensor col); 13 | 14 | int MaskedCol2imForwardLaucher(const at::Tensor col, const int height, 15 | const int width, const int channels, 16 | const at::Tensor mask_h_idx, 17 | const at::Tensor mask_w_idx, const int mask_cnt, 18 | at::Tensor im); 19 | 20 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x, " must be a CUDA tensor ") 21 | #define CHECK_CONTIGUOUS(x) \ 22 | TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") 23 | #define CHECK_INPUT(x) \ 24 | CHECK_CUDA(x); \ 25 | CHECK_CONTIGUOUS(x) 26 | 27 | int masked_im2col_forward_cuda(const at::Tensor im,
const at::Tensor mask_h_idx, 28 | const at::Tensor mask_w_idx, const int kernel_h, 29 | const int kernel_w, const int pad_h, 30 | const int pad_w, at::Tensor col) { 31 | CHECK_INPUT(im); 32 | CHECK_INPUT(mask_h_idx); 33 | CHECK_INPUT(mask_w_idx); 34 | CHECK_INPUT(col); 35 | // im: (n, ic, h, w), kernel size (kh, kw) 36 | // kernel: (oc, ic * kh * kw), col: (kh * kw * ic, ow * oh) 37 | 38 | int channels = im.size(1); 39 | int height = im.size(2); 40 | int width = im.size(3); 41 | int mask_cnt = mask_h_idx.size(0); 42 | 43 | MaskedIm2colForwardLaucher(im, height, width, channels, kernel_h, kernel_w, 44 | pad_h, pad_w, mask_h_idx, mask_w_idx, mask_cnt, 45 | col); 46 | 47 | return 1; 48 | } 49 | 50 | int masked_col2im_forward_cuda(const at::Tensor col, 51 | const at::Tensor mask_h_idx, 52 | const at::Tensor mask_w_idx, int height, 53 | int width, int channels, at::Tensor im) { 54 | CHECK_INPUT(col); 55 | CHECK_INPUT(mask_h_idx); 56 | CHECK_INPUT(mask_w_idx); 57 | CHECK_INPUT(im); 58 | // im: (n, ic, h, w), kernel size (kh, kw) 59 | // kernel: (oc, ic * kh * kw), col: (kh * kw * ic, ow * oh) 60 | 61 | int mask_cnt = mask_h_idx.size(0); 62 | 63 | MaskedCol2imForwardLaucher(col, height, width, channels, mask_h_idx, 64 | mask_w_idx, mask_cnt, im); 65 | 66 | return 1; 67 | } 68 | 69 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 70 | m.def("masked_im2col_forward", &masked_im2col_forward_cuda, 71 | "masked_im2col forward (CUDA)"); 72 | m.def("masked_col2im_forward", &masked_col2im_forward_cuda, 73 | "masked_col2im forward (CUDA)"); 74 | } -------------------------------------------------------------------------------- /mmdet/models/losses/focal_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | from mmdet.ops import sigmoid_focal_loss as _sigmoid_focal_loss 5 | from ..registry import LOSSES 6 | from .utils import weight_reduce_loss 7 | 8 | 9 | # This method is only for debugging 10 | def py_sigmoid_focal_loss(pred, 11 | target, 12 | weight=None, 13 | gamma=2.0, 14 | alpha=0.25, 15 | reduction='mean', 16 | avg_factor=None): 17 | pred_sigmoid = pred.sigmoid() 18 | target = target.type_as(pred) 19 | pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) 20 | focal_weight = (alpha * target + (1 - alpha) * 21 | (1 - target)) * pt.pow(gamma) 22 | loss = F.binary_cross_entropy_with_logits( 23 | pred, target, reduction='none') * focal_weight 24 | loss = weight_reduce_loss(loss, weight, reduction, avg_factor) 25 | return loss 26 | 27 | 28 | def sigmoid_focal_loss(pred, 29 | target, 30 | weight=None, 31 | gamma=2.0, 32 | alpha=0.25, 33 | reduction='mean', 34 | avg_factor=None): 35 | # Function.apply does not accept keyword arguments, so the decorator 36 | # "weighted_loss" is not applicable 37 | loss = _sigmoid_focal_loss(pred, target, gamma, alpha) 38 | # TODO: find a proper way to handle the shape of weight 39 | if weight is not None: 40 | weight = weight.view(-1, 1) 41 | loss = weight_reduce_loss(loss, weight, reduction, avg_factor) 42 | return loss 43 | 44 | 45 | @LOSSES.register_module 46 | class FocalLoss(nn.Module): 47 | 48 | def __init__(self, 49 | use_sigmoid=True, 50 | gamma=2.0, 51 | alpha=0.25, 52 | reduction='mean', 53 | loss_weight=1.0): 54 | super(FocalLoss, self).__init__() 55 | assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
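        # Note (added): only the sigmoid variant is backed by the compiled CUDA
        # op imported above; forward() raises NotImplementedError otherwise.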
56 | self.use_sigmoid = use_sigmoid 57 | self.gamma = gamma 58 | self.alpha = alpha 59 | self.reduction = reduction 60 | self.loss_weight = loss_weight 61 | 62 | def forward(self, 63 | pred, 64 | target, 65 | weight=None, 66 | avg_factor=None, 67 | reduction_override=None): 68 | assert reduction_override in (None, 'none', 'mean', 'sum') 69 | reduction = ( 70 | reduction_override if reduction_override else self.reduction) 71 | if self.use_sigmoid: 72 | loss_cls = self.loss_weight * sigmoid_focal_loss( 73 | pred, 74 | target, 75 | weight, 76 | gamma=self.gamma, 77 | alpha=self.alpha, 78 | reduction=reduction, 79 | avg_factor=avg_factor) 80 | else: 81 | raise NotImplementedError 82 | return loss_cls 83 | -------------------------------------------------------------------------------- /configs/fcos/README.md: -------------------------------------------------------------------------------- 1 | # FCOS: Fully Convolutional One-Stage Object Detection 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @article{tian2019fcos, 7 | title={FCOS: Fully Convolutional One-Stage Object Detection}, 8 | author={Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong}, 9 | journal={arXiv preprint arXiv:1904.01355}, 10 | year={2019} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 17 | |:---------:|:-------:|:-------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 18 | | R-50 | caffe | N | N | 1x | 5.5 | 0.373 | 13.7 | 35.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r50_caffe_fpn_1x_4gpu_20190516-a7cac5ff.pth) | 19 | | R-50 | caffe | Y | N | 1x | 6.9 | 0.396 | 13.6 | 36.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu_20190516-9f253a93.pth) | 20 | | R-50 | caffe | Y | N | 2x | - | - | - | 36.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r50_caffe_fpn_gn_2x_4gpu_20190516_-93484354.pth) | 21 | | R-101 | caffe | Y | N | 1x | 10.4 | 0.558 | 11.6 | 39.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r101_caffe_fpn_gn_1x_4gpu_20190516-e4889733.pth) | 22 | | R-101 | caffe | Y | N | 2x | - | - | - | 39.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r101_caffe_fpn_gn_2x_4gpu_20190516-c03af97b.pth) | 23 | 24 | 25 | | Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 26 | |:---------:|:-------:|:-------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 27 | | R-50 | caffe | Y | Y | 2x | - | - | - | 38.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_mstrain_640_800_r50_caffe_fpn_gn_2x_4gpu_20190516-f7329d80.pth) | 28 | | R-101 | caffe | Y | Y | 2x | - | - | - | 40.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu_20190516-42e6f62d.pth) | 29 | | X-101 | caffe | Y | Y | 2x | 9.7 | 0.892 | 7.0 | 42.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x_20190516-a36c0872.pth) | 30 | 31 | **Notes:** 32 | - To be consistent with the author's implementation, we use 4 GPUs with 4 images/GPU for R-50 and R-101 models, and 8 GPUs 
with 2 images/GPU for X-101 models. 33 | - The X-101 backbone is X-101-64x4d. 34 | -------------------------------------------------------------------------------- /mmdet/models/detectors/single_stage.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from mmdet.core import bbox2result 4 | from .. import builder 5 | from ..registry import DETECTORS 6 | from .base import BaseDetector 7 | 8 | 9 | @DETECTORS.register_module 10 | class SingleStageDetector(BaseDetector): 11 | """Base class for single-stage detectors. 12 | 13 | Single-stage detectors directly and densely predict bounding boxes on the 14 | output features of the backbone+neck. 15 | """ 16 | 17 | def __init__(self, 18 | backbone, 19 | neck=None, 20 | bbox_head=None, 21 | train_cfg=None, 22 | test_cfg=None, 23 | pretrained=None): 24 | super(SingleStageDetector, self).__init__() 25 | self.backbone = builder.build_backbone(backbone) 26 | if neck is not None: 27 | self.neck = builder.build_neck(neck) 28 | self.bbox_head = builder.build_head(bbox_head) 29 | self.train_cfg = train_cfg 30 | self.test_cfg = test_cfg 31 | self.init_weights(pretrained=pretrained) 32 | 33 | def init_weights(self, pretrained=None): 34 | super(SingleStageDetector, self).init_weights(pretrained) 35 | self.backbone.init_weights(pretrained=pretrained) 36 | if self.with_neck: 37 | if isinstance(self.neck, nn.Sequential): 38 | for m in self.neck: 39 | m.init_weights() 40 | else: 41 | self.neck.init_weights() 42 | self.bbox_head.init_weights() 43 | 44 | def extract_feat(self, img): 45 | """Directly extract features from the backbone+neck. 46 | """ 47 | x = self.backbone(img) 48 | if self.with_neck: 49 | x = self.neck(x) 50 | return x 51 | 52 | def forward_dummy(self, img): 53 | """Used for computing network flops. 54 | 55 | See `mmdetection/tools/get_flops.py` 56 | """ 57 | x = self.extract_feat(img) 58 | outs = self.bbox_head(x) 59 | return outs 60 | 61 | def forward_train(self, 62 | img, 63 | img_metas, 64 | gt_bboxes, 65 | gt_labels, 66 | gt_bboxes_ignore=None): 67 | x = self.extract_feat(img) 68 | outs = self.bbox_head(x) 69 | loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg) 70 | losses = self.bbox_head.loss( 71 | *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) 72 | return losses 73 | 74 | def simple_test(self, img, img_meta, rescale=False): 75 | x = self.extract_feat(img) 76 | outs = self.bbox_head(x) 77 | bbox_inputs = outs + (img_meta, self.test_cfg, rescale) 78 | bbox_list = self.bbox_head.get_bboxes(*bbox_inputs) 79 | bbox_results = [ 80 | bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) 81 | for det_bboxes, det_labels in bbox_list 82 | ] 83 | return bbox_results[0] 84 | 85 | def aug_test(self, imgs, img_metas, rescale=False): 86 | raise NotImplementedError 87 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/ohem_sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from ..transforms import bbox2roi 4 | from .base_sampler import BaseSampler 5 | 6 | 7 | class OHEMSampler(BaseSampler): 8 | """ 9 | Online Hard Example Mining Sampler described in [1]_. 10 | 11 | References: 12 | ..
[1] https://arxiv.org/pdf/1604.03540.pdf 13 | """ 14 | 15 | def __init__(self, 16 | num, 17 | pos_fraction, 18 | context, 19 | neg_pos_ub=-1, 20 | add_gt_as_proposals=True, 21 | **kwargs): 22 | super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, 23 | add_gt_as_proposals) 24 | if not hasattr(context, 'num_stages'): 25 | self.bbox_roi_extractor = context.bbox_roi_extractor 26 | self.bbox_head = context.bbox_head 27 | else: 28 | self.bbox_roi_extractor = context.bbox_roi_extractor[ 29 | context.current_stage] 30 | self.bbox_head = context.bbox_head[context.current_stage] 31 | 32 | def hard_mining(self, inds, num_expected, bboxes, labels, feats): 33 | with torch.no_grad(): 34 | rois = bbox2roi([bboxes]) 35 | bbox_feats = self.bbox_roi_extractor( 36 | feats[:self.bbox_roi_extractor.num_inputs], rois) 37 | cls_score, _ = self.bbox_head(bbox_feats) 38 | loss = self.bbox_head.loss( 39 | cls_score=cls_score, 40 | bbox_pred=None, 41 | labels=labels, 42 | label_weights=cls_score.new_ones(cls_score.size(0)), 43 | bbox_targets=None, 44 | bbox_weights=None, 45 | reduction_override='none')['loss_cls'] 46 | _, topk_loss_inds = loss.topk(num_expected) 47 | return inds[topk_loss_inds] 48 | 49 | def _sample_pos(self, 50 | assign_result, 51 | num_expected, 52 | bboxes=None, 53 | feats=None, 54 | **kwargs): 55 | # Sample some hard positive samples 56 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 57 | if pos_inds.numel() != 0: 58 | pos_inds = pos_inds.squeeze(1) 59 | if pos_inds.numel() <= num_expected: 60 | return pos_inds 61 | else: 62 | return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], 63 | assign_result.labels[pos_inds], feats) 64 | 65 | def _sample_neg(self, 66 | assign_result, 67 | num_expected, 68 | bboxes=None, 69 | feats=None, 70 | **kwargs): 71 | # Sample some hard negative samples 72 | neg_inds = torch.nonzero(assign_result.gt_inds == 0) 73 | if neg_inds.numel() != 0: 74 | neg_inds = neg_inds.squeeze(1) 75 | if len(neg_inds) <= num_expected: 76 | return neg_inds 77 | else: 78 | return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], 79 | assign_result.labels[neg_inds], feats) 80 | -------------------------------------------------------------------------------- /mmdet/ops/roi_pool/src/roi_pool_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | #include <cmath> 4 | #include <vector> 5 | 6 | int ROIPoolForwardLaucher(const at::Tensor features, const at::Tensor rois, 7 | const float spatial_scale, const int channels, 8 | const int height, const int width, const int num_rois, 9 | const int pooled_h, const int pooled_w, 10 | at::Tensor output, at::Tensor argmax); 11 | 12 | int ROIPoolBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, 13 | const at::Tensor argmax, const float spatial_scale, 14 | const int batch_size, const int channels, 15 | const int height, const int width, 16 | const int num_rois, const int pooled_h, 17 | const int pooled_w, at::Tensor bottom_grad); 18 | 19 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x, " must be a CUDA tensor ") 20 | #define CHECK_CONTIGUOUS(x) \ 21 | TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") 22 | #define CHECK_INPUT(x) \ 23 | CHECK_CUDA(x); \ 24 | CHECK_CONTIGUOUS(x) 25 | 26 | int roi_pooling_forward_cuda(at::Tensor features, at::Tensor rois, 27 | int pooled_height, int pooled_width, 28 | float spatial_scale, at::Tensor output, 29 | at::Tensor argmax) { 30 | CHECK_INPUT(features); 31 | CHECK_INPUT(rois); 32 | CHECK_INPUT(output); 33 |
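  // rois is expected to have shape (num_rois, 5), each row laid out as
  // (batch_idx, x1, y1, x2, y2), the format bbox2roi produces on the
  // Python side. The size_rois != 5 check below rejects anything else.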
CHECK_INPUT(argmax); 34 | 35 | // Number of ROIs 36 | int num_rois = rois.size(0); 37 | int size_rois = rois.size(1); 38 | 39 | if (size_rois != 5) { 40 | printf("wrong roi size\n"); 41 | return 0; 42 | } 43 | 44 | int channels = features.size(1); 45 | int height = features.size(2); 46 | int width = features.size(3); 47 | 48 | ROIPoolForwardLaucher(features, rois, spatial_scale, channels, height, width, 49 | num_rois, pooled_height, pooled_width, output, argmax); 50 | 51 | return 1; 52 | } 53 | 54 | int roi_pooling_backward_cuda(at::Tensor top_grad, at::Tensor rois, 55 | at::Tensor argmax, float spatial_scale, 56 | at::Tensor bottom_grad) { 57 | CHECK_INPUT(top_grad); 58 | CHECK_INPUT(rois); 59 | CHECK_INPUT(argmax); 60 | CHECK_INPUT(bottom_grad); 61 | 62 | int pooled_height = top_grad.size(2); 63 | int pooled_width = top_grad.size(3); 64 | int num_rois = rois.size(0); 65 | int size_rois = rois.size(1); 66 | 67 | if (size_rois != 5) { 68 | printf("wrong roi size\n"); 69 | return 0; 70 | } 71 | int batch_size = bottom_grad.size(0); 72 | int channels = bottom_grad.size(1); 73 | int height = bottom_grad.size(2); 74 | int width = bottom_grad.size(3); 75 | 76 | ROIPoolBackwardLaucher(top_grad, rois, argmax, spatial_scale, batch_size, 77 | channels, height, width, num_rois, pooled_height, 78 | pooled_width, bottom_grad); 79 | 80 | return 1; 81 | } 82 | 83 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 84 | m.def("forward", &roi_pooling_forward_cuda, "Roi_Pooling forward (CUDA)"); 85 | m.def("backward", &roi_pooling_backward_cuda, "Roi_Pooling backward (CUDA)"); 86 | } 87 | -------------------------------------------------------------------------------- /mmdet/ops/roi_align/src/roi_align_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | #include <ATen/ATen.h> 4 | 5 | #include <cmath> 6 | #include <vector> 7 | 8 | int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois, 9 | const float spatial_scale, const int sample_num, 10 | const int channels, const int height, 11 | const int width, const int num_rois, 12 | const int pooled_height, const int pooled_width, 13 | at::Tensor output); 14 | 15 | int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, 16 | const float spatial_scale, const int sample_num, 17 | const int channels, const int height, 18 | const int width, const int num_rois, 19 | const int pooled_height, const int pooled_width, 20 | at::Tensor bottom_grad); 21 | 22 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x, " must be a CUDA tensor ") 23 | #define CHECK_CONTIGUOUS(x) \ 24 | TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") 25 | #define CHECK_INPUT(x) \ 26 | CHECK_CUDA(x); \ 27 | CHECK_CONTIGUOUS(x) 28 | 29 | int roi_align_forward_cuda(at::Tensor features, at::Tensor rois, 30 | int pooled_height, int pooled_width, 31 | float spatial_scale, int sample_num, 32 | at::Tensor output) { 33 | CHECK_INPUT(features); 34 | CHECK_INPUT(rois); 35 | CHECK_INPUT(output); 36 | 37 | // Number of ROIs 38 | int num_rois = rois.size(0); 39 | int size_rois = rois.size(1); 40 | 41 | if (size_rois != 5) { 42 | printf("wrong roi size\n"); 43 | return 0; 44 | } 45 | 46 | int num_channels = features.size(1); 47 | int data_height = features.size(2); 48 | int data_width = features.size(3); 49 | 50 | ROIAlignForwardLaucher(features, rois, spatial_scale, sample_num, 51 | num_channels, data_height, data_width, num_rois, 52 | pooled_height, pooled_width, output); 53 | 54 | return 1; 55 | } 56 | 57 | int
roi_align_backward_cuda(at::Tensor top_grad, at::Tensor rois, 58 | int pooled_height, int pooled_width, 59 | float spatial_scale, int sample_num, 60 | at::Tensor bottom_grad) { 61 | CHECK_INPUT(top_grad); 62 | CHECK_INPUT(rois); 63 | CHECK_INPUT(bottom_grad); 64 | 65 | // Number of ROIs 66 | int num_rois = rois.size(0); 67 | int size_rois = rois.size(1); 68 | if (size_rois != 5) { 69 | printf("wrong roi size\n"); 70 | return 0; 71 | } 72 | 73 | int num_channels = bottom_grad.size(1); 74 | int data_height = bottom_grad.size(2); 75 | int data_width = bottom_grad.size(3); 76 | 77 | ROIAlignBackwardLaucher(top_grad, rois, spatial_scale, sample_num, 78 | num_channels, data_height, data_width, num_rois, 79 | pooled_height, pooled_width, bottom_grad); 80 | 81 | return 1; 82 | } 83 | 84 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 85 | m.def("forward", &roi_align_forward_cuda, "Roi_Align forward (CUDA)"); 86 | m.def("backward", &roi_align_backward_cuda, "Roi_Align backward (CUDA)"); 87 | } 88 | -------------------------------------------------------------------------------- /mmdet/models/losses/utils.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import torch.nn.functional as F 4 | 5 | 6 | def reduce_loss(loss, reduction): 7 | """Reduce loss as specified. 8 | 9 | Args: 10 | loss (Tensor): Elementwise loss tensor. 11 | reduction (str): Options are "none", "mean" and "sum". 12 | 13 | Returns: 14 | Tensor: Reduced loss tensor. 15 | """ 16 | reduction_enum = F._Reduction.get_enum(reduction) 17 | # none: 0, elementwise_mean:1, sum: 2 18 | if reduction_enum == 0: 19 | return loss 20 | elif reduction_enum == 1: 21 | return loss.mean() 22 | elif reduction_enum == 2: 23 | return loss.sum() 24 | 25 | 26 | def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): 27 | """Apply element-wise weight and reduce loss. 28 | 29 | Args: 30 | loss (Tensor): Element-wise loss. 31 | weight (Tensor): Element-wise weights. 32 | reduction (str): Same as built-in losses of PyTorch. 33 | avg_factor (float): Average factor when computing the mean of losses. 34 | 35 | Returns: 36 | Tensor: Processed loss values. 37 | """ 38 | # if weight is specified, apply element-wise weight 39 | if weight is not None: 40 | loss = loss * weight 41 | 42 | # if avg_factor is not specified, just reduce the loss 43 | if avg_factor is None: 44 | loss = reduce_loss(loss, reduction) 45 | else: 46 | # if reduction is mean, then average the loss by avg_factor 47 | if reduction == 'mean': 48 | loss = loss.sum() / avg_factor 49 | # if reduction is 'none', then do nothing, otherwise raise an error 50 | elif reduction != 'none': 51 | raise ValueError('avg_factor cannot be used with reduction="sum"') 52 | return loss 53 | 54 | 55 | def weighted_loss(loss_func): 56 | """Create a weighted version of a given loss function. 57 | 58 | To use this decorator, the loss function must have the signature like 59 | `loss_func(pred, target, **kwargs)`. The function only needs to compute 60 | element-wise loss without any reduction. This decorator will add weight 61 | and reduction arguments to the function. The decorated function will have 62 | the signature like `loss_func(pred, target, weight=None, reduction='mean', 63 | avg_factor=None, **kwargs)`.
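    The added arguments are simply forwarded to ``weight_reduce_loss`` once
    the element-wise loss has been computed, so the decorated function
    behaves like PyTorch's built-in losses plus an optional per-element
    weight and averaging factor.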
64 | 65 | :Example: 66 | 67 | >>> import torch 68 | >>> @weighted_loss 69 | >>> def l1_loss(pred, target): 70 | >>> return (pred - target).abs() 71 | 72 | >>> pred = torch.Tensor([0, 2, 3]) 73 | >>> target = torch.Tensor([1, 1, 1]) 74 | >>> weight = torch.Tensor([1, 0, 1]) 75 | 76 | >>> l1_loss(pred, target) 77 | tensor(1.3333) 78 | >>> l1_loss(pred, target, weight) 79 | tensor(1.) 80 | >>> l1_loss(pred, target, reduction='none') 81 | tensor([1., 1., 2.]) 82 | >>> l1_loss(pred, target, weight, avg_factor=2) 83 | tensor(1.5000) 84 | """ 85 | 86 | @functools.wraps(loss_func) 87 | def wrapper(pred, 88 | target, 89 | weight=None, 90 | reduction='mean', 91 | avg_factor=None, 92 | **kwargs): 93 | # get element-wise loss 94 | loss = loss_func(pred, target, **kwargs) 95 | loss = weight_reduce_loss(loss, weight, reduction, avg_factor) 96 | return loss 97 | 98 | return wrapper 99 | -------------------------------------------------------------------------------- /mmdet/datasets/xml_style.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import xml.etree.ElementTree as ET 3 | 4 | import mmcv 5 | import numpy as np 6 | 7 | from .custom import CustomDataset 8 | from .registry import DATASETS 9 | 10 | 11 | @DATASETS.register_module 12 | class XMLDataset(CustomDataset): 13 | 14 | def __init__(self, min_size=None, **kwargs): 15 | super(XMLDataset, self).__init__(**kwargs) 16 | self.cat2label = {cat: i + 1 for i, cat in enumerate(self.CLASSES)} 17 | self.min_size = min_size 18 | 19 | def load_annotations(self, ann_file): 20 | img_infos = [] 21 | img_ids = mmcv.list_from_file(ann_file) 22 | for img_id in img_ids: 23 | filename = 'JPEGImages/{}.jpg'.format(img_id) 24 | xml_path = osp.join(self.img_prefix, 'Annotations', 25 | '{}.xml'.format(img_id)) 26 | tree = ET.parse(xml_path) 27 | root = tree.getroot() 28 | size = root.find('size') 29 | width = int(size.find('width').text) 30 | height = int(size.find('height').text) 31 | img_infos.append( 32 | dict(id=img_id, filename=filename, width=width, height=height)) 33 | return img_infos 34 | 35 | def get_ann_info(self, idx): 36 | img_id = self.img_infos[idx]['id'] 37 | xml_path = osp.join(self.img_prefix, 'Annotations', 38 | '{}.xml'.format(img_id)) 39 | tree = ET.parse(xml_path) 40 | root = tree.getroot() 41 | bboxes = [] 42 | labels = [] 43 | bboxes_ignore = [] 44 | labels_ignore = [] 45 | for obj in root.findall('object'): 46 | name = obj.find('name').text 47 | label = self.cat2label[name] 48 | difficult = int(obj.find('difficult').text) 49 | bnd_box = obj.find('bndbox') 50 | bbox = [ 51 | int(bnd_box.find('xmin').text), 52 | int(bnd_box.find('ymin').text), 53 | int(bnd_box.find('xmax').text), 54 | int(bnd_box.find('ymax').text) 55 | ] 56 | ignore = False 57 | if self.min_size: 58 | assert not self.test_mode 59 | w = bbox[2] - bbox[0] 60 | h = bbox[3] - bbox[1] 61 | if w < self.min_size or h < self.min_size: 62 | ignore = True 63 | if difficult or ignore: 64 | bboxes_ignore.append(bbox) 65 | labels_ignore.append(label) 66 | else: 67 | bboxes.append(bbox) 68 | labels.append(label) 69 | if not bboxes: 70 | bboxes = np.zeros((0, 4)) 71 | labels = np.zeros((0, )) 72 | else: 73 | bboxes = np.array(bboxes, ndmin=2) - 1 74 | labels = np.array(labels) 75 | if not bboxes_ignore: 76 | bboxes_ignore = np.zeros((0, 4)) 77 | labels_ignore = np.zeros((0, )) 78 | else: 79 | bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 80 | labels_ignore = np.array(labels_ignore) 81 | ann = dict( 82 | 
bboxes=bboxes.astype(np.float32), 83 | labels=labels.astype(np.int64), 84 | bboxes_ignore=bboxes_ignore.astype(np.float32), 85 | labels_ignore=labels_ignore.astype(np.int64)) 86 | return ann 87 | -------------------------------------------------------------------------------- /mmdet/ops/roi_align/roi_align.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from torch.autograd import Function 3 | from torch.autograd.function import once_differentiable 4 | from torch.nn.modules.utils import _pair 5 | 6 | from . import roi_align_cuda 7 | 8 | 9 | class RoIAlignFunction(Function): 10 | 11 | @staticmethod 12 | def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0): 13 | out_h, out_w = _pair(out_size) 14 | assert isinstance(out_h, int) and isinstance(out_w, int) 15 | ctx.spatial_scale = spatial_scale 16 | ctx.sample_num = sample_num 17 | ctx.save_for_backward(rois) 18 | ctx.feature_size = features.size() 19 | 20 | batch_size, num_channels, data_height, data_width = features.size() 21 | num_rois = rois.size(0) 22 | 23 | output = features.new_zeros(num_rois, num_channels, out_h, out_w) 24 | if features.is_cuda: 25 | roi_align_cuda.forward(features, rois, out_h, out_w, spatial_scale, 26 | sample_num, output) 27 | else: 28 | raise NotImplementedError 29 | 30 | return output 31 | 32 | @staticmethod 33 | @once_differentiable 34 | def backward(ctx, grad_output): 35 | feature_size = ctx.feature_size 36 | spatial_scale = ctx.spatial_scale 37 | sample_num = ctx.sample_num 38 | rois = ctx.saved_tensors[0] 39 | assert (feature_size is not None and grad_output.is_cuda) 40 | 41 | batch_size, num_channels, data_height, data_width = feature_size 42 | out_w = grad_output.size(3) 43 | out_h = grad_output.size(2) 44 | 45 | grad_input = grad_rois = None 46 | if ctx.needs_input_grad[0]: 47 | grad_input = rois.new_zeros(batch_size, num_channels, data_height, 48 | data_width) 49 | roi_align_cuda.backward(grad_output.contiguous(), rois, out_h, 50 | out_w, spatial_scale, sample_num, 51 | grad_input) 52 | 53 | return grad_input, grad_rois, None, None, None 54 | 55 | 56 | roi_align = RoIAlignFunction.apply 57 | 58 | 59 | class RoIAlign(nn.Module): 60 | 61 | def __init__(self, 62 | out_size, 63 | spatial_scale, 64 | sample_num=0, 65 | use_torchvision=False): 66 | super(RoIAlign, self).__init__() 67 | 68 | self.out_size = _pair(out_size) 69 | self.spatial_scale = float(spatial_scale) 70 | self.sample_num = int(sample_num) 71 | self.use_torchvision = use_torchvision 72 | 73 | def forward(self, features, rois): 74 | if self.use_torchvision: 75 | from torchvision.ops import roi_align as tv_roi_align 76 | return tv_roi_align(features, rois, self.out_size, 77 | self.spatial_scale, self.sample_num) 78 | else: 79 | return roi_align(features, rois, self.out_size, self.spatial_scale, 80 | self.sample_num) 81 | 82 | def __repr__(self): 83 | format_str = self.__class__.__name__ 84 | format_str += '(out_size={}, spatial_scale={}, sample_num={}'.format( 85 | self.out_size, self.spatial_scale, self.sample_num) 86 | format_str += ', use_torchvision={})'.format(self.use_torchvision) 87 | return format_str 88 | -------------------------------------------------------------------------------- /mmdet/models/detectors/reppoints_detector.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms 4 | from ..registry import DETECTORS 5 | from 
.single_stage import SingleStageDetector 6 | 7 | 8 | @DETECTORS.register_module 9 | class RepPointsDetector(SingleStageDetector): 10 | """RepPoints: Point Set Representation for Object Detection. 11 | 12 | This detector is the implementation of: 13 | - RepPoints detector (https://arxiv.org/pdf/1904.11490) 14 | """ 15 | 16 | def __init__(self, 17 | backbone, 18 | neck, 19 | bbox_head, 20 | train_cfg=None, 21 | test_cfg=None, 22 | pretrained=None): 23 | super(RepPointsDetector, 24 | self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, 25 | pretrained) 26 | 27 | def merge_aug_results(self, aug_bboxes, aug_scores, img_metas): 28 | """Merge augmented detection bboxes and scores. 29 | 30 | Args: 31 | aug_bboxes (list[Tensor]): shape (n, 4*#class) 32 | aug_scores (list[Tensor] or None): shape (n, #class) 33 | img_metas (list[list[dict]]): meta info of each augmented image, including 'img_shape', 'scale_factor' and 'flip'. 34 | 35 | Returns: 36 | tuple: (bboxes, scores) 37 | """ 38 | recovered_bboxes = [] 39 | for bboxes, img_info in zip(aug_bboxes, img_metas): 40 | img_shape = img_info[0]['img_shape'] 41 | scale_factor = img_info[0]['scale_factor'] 42 | flip = img_info[0]['flip'] 43 | bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) 44 | recovered_bboxes.append(bboxes) 45 | bboxes = torch.cat(recovered_bboxes, dim=0) 46 | if aug_scores is None: 47 | return bboxes 48 | else: 49 | scores = torch.cat(aug_scores, dim=0) 50 | return bboxes, scores 51 | 52 | def aug_test(self, imgs, img_metas, rescale=False): 53 | # recompute feats to save memory 54 | feats = self.extract_feats(imgs) 55 | 56 | aug_bboxes = [] 57 | aug_scores = [] 58 | for x, img_meta in zip(feats, img_metas): 59 | # only one image in the batch 60 | outs = self.bbox_head(x) 61 | bbox_inputs = outs + (img_meta, self.test_cfg, False, False) 62 | det_bboxes, det_scores = self.bbox_head.get_bboxes(*bbox_inputs)[0] 63 | aug_bboxes.append(det_bboxes) 64 | aug_scores.append(det_scores) 65 | 66 | # after merging, bboxes will be rescaled to the original image size 67 | merged_bboxes, merged_scores = self.merge_aug_results( 68 | aug_bboxes, aug_scores, img_metas) 69 | det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, 70 | self.test_cfg.score_thr, 71 | self.test_cfg.nms, 72 | self.test_cfg.max_per_img) 73 | 74 | if rescale: 75 | _det_bboxes = det_bboxes 76 | else: 77 | _det_bboxes = det_bboxes.clone() 78 | _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor'] 79 | bbox_results = bbox2result(_det_bboxes, det_labels, 80 | self.bbox_head.num_classes) 81 | return bbox_results 82 | -------------------------------------------------------------------------------- /mmdet/core/bbox/geometry.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False): 5 | """Calculate overlap between two sets of bboxes. 6 | 7 | If ``is_aligned`` is ``False``, then calculate the ious between each bbox 8 | of bboxes1 and bboxes2, otherwise the ious between each aligned pair of 9 | bboxes1 and bboxes2. 10 | 11 | Args: 12 | bboxes1 (Tensor): shape (m, 4) in <x1, y1, x2, y2> format. 13 | bboxes2 (Tensor): shape (n, 4) in <x1, y1, x2, y2> format. 14 | If is_aligned is ``True``, then m and n must be equal. 15 | mode (str): "iou" (intersection over union) or "iof" (intersection over 16 | foreground).
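        is_aligned (bool, optional): If True, the overlap is computed between
            each aligned pair bboxes1[i] and bboxes2[i], so m must equal n.
            Default: False.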
17 | 18 | Returns: 19 | ious (Tensor): shape (m, n) if is_aligned == False else shape (m, 1) 20 | 21 | Example: 22 | >>> bboxes1 = torch.FloatTensor([ 23 | >>> [0, 0, 10, 10], 24 | >>> [10, 10, 20, 20], 25 | >>> [32, 32, 38, 42], 26 | >>> ]) 27 | >>> bboxes2 = torch.FloatTensor([ 28 | >>> [0, 0, 10, 20], 29 | >>> [0, 10, 10, 19], 30 | >>> [10, 10, 20, 20], 31 | >>> ]) 32 | >>> bbox_overlaps(bboxes1, bboxes2) 33 | tensor([[0.5238, 0.0500, 0.0041], 34 | [0.0323, 0.0452, 1.0000], 35 | [0.0000, 0.0000, 0.0000]]) 36 | 37 | Example: 38 | >>> empty = torch.FloatTensor([]) 39 | >>> nonempty = torch.FloatTensor([ 40 | >>> [0, 0, 10, 9], 41 | >>> ]) 42 | >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) 43 | >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) 44 | >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) 45 | """ 46 | 47 | assert mode in ['iou', 'iof'] 48 | 49 | rows = bboxes1.size(0) 50 | cols = bboxes2.size(0) 51 | if is_aligned: 52 | assert rows == cols 53 | 54 | if rows * cols == 0: 55 | return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols) 56 | 57 | if is_aligned: 58 | lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2] 59 | rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2] 60 | 61 | wh = (rb - lt + 1).clamp(min=0) # [rows, 2] 62 | overlap = wh[:, 0] * wh[:, 1] 63 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 64 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 65 | 66 | if mode == 'iou': 67 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 68 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 69 | ious = overlap / (area1 + area2 - overlap) 70 | else: 71 | ious = overlap / area1 72 | else: 73 | lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2] 74 | rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2] 75 | 76 | wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2] 77 | overlap = wh[:, :, 0] * wh[:, :, 1] 78 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 79 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 80 | 81 | if mode == 'iou': 82 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 83 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 84 | ious = overlap / (area1[:, None] + area2 - overlap) 85 | else: 86 | ious = overlap / (area1[:, None]) 87 | 88 | return ious 89 | -------------------------------------------------------------------------------- /configs/htc/README.md: -------------------------------------------------------------------------------- 1 | # Hybrid Task Cascade for Instance Segmentation 2 | 3 | ## Introduction 4 | 5 | We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518). 6 | 7 | ``` 8 | @inproceedings{chen2019hybrid, 9 | title={Hybrid task cascade for instance segmentation}, 10 | author={Chen, Kai and Pang, Jiangmiao and Wang, Jiaqi and Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and Liu, Ziwei and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua}, 11 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, 12 | year={2019} 13 | } 14 | ``` 15 | 16 | ## Dataset 17 | 18 | HTC requires the COCO and COCO-stuff datasets for training. You need to download and extract them under the COCO dataset path. 19 | The directory structure should look like this.
20 | 21 | ``` 22 | mmdetection 23 | ├── mmdet 24 | ├── tools 25 | ├── configs 26 | ├── data 27 | │ ├── coco 28 | │ │ ├── annotations 29 | │ │ ├── train2017 30 | │ │ ├── val2017 31 | │ │ ├── test2017 32 | │ │ ├── stuffthingmaps 33 | ``` 34 | 35 | ## Results and Models 36 | 37 | The results on COCO 2017val are shown in the table below. (Results on test-dev are usually slightly higher than on val.) 38 | 39 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | 40 | |:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| 41 | | R-50-FPN | pytorch | 1x | 7.4 | 0.936 | 4.1 | 42.1 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r50_fpn_1x_20190408-878c1712.pth) | 42 | | R-50-FPN | pytorch | 20e | - | - | - | 43.2 | 38.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r50_fpn_20e_20190408-c03b7015.pth) | 43 | | R-101-FPN | pytorch | 20e | 9.3 | 1.051 | 4.0 | 44.9 | 39.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r101_fpn_20e_20190408-a2e586db.pth) | 44 | | X-101-32x4d-FPN | pytorch |20e| 5.8 | 0.769 | 3.8 | 46.1 | 40.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_x101_32x4d_fpn_20e_20190408-9eae4d0b.pth) | 45 | | X-101-64x4d-FPN | pytorch |20e| 7.5 | 1.120 | 3.5 | 46.9 | 40.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_x101_64x4d_fpn_20e_20190408-497f2561.pth) | 46 | 47 | - In the HTC paper and COCO 2018 Challenge, `score_thr` is set to 0.001 for both baselines and HTC. 48 | - We use 8 GPUs with 2 images/GPU for R-50 and R-101 models, and 16 GPUs with 1 image/GPU for X-101 models. 49 | If you would like to train X-101 HTC with 8 GPUs, you need to change the lr from 0.02 to 0.01. 50 | 51 | We also provide a powerful HTC model with DCN and multi-scale training. No test-time augmentation is used. 52 | 53 | | Backbone | Style | DCN | training scales | Lr schd | box AP | mask AP | Download | 54 | |:----------------:|:-------:|:-----:|:---------------:|:-------:|:------:|:-------:|:--------:| 55 | | X-101-64x4d-FPN | pytorch | c3-c5 | 400~1400 | 20e | 50.7 | 43.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_20190408-0e50669c.pth) | 56 | -------------------------------------------------------------------------------- /mmdet/datasets/pipelines/instaboost.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ..registry import PIPELINES 4 | 5 | 6 | @PIPELINES.register_module 7 | class InstaBoost(object): 8 | """ 9 | Data augmentation method from the paper "InstaBoost: Boosting Instance 10 | Segmentation via Probability Map Guided Copy-Pasting". 11 | Implementation details can be found at https://github.com/GothicAi/Instaboost.
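    Each call augments the image with probability ``aug_ratio``: the
    annotated instances are re-placed by InstaBoost according to the
    sampled action and the transformed image and annotations are written
    back into ``results``; otherwise the input is returned unchanged.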
12 | """ 13 | 14 | def __init__(self, 15 | action_candidate=('normal', 'horizontal', 'skip'), 16 | action_prob=(1, 0, 0), 17 | scale=(0.8, 1.2), 18 | dx=15, 19 | dy=15, 20 | theta=(-1, 1), 21 | color_prob=0.5, 22 | hflag=False, 23 | aug_ratio=0.5): 24 | try: 25 | import instaboostfast as instaboost 26 | except ImportError: 27 | raise ImportError( 28 | 'Please run "pip install instaboostfast" ' 29 | 'to install instaboostfast first for instaboost augmentation.') 30 | self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, 31 | scale, dx, dy, theta, 32 | color_prob, hflag) 33 | self.aug_ratio = aug_ratio 34 | 35 | def _load_anns(self, results): 36 | labels = results['ann_info']['labels'] 37 | masks = results['ann_info']['masks'] 38 | bboxes = results['ann_info']['bboxes'] 39 | n = len(labels) 40 | 41 | anns = [] 42 | for i in range(n): 43 | label = labels[i] 44 | bbox = bboxes[i] 45 | mask = masks[i] 46 | x1, y1, x2, y2 = bbox 47 | bbox = [x1, y1, x2 - x1 + 1, y2 - y1 + 1] 48 | anns.append({ 49 | 'category_id': label, 50 | 'segmentation': mask, 51 | 'bbox': bbox 52 | }) 53 | 54 | return anns 55 | 56 | def _parse_anns(self, results, anns, img): 57 | gt_bboxes = [] 58 | gt_labels = [] 59 | gt_masks_ann = [] 60 | for ann in anns: 61 | x1, y1, w, h = ann['bbox'] 62 | bbox = [x1, y1, x1 + w - 1, y1 + h - 1] 63 | gt_bboxes.append(bbox) 64 | gt_labels.append(ann['category_id']) 65 | gt_masks_ann.append(ann['segmentation']) 66 | gt_bboxes = np.array(gt_bboxes, dtype=np.float32) 67 | gt_labels = np.array(gt_labels, dtype=np.int64) 68 | results['ann_info']['labels'] = gt_labels 69 | results['ann_info']['bboxes'] = gt_bboxes 70 | results['ann_info']['masks'] = gt_masks_ann 71 | results['img'] = img 72 | return results 73 | 74 | def __call__(self, results): 75 | img = results['img'] 76 | anns = self._load_anns(results) 77 | if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]): 78 | try: 79 | import instaboostfast as instaboost 80 | except ImportError: 81 | raise ImportError('Please run "pip install instaboostfast" ' 82 | 'to install instaboostfast first.') 83 | anns, img = instaboost.get_new_data( 84 | anns, img, self.cfg, background=None) 85 | results = self._parse_anns(results, anns, img) 86 | return results 87 | 88 | def __repr__(self): 89 | repr_str = self.__class__.__name__ 90 | repr_str += ('(cfg={}, aug_ratio={})').format(self.cfg, self.aug_ratio) 91 | return repr_str 92 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at chenkaidev@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /tests/async_benchmark.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | import asyncio 4 | import os 5 | import shutil 6 | import urllib 7 | 8 | import mmcv 9 | import torch 10 | 11 | from mmdet.apis import (async_inference_detector, inference_detector, 12 | init_detector, show_result) 13 | from mmdet.utils.contextmanagers import concurrent 14 | from mmdet.utils.profiling import profile_time 15 | 16 | 17 | async def main(): 18 | """ 19 | 20 | Benchmark between async and synchronous inference interfaces. 21 | 22 | Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x: 23 | 24 | async sync 25 | 26 | 7981.79 ms 9660.82 ms 27 | 8074.52 ms 9660.94 ms 28 | 7976.44 ms 9406.83 ms 29 | 30 | Async variant takes about 0.83-0.85 of the time of the synchronous 31 | interface. 32 | 33 | """ 34 | project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) 35 | 36 | config_file = os.path.join(project_dir, 'configs/mask_rcnn_r50_fpn_1x.py') 37 | checkpoint_file = os.path.join( 38 | project_dir, 'checkpoints/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth') 39 | 40 | if not os.path.exists(checkpoint_file): 41 | url = ('https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection' 42 | '/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth') 43 | print('Downloading {} ...'.format(url)) 44 | local_filename, _ = urllib.request.urlretrieve(url) 45 | os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True) 46 | shutil.move(local_filename, checkpoint_file) 47 | print('Saved as {}'.format(checkpoint_file)) 48 | else: 49 | print('Using existing checkpoint {}'.format(checkpoint_file)) 50 | 51 | device = 'cuda:0' 52 | model = init_detector( 53 | config_file, checkpoint=checkpoint_file, device=device) 54 | 55 | # queue is used for concurrent inference of multiple images 56 | streamqueue = asyncio.Queue() 57 | # queue size defines concurrency level 58 | streamqueue_size = 4 59 | 60 | for _ in range(streamqueue_size): 61 | streamqueue.put_nowait(torch.cuda.Stream(device=device)) 62 | 63 | # test a single image and show the results 64 | img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg')) 65 | 66 | # warmup 67 | await async_inference_detector(model, img) 68 | 69 | async def detect(img): 70 | async with concurrent(streamqueue): 71 | return await async_inference_detector(model, img) 72 | 73 | num_of_images = 20 74 | with profile_time('benchmark', 'async'): 75 | tasks = [ 76 | asyncio.create_task(detect(img)) for _ in range(num_of_images) 77 | ] 78 | async_results = await asyncio.gather(*tasks) 79 | 80 | with torch.cuda.stream(torch.cuda.default_stream()): 81 | with profile_time('benchmark', 'sync'): 82 | sync_results = [ 83 | inference_detector(model, img) for _ in range(num_of_images) 84 | ] 85 | 86 | result_dir = os.path.join(project_dir, 'demo') 87 | show_result( 88 | img, 89 | async_results[0], 90 | model.CLASSES, 91 | score_thr=0.5, 92 | show=False, 93 | out_file=os.path.join(result_dir, 'result_async.jpg')) 94 | show_result( 95 | img, 96 | 
sync_results[0], 97 | model.CLASSES, 98 | score_thr=0.5, 99 | show=False, 100 | out_file=os.path.join(result_dir, 'result_sync.jpg')) 101 | 102 | 103 | if __name__ == '__main__': 104 | asyncio.run(main()) 105 | --------------------------------------------------------------------------------