├── .gitignore ├── LICENSE ├── README.md ├── __pycache__ └── util.cpython-37.pyc ├── clustering ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── clustering_utils.cpython-37.pyc │ ├── feature_extraction_track_results.cpython-37.pyc │ ├── heapq.cpython-37.pyc │ ├── multi_cam_clustering.cpython-37.pyc │ ├── track_visualization.cpython-37.pyc │ └── velocity_calculation.cpython-37.pyc ├── clustering_utils.py ├── feature_extraction_track_results.py ├── heapq.py ├── multi_cam_clustering.py ├── track_visualization.py └── velocity_calculation.py ├── configs ├── __init__.py ├── __pycache__ │ └── __init__.cpython-37.pyc ├── clustering_configs │ ├── __init__.py │ └── mta_es_abd_non_clean.py ├── evaluation_configs │ └── evaluate_cascade_abdnet_mta_none_test_iosb.py └── tracker_configs │ ├── __init__.py │ ├── frcnn50_new_abd_test.py │ ├── frcnn50_new_abd_train.py │ └── frcnn50_new_reid_strong.py ├── datasets ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── base_dataset.cpython-37.pyc │ └── mta_dataset_cam_iterator.cpython-37.pyc ├── base_dataset.py └── mta_dataset_cam_iterator.py ├── detectors ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── base_detector.cpython-37.pyc │ └── mmdetection_detector.cpython-37.pyc ├── base_detector.py ├── mmdetection │ ├── .github │ │ ├── CODE_OF_CONDUCT.md │ │ ├── CONTRIBUTING.md │ │ └── ISSUE_TEMPLATE │ │ │ ├── config.yml │ │ │ ├── error-report.md │ │ │ ├── feature_request.md │ │ │ └── general_questions.md │ ├── .gitignore │ ├── .isort.cfg │ ├── .pre-commit-config.yaml │ ├── .style.yapf │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── configs │ │ ├── albu_example │ │ │ └── mask_rcnn_r50_fpn_1x.py │ │ ├── atss │ │ │ ├── README.md │ │ │ └── atss_r50_fpn_1x.py │ │ ├── carafe │ │ │ ├── README.md │ │ │ ├── faster_rcnn_r50_fpn_carafe_1x.py │ │ │ └── mask_rcnn_r50_fpn_carafe_1x.py │ │ ├── cascade_mask_rcnn_r101_fpn_1x.py │ │ ├── cascade_mask_rcnn_r50_caffe_c4_1x.py │ │ ├── 
cascade_mask_rcnn_r50_fpn_1x.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x.py │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_1x.py │ │ ├── cascade_rcnn_r101_fpn_1x.py │ │ ├── cascade_rcnn_r50_caffe_c4_1x.py │ │ ├── cascade_rcnn_r50_fpn_1x.py │ │ ├── cascade_rcnn_x101_32x4d_fpn_1x.py │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x.py │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x_gta.py │ │ ├── cityscapes │ │ │ ├── README.md │ │ │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py │ │ │ └── mask_rcnn_r50_fpn_1x_cityscapes.py │ │ ├── dcn │ │ │ ├── README.md │ │ │ ├── cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ │ ├── cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ │ ├── faster_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ │ ├── faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py │ │ │ ├── faster_rcnn_dpool_r50_fpn_1x.py │ │ │ ├── faster_rcnn_mdconv_c3-c5_group4_r50_fpn_1x.py │ │ │ ├── faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py │ │ │ ├── faster_rcnn_mdpool_r50_fpn_1x.py │ │ │ ├── mask_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ │ └── mask_rcnn_mdconv_c3-c5_r50_fpn_1x.py │ │ ├── double_heads │ │ │ └── dh_faster_rcnn_r50_fpn_1x.py │ │ ├── empirical_attention │ │ │ ├── README.md │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_1x.py │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x.py │ │ │ ├── faster_rcnn_r50_fpn_attention_1111_1x.py │ │ │ └── faster_rcnn_r50_fpn_attention_1111_dcn_1x.py │ │ ├── fast_mask_rcnn_r101_fpn_1x.py │ │ ├── fast_mask_rcnn_r50_caffe_c4_1x.py │ │ ├── fast_mask_rcnn_r50_fpn_1x.py │ │ ├── fast_rcnn_r101_fpn_1x.py │ │ ├── fast_rcnn_r50_caffe_c4_1x.py │ │ ├── fast_rcnn_r50_fpn_1x.py │ │ ├── faster_rcnn_ohem_r50_fpn_1x.py │ │ ├── faster_rcnn_r101_fpn_1x.py │ │ ├── faster_rcnn_r50_caffe_c4_1x.py │ │ ├── faster_rcnn_r50_fpn_1x.py │ │ ├── faster_rcnn_r50_fpn_1x_gta.py │ │ ├── faster_rcnn_x101_32x4d_fpn_1x.py │ │ ├── faster_rcnn_x101_64x4d_fpn_1x.py │ │ ├── faster_rcnn_x101_64x4d_fpn_1x_gta.py │ │ ├── fcos │ │ │ ├── README.md │ │ │ ├── fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py │ │ │ ├── 
fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py │ │ │ └── fcos_r50_caffe_fpn_gn_1x_4gpu.py │ │ ├── foveabox │ │ │ ├── README.md │ │ │ ├── fovea_align_gn_ms_r101_fpn_4gpu_2x.py │ │ │ ├── fovea_align_gn_ms_r50_fpn_4gpu_2x.py │ │ │ ├── fovea_align_gn_r101_fpn_4gpu_2x.py │ │ │ ├── fovea_align_gn_r50_fpn_4gpu_2x.py │ │ │ └── fovea_r50_fpn_4gpu_1x.py │ │ ├── fp16 │ │ │ ├── faster_rcnn_r50_fpn_fp16_1x.py │ │ │ ├── mask_rcnn_r50_fpn_fp16_1x.py │ │ │ └── retinanet_r50_fpn_fp16_1x.py │ │ ├── free_anchor │ │ │ ├── README.md │ │ │ ├── retinanet_free_anchor_r101_fpn_1x.py │ │ │ ├── retinanet_free_anchor_r50_fpn_1x.py │ │ │ └── retinanet_free_anchor_x101-32x4d_fpn_1x.py │ │ ├── gcnet │ │ │ ├── README.md │ │ │ ├── mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py │ │ │ ├── mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py │ │ │ ├── mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py │ │ │ ├── mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py │ │ │ └── mask_rcnn_r50_fpn_sbn_1x.py │ │ ├── ghm │ │ │ ├── README.md │ │ │ └── retinanet_ghm_r50_fpn_1x.py │ │ ├── gn+ws │ │ │ ├── README.md │ │ │ ├── faster_rcnn_r50_fpn_gn_ws_1x.py │ │ │ ├── mask_rcnn_r50_fpn_gn_ws_20_23_24e.py │ │ │ ├── mask_rcnn_r50_fpn_gn_ws_2x.py │ │ │ └── mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py │ │ ├── gn │ │ │ ├── README.md │ │ │ ├── mask_rcnn_r101_fpn_gn_2x.py │ │ │ ├── mask_rcnn_r50_fpn_gn_2x.py │ │ │ └── mask_rcnn_r50_fpn_gn_contrib_2x.py │ │ ├── grid_rcnn │ │ │ ├── README.md │ │ │ ├── grid_rcnn_gn_head_r50_fpn_2x.py │ │ │ └── grid_rcnn_gn_head_x101_32x4d_fpn_2x.py │ │ ├── guided_anchoring │ │ │ ├── README.md │ │ │ ├── ga_fast_r50_caffe_fpn_1x.py │ │ │ ├── ga_faster_r50_caffe_fpn_1x.py │ │ │ ├── ga_faster_x101_32x4d_fpn_1x.py │ │ │ ├── ga_retinanet_r50_caffe_fpn_1x.py │ │ │ ├── ga_retinanet_x101_32x4d_fpn_1x.py │ │ │ ├── ga_rpn_r101_caffe_rpn_1x.py │ │ │ ├── ga_rpn_r50_caffe_fpn_1x.py │ │ │ └── ga_rpn_x101_32x4d_fpn_1x.py │ │ ├── hrnet │ │ │ ├── README.md │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e.py │ │ │ ├── cascade_rcnn_hrnetv2p_w32_20e.py │ │ │ ├── 
faster_rcnn_hrnetv2p_w18_1x.py │ │ │ ├── faster_rcnn_hrnetv2p_w32_1x.py │ │ │ ├── faster_rcnn_hrnetv2p_w40_1x.py │ │ │ ├── fcos_hrnetv2p_w32_gn_1x_4gpu.py │ │ │ ├── htc_hrnetv2p_w32_20e.py │ │ │ ├── mask_rcnn_hrnetv2p_w18_1x.py │ │ │ └── mask_rcnn_hrnetv2p_w32_1x.py │ │ ├── htc │ │ │ ├── README.md │ │ │ ├── htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py │ │ │ ├── htc_r101_fpn_20e.py │ │ │ ├── htc_r50_fpn_1x.py │ │ │ ├── htc_r50_fpn_20e.py │ │ │ ├── htc_without_semantic_r50_fpn_1x.py │ │ │ ├── htc_x101_32x4d_fpn_20e_16gpu.py │ │ │ └── htc_x101_64x4d_fpn_20e_16gpu.py │ │ ├── instaboost │ │ │ ├── README.md │ │ │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x.py │ │ │ ├── mask_rcnn_r50_fpn_instaboost_4x.py │ │ │ └── ssd300_coco_instaboost_4x.py │ │ ├── libra_rcnn │ │ │ ├── README.md │ │ │ ├── libra_fast_rcnn_r50_fpn_1x.py │ │ │ ├── libra_faster_rcnn_r101_fpn_1x.py │ │ │ ├── libra_faster_rcnn_r50_fpn_1x.py │ │ │ ├── libra_faster_rcnn_x101_64x4d_fpn_1x.py │ │ │ └── libra_retinanet_r50_fpn_1x.py │ │ ├── mask_rcnn_r101_fpn_1x.py │ │ ├── mask_rcnn_r50_caffe_c4_1x.py │ │ ├── mask_rcnn_r50_fpn_1x.py │ │ ├── mask_rcnn_x101_32x4d_fpn_1x.py │ │ ├── mask_rcnn_x101_64x4d_fpn_1x.py │ │ ├── ms_rcnn │ │ │ ├── README.md │ │ │ ├── ms_rcnn_r101_caffe_fpn_1x.py │ │ │ ├── ms_rcnn_r50_caffe_fpn_1x.py │ │ │ └── ms_rcnn_x101_64x4d_fpn_1x.py │ │ ├── nas_fpn │ │ │ ├── README.md │ │ │ ├── retinanet_crop640_r50_fpn_50e.py │ │ │ └── retinanet_crop640_r50_nasfpn_50e.py │ │ ├── pascal_voc │ │ │ ├── README.md │ │ │ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ │ │ ├── ssd300_voc.py │ │ │ └── ssd512_voc.py │ │ ├── reppoints │ │ │ ├── README.md │ │ │ ├── bbox_r50_grid_center_fpn_1x.py │ │ │ ├── bbox_r50_grid_fpn_1x.py │ │ │ ├── reppoints.png │ │ │ ├── reppoints_minmax_r50_fpn_1x.py │ │ │ ├── reppoints_moment_r101_dcn_fpn_2x.py │ │ │ ├── reppoints_moment_r101_dcn_fpn_2x_mt.py │ │ │ ├── reppoints_moment_r101_fpn_2x.py │ │ │ ├── reppoints_moment_r101_fpn_2x_mt.py │ │ │ ├── reppoints_moment_r50_fpn_1x.py │ 
│ │ ├── reppoints_moment_r50_fpn_2x.py │ │ │ ├── reppoints_moment_r50_fpn_2x_mt.py │ │ │ ├── reppoints_moment_r50_no_gn_fpn_1x.py │ │ │ ├── reppoints_moment_x101_dcn_fpn_2x.py │ │ │ ├── reppoints_moment_x101_dcn_fpn_2x_mt.py │ │ │ └── reppoints_partial_minmax_r50_fpn_1x.py │ │ ├── retinanet_r101_fpn_1x.py │ │ ├── retinanet_r50_fpn_1x.py │ │ ├── retinanet_x101_32x4d_fpn_1x.py │ │ ├── retinanet_x101_64x4d_fpn_1x.py │ │ ├── retinanet_x101_64x4d_fpn_1x_gta.py │ │ ├── rpn_r101_fpn_1x.py │ │ ├── rpn_r50_caffe_c4_1x.py │ │ ├── rpn_r50_fpn_1x.py │ │ ├── rpn_x101_32x4d_fpn_1x.py │ │ ├── rpn_x101_64x4d_fpn_1x.py │ │ ├── scratch │ │ │ ├── README.md │ │ │ ├── scratch_faster_rcnn_r50_fpn_gn_6x.py │ │ │ └── scratch_mask_rcnn_r50_fpn_gn_6x.py │ │ ├── ssd300_coco.py │ │ ├── ssd512_coco.py │ │ ├── ssd512_coco_gta.py │ │ └── wider_face │ │ │ ├── README.md │ │ │ └── ssd300_wider_face.py │ ├── demo │ │ ├── coco_test_12510.jpg │ │ ├── corruptions_sev_3.png │ │ ├── data_pipeline.png │ │ ├── demo.jpg │ │ ├── inference_demo.ipynb │ │ ├── loss_curve.png │ │ └── webcam_demo.py │ ├── docker │ │ └── Dockerfile │ ├── docs │ │ ├── CHANGELOG.md │ │ ├── GETTING_STARTED.md │ │ ├── INSTALL.md │ │ ├── MODEL_ZOO.md │ │ ├── Makefile │ │ ├── ROBUSTNESS_BENCHMARKING.md │ │ ├── TECHNICAL_DETAILS.md │ │ ├── conf.py │ │ ├── index.rst │ │ ├── make.bat │ │ └── requirements.txt │ ├── mmdet │ │ ├── __init__.py │ │ ├── apis │ │ │ ├── __init__.py │ │ │ ├── inference.py │ │ │ └── train.py │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── anchor │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_generator.py │ │ │ │ ├── anchor_target.py │ │ │ │ ├── guided_anchor_target.py │ │ │ │ ├── point_generator.py │ │ │ │ └── point_target.py │ │ │ ├── bbox │ │ │ │ ├── __init__.py │ │ │ │ ├── assign_sampling.py │ │ │ │ ├── assigners │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ │ ├── assign_result.py │ │ │ │ │ ├── atss_assigner.py │ │ │ │ │ ├── base_assigner.py │ │ │ │ │ ├── max_iou_assigner.py │ │ │ │ │ └── 
point_assigner.py │ │ │ │ ├── bbox_target.py │ │ │ │ ├── demodata.py │ │ │ │ ├── geometry.py │ │ │ │ ├── samplers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_sampler.py │ │ │ │ │ ├── combined_sampler.py │ │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ │ ├── ohem_sampler.py │ │ │ │ │ ├── pseudo_sampler.py │ │ │ │ │ ├── random_sampler.py │ │ │ │ │ └── sampling_result.py │ │ │ │ └── transforms.py │ │ │ ├── evaluation │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_overlaps.py │ │ │ │ ├── class_names.py │ │ │ │ ├── eval_hooks.py │ │ │ │ ├── mean_ap.py │ │ │ │ └── recall.py │ │ │ ├── fp16 │ │ │ │ ├── __init__.py │ │ │ │ ├── decorators.py │ │ │ │ ├── hooks.py │ │ │ │ └── utils.py │ │ │ ├── mask │ │ │ │ ├── __init__.py │ │ │ │ ├── mask_target.py │ │ │ │ └── utils.py │ │ │ ├── optimizer │ │ │ │ ├── __init__.py │ │ │ │ ├── builder.py │ │ │ │ ├── copy_of_sgd.py │ │ │ │ └── registry.py │ │ │ ├── post_processing │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_nms.py │ │ │ │ └── merge_augs.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── dist_utils.py │ │ │ │ └── misc.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ ├── cityscapes.py │ │ │ ├── coco.py │ │ │ ├── custom.py │ │ │ ├── dataset_wrappers.py │ │ │ ├── gta_dataset.py │ │ │ ├── loader │ │ │ │ ├── __init__.py │ │ │ │ ├── build_loader.py │ │ │ │ └── sampler.py │ │ │ ├── pipelines │ │ │ │ ├── __init__.py │ │ │ │ ├── compose.py │ │ │ │ ├── formating.py │ │ │ │ ├── instaboost.py │ │ │ │ ├── loading.py │ │ │ │ ├── test_aug.py │ │ │ │ └── transforms.py │ │ │ ├── registry.py │ │ │ ├── voc.py │ │ │ ├── wider_face.py │ │ │ └── xml_style.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── anchor_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_head.py │ │ │ │ ├── atss_head.py │ │ │ │ ├── fcos_head.py │ │ │ │ ├── fovea_head.py │ │ │ │ ├── free_anchor_retina_head.py │ │ │ │ ├── ga_retina_head.py │ │ │ │ ├── ga_rpn_head.py │ │ │ │ ├── guided_anchor_head.py │ │ │ │ ├── 
reppoints_head.py │ │ │ │ ├── retina_head.py │ │ │ │ ├── retina_sepbn_head.py │ │ │ │ ├── rpn_head.py │ │ │ │ └── ssd_head.py │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── hrnet.py │ │ │ │ ├── resnet.py │ │ │ │ ├── resnext.py │ │ │ │ └── ssd_vgg.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_head.py │ │ │ │ ├── convfc_bbox_head.py │ │ │ │ └── double_bbox_head.py │ │ │ ├── builder.py │ │ │ ├── detectors │ │ │ │ ├── __init__.py │ │ │ │ ├── atss.py │ │ │ │ ├── base.py │ │ │ │ ├── cascade_rcnn.py │ │ │ │ ├── double_head_rcnn.py │ │ │ │ ├── fast_rcnn.py │ │ │ │ ├── faster_rcnn.py │ │ │ │ ├── fcos.py │ │ │ │ ├── fovea.py │ │ │ │ ├── grid_rcnn.py │ │ │ │ ├── htc.py │ │ │ │ ├── mask_rcnn.py │ │ │ │ ├── mask_scoring_rcnn.py │ │ │ │ ├── reppoints_detector.py │ │ │ │ ├── retinanet.py │ │ │ │ ├── rpn.py │ │ │ │ ├── single_stage.py │ │ │ │ ├── test_mixins.py │ │ │ │ └── two_stage.py │ │ │ ├── losses │ │ │ │ ├── __init__.py │ │ │ │ ├── accuracy.py │ │ │ │ ├── balanced_l1_loss.py │ │ │ │ ├── cross_entropy_loss.py │ │ │ │ ├── focal_loss.py │ │ │ │ ├── ghm_loss.py │ │ │ │ ├── iou_loss.py │ │ │ │ ├── mse_loss.py │ │ │ │ ├── smooth_l1_loss.py │ │ │ │ └── utils.py │ │ │ ├── mask_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── fcn_mask_head.py │ │ │ │ ├── fused_semantic_head.py │ │ │ │ ├── grid_head.py │ │ │ │ ├── htc_mask_head.py │ │ │ │ └── maskiou_head.py │ │ │ ├── necks │ │ │ │ ├── __init__.py │ │ │ │ ├── bfp.py │ │ │ │ ├── fpn.py │ │ │ │ ├── fpn_carafe.py │ │ │ │ ├── hrfpn.py │ │ │ │ └── nas_fpn.py │ │ │ ├── plugins │ │ │ │ ├── __init__.py │ │ │ │ ├── generalized_attention.py │ │ │ │ └── non_local.py │ │ │ ├── registry.py │ │ │ ├── roi_extractors │ │ │ │ ├── __init__.py │ │ │ │ └── single_level.py │ │ │ ├── shared_heads │ │ │ │ ├── __init__.py │ │ │ │ └── res_layer.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── conv_module.py │ │ │ │ ├── conv_ws.py │ │ │ │ ├── norm.py │ │ │ │ ├── scale.py │ │ │ │ ├── upsample.py │ │ │ │ └── weight_init.py │ │ ├── ops 
│ │ │ ├── __init__.py │ │ │ ├── carafe │ │ │ │ ├── __init__.py │ │ │ │ ├── carafe.py │ │ │ │ ├── grad_check.py │ │ │ │ ├── setup.py │ │ │ │ └── src │ │ │ │ │ ├── carafe_cuda.cpp │ │ │ │ │ ├── carafe_cuda_kernel.cu │ │ │ │ │ ├── carafe_naive_cuda.cpp │ │ │ │ │ └── carafe_naive_cuda_kernel.cu │ │ │ ├── context_block.py │ │ │ ├── dcn │ │ │ │ ├── __init__.py │ │ │ │ ├── deform_conv.py │ │ │ │ ├── deform_pool.py │ │ │ │ └── src │ │ │ │ │ ├── deform_conv_cuda.cpp │ │ │ │ │ ├── deform_conv_cuda_kernel.cu │ │ │ │ │ ├── deform_pool_cuda.cpp │ │ │ │ │ └── deform_pool_cuda_kernel.cu │ │ │ ├── masked_conv │ │ │ │ ├── __init__.py │ │ │ │ ├── masked_conv.py │ │ │ │ └── src │ │ │ │ │ ├── masked_conv2d_cuda.cpp │ │ │ │ │ └── masked_conv2d_kernel.cu │ │ │ ├── nms │ │ │ │ ├── __init__.py │ │ │ │ ├── nms_wrapper.py │ │ │ │ └── src │ │ │ │ │ ├── nms_cpu.cpp │ │ │ │ │ ├── nms_cuda.cpp │ │ │ │ │ ├── nms_kernel.cu │ │ │ │ │ └── soft_nms_cpu.cpp │ │ │ ├── roi_align │ │ │ │ ├── __init__.py │ │ │ │ ├── gradcheck.py │ │ │ │ ├── roi_align.py │ │ │ │ └── src │ │ │ │ │ ├── roi_align_cuda.cpp │ │ │ │ │ └── roi_align_kernel.cu │ │ │ ├── roi_pool │ │ │ │ ├── __init__.py │ │ │ │ ├── gradcheck.py │ │ │ │ ├── roi_pool.py │ │ │ │ └── src │ │ │ │ │ ├── roi_pool_cuda.cpp │ │ │ │ │ └── roi_pool_kernel.cu │ │ │ ├── sigmoid_focal_loss │ │ │ │ ├── __init__.py │ │ │ │ ├── sigmoid_focal_loss.py │ │ │ │ └── src │ │ │ │ │ ├── sigmoid_focal_loss.cpp │ │ │ │ │ └── sigmoid_focal_loss_cuda.cu │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ └── src │ │ │ │ └── compiling_info.cpp │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── collect_env.py │ │ │ ├── contextmanagers.py │ │ │ ├── flops_counter.py │ │ │ ├── logger.py │ │ │ ├── profiling.py │ │ │ ├── registry.py │ │ │ └── util_mixins.py │ ├── pytest.ini │ ├── requirements.txt │ ├── requirements │ │ ├── build.txt │ │ ├── optional.txt │ │ ├── runtime.txt │ │ └── tests.txt │ ├── setup.py │ ├── tests │ │ ├── async_benchmark.py │ │ ├── test_assigner.py │ │ ├── test_async.py │ 
│ ├── test_config.py │ │ ├── test_forward.py │ │ ├── test_heads.py │ │ ├── test_nms.py │ │ ├── test_sampler.py │ │ ├── test_soft_nms.py │ │ └── test_utils.py │ └── tools │ │ ├── analyze_logs.py │ │ ├── coco_error_analysis.py │ │ ├── convert_datasets │ │ ├── convert_gta_to_coco.py │ │ ├── convert_gta_to_custom.py │ │ └── pascal_voc.py │ │ ├── detectron2pytorch.py │ │ ├── dist_test.sh │ │ ├── dist_train.sh │ │ ├── get_flops.py │ │ ├── publish_model.py │ │ ├── reshape_faster_rcnn.py │ │ ├── robustness_eval.py │ │ ├── slurm_test.sh │ │ ├── slurm_train.sh │ │ ├── test.py │ │ ├── test_robustness.py │ │ ├── train.py │ │ └── upgrade_model_version.py └── mmdetection_detector.py ├── evaluation ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── motmetrics_evaluation.cpython-37.pyc │ └── multicam_evaluation.cpython-37.pyc ├── motmetrics_evaluation.py ├── multicam_evaluation.py ├── multicam_trackwise_evaluation.py ├── py_motmetrics │ ├── .gitignore │ ├── .travis.yml │ ├── Dockerfile │ ├── LICENSE │ ├── MANIFEST.in │ ├── Readme.md │ ├── Release.md │ ├── __init__.py │ ├── appveyor.yml │ ├── environment.yml │ ├── motmetrics │ │ ├── __init__.py │ │ ├── apps │ │ │ ├── __init__.py │ │ │ ├── eval_motchallenge.py │ │ │ ├── example.py │ │ │ └── list_metrics.py │ │ ├── data │ │ │ ├── TUD-Campus │ │ │ │ ├── gt.txt │ │ │ │ └── test.txt │ │ │ ├── TUD-Stadtmitte │ │ │ │ ├── gt.txt │ │ │ │ └── test.txt │ │ │ └── iotest │ │ │ │ ├── motchallenge.txt │ │ │ │ └── vatic.txt │ │ ├── distances.py │ │ ├── etc │ │ │ └── mot.png │ │ ├── io.py │ │ ├── lap.py │ │ ├── metrics.py │ │ ├── mot.py │ │ ├── tests │ │ │ ├── __init__.py │ │ │ ├── test_distances.py │ │ │ ├── test_io.py │ │ │ ├── test_lap.py │ │ │ ├── test_metrics.py │ │ │ └── test_mot.py │ │ └── utils.py │ ├── requirements.txt │ └── setup.py └── track_metrics_evaluation.py ├── feature_extractors ├── .gitignore ├── ABD_Net │ ├── .gitignore │ ├── DATASETS.md │ ├── LICENSE │ ├── README.md │ ├── README_ORIG.md │ ├── 
README_Training_and_Testing_Guides.md │ ├── args.py │ ├── doc_images │ │ ├── Arch.png │ │ ├── JET_VIS.png │ │ ├── att.png │ │ └── qr.png │ ├── eval_acc.py │ ├── requirements.txt │ ├── torchreid │ │ ├── __init__.py │ │ ├── components │ │ │ ├── __init__.py │ │ │ ├── attention.py │ │ │ ├── branches.py │ │ │ ├── dropout.py │ │ │ └── shallow_cam.py │ │ ├── data_manager.py │ │ ├── dataset_loader.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── bases.py │ │ │ ├── cuhk01.py │ │ │ ├── cuhk03.py │ │ │ ├── dukemtmcreid.py │ │ │ ├── dukemtmcreid_d.py │ │ │ ├── dukemtmcvidreid.py │ │ │ ├── grid.py │ │ │ ├── ilids.py │ │ │ ├── ilidsvid.py │ │ │ ├── market1501.py │ │ │ ├── market1501_d.py │ │ │ ├── mars.py │ │ │ ├── msmt17.py │ │ │ ├── prid2011.py │ │ │ ├── prid450s.py │ │ │ ├── sensereid.py │ │ │ ├── valset.py │ │ │ ├── veri.py │ │ │ └── viper.py │ │ ├── eval_cylib │ │ │ ├── Makefile │ │ │ ├── __init__.py │ │ │ ├── eval_metrics_cy.pyx │ │ │ ├── setup.py │ │ │ └── test_cython.py │ │ ├── eval_metrics.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── batch_spectral_loss.py │ │ │ ├── center_loss.py │ │ │ ├── cross_entropy_loss.py │ │ │ ├── hard_mine_triplet_loss.py │ │ │ ├── incidence_loss.py │ │ │ ├── incidence_xent_loss.py │ │ │ ├── lowrank_loss.py │ │ │ ├── of_penalty.py │ │ │ ├── ring_loss.py │ │ │ ├── sa_loss.py │ │ │ ├── singular_triplet_loss.py │ │ │ └── spectral_loss.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── densenet.py │ │ │ ├── hacnn.py │ │ │ ├── inceptionresnetv2.py │ │ │ ├── inceptionv4.py │ │ │ ├── mlfn.py │ │ │ ├── mobilenetv2.py │ │ │ ├── mudeep.py │ │ │ ├── nasnet.py │ │ │ ├── pcb.py │ │ │ ├── resnet.py │ │ │ ├── resnetmid.py │ │ │ ├── resnext.py │ │ │ ├── senet.py │ │ │ ├── shufflenet.py │ │ │ ├── squeezenet.py │ │ │ └── xception.py │ │ ├── optimizers.py │ │ ├── regularizers │ │ │ ├── LSVO.py │ │ │ ├── NR.py │ │ │ ├── SO.py │ │ │ ├── SVDO.py │ │ │ ├── SVMO.py │ │ │ ├── __init__.py │ │ │ └── param_controller.py │ │ ├── samplers.py │ │ ├── transforms.py │ │ 
└── utils │ │ │ ├── __init__.py │ │ │ ├── avgmeter.py │ │ │ ├── environ.py │ │ │ ├── iotools.py │ │ │ ├── loggers.py │ │ │ ├── nuc_norm.py │ │ │ ├── reidtools.py │ │ │ └── torchtools.py │ └── train.py ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── abd_net_extractor.cpython-37.pyc │ └── reid_strong_extractor.cpython-37.pyc ├── abd_net_extractor.py ├── reid_strong_baseline │ ├── .gitignore │ ├── Experiment-all_tricks-tri_center-duke.sh │ ├── Experiment-all_tricks-tri_center-market.sh │ ├── Experiment-all_tricks-without_center-duke.sh │ ├── Experiment-all_tricks-without_center-gta-clean.sh │ ├── Experiment-all_tricks-without_center-gta.sh │ ├── Experiment-all_tricks-without_center-gta_track_images.sh │ ├── Experiment-all_tricks-without_center-gta_track_images_iosb.sh │ ├── Experiment-pretrain_choice-all_tricks-tri_center-market.sh │ ├── Experiment-seresnext50-all_tricks-tri_center-market.sh │ ├── LICENCE.md │ ├── README.md │ ├── Test-all_tricks-tri_center-feat_after_bn-cos-duke.sh │ ├── Test-all_tricks-tri_center-feat_after_bn-cos-market.sh │ ├── Test-all_tricks-without_center-feat_after_bn-cos-duke-downloaded_model.sh │ ├── Test-all_tricks-without_center-feat_after_bn-cos-duke.sh │ ├── Test-all_tricks-without_center-feat_after_bn-cos-gta2207-rzr.sh │ ├── Test-all_tricks-without_center-feat_after_bn-cos-gta2207.sh │ ├── Test-all_tricks-without_center-feat_after_bn-cos-gta_track_images-rzr.sh │ ├── Test-all_tricks-without_center-feat_after_bn-cos-market.sh │ ├── Test-reranking-all_tricks-tri_center-feat_after_bn-cos-duke.sh │ ├── Test-reranking-all_tricks-tri_center-feat_after_bn-cos-market.sh │ ├── __init__.py │ ├── config │ │ ├── __init__.py │ │ └── defaults.py │ ├── configs │ │ ├── baseline.yml │ │ ├── softmax.yml │ │ ├── softmax_triplet.yml │ │ └── softmax_triplet_with_center.yml │ ├── data │ │ ├── __init__.py │ │ ├── build.py │ │ ├── collate_batch.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── bases.py │ │ │ ├── cuhk03.py │ │ │ ├── 
dataset_loader.py │ │ │ ├── dukemtmcreid.py │ │ │ ├── eval_reid.py │ │ │ ├── gta2207.py │ │ │ ├── gta2207clean.py │ │ │ ├── gta2207daynight.py │ │ │ ├── gta2207largeimgs.py │ │ │ ├── gta_track_images.py │ │ │ ├── market1501.py │ │ │ ├── msmt17.py │ │ │ └── veri.py │ │ ├── samplers │ │ │ ├── __init__.py │ │ │ └── triplet_sampler.py │ │ └── transforms │ │ │ ├── __init__.py │ │ │ ├── build.py │ │ │ └── transforms.py │ ├── engine │ │ ├── inference.py │ │ └── trainer.py │ ├── imgs │ │ └── pipeline.jpg │ ├── layers │ │ ├── __init__.py │ │ ├── center_loss.py │ │ └── triplet_loss.py │ ├── modeling │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── resnet.py │ │ │ ├── resnet_ibn_a.py │ │ │ └── senet.py │ │ └── baseline.py │ ├── solver │ │ ├── __init__.py │ │ ├── build.py │ │ └── lr_scheduler.py │ ├── tests │ │ ├── __init__.py │ │ └── lr_scheduler_test.py │ ├── tools │ │ ├── __init__.py │ │ ├── test.py │ │ └── train.py │ └── utils │ │ ├── __init__.py │ │ ├── iotools.py │ │ ├── logger.py │ │ ├── re_ranking.py │ │ └── reid_metric.py └── reid_strong_extractor.py ├── readme_files ├── feature_graphics_all_cams_500_frames.jpg └── img_hid_31_oid_1934.jpg ├── requirements.txt ├── run_evaluation.py ├── run_multi_cam_clustering.py ├── run_tracker.py ├── start_run_tracker.sh ├── trackers ├── __init__.py ├── __pycache__ │ └── __init__.cpython-37.pyc ├── deep_sort │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ └── deep_sort.cpython-37.pyc │ ├── deep_sort.py │ └── sort │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── detection.cpython-37.pyc │ │ ├── iou_matching.cpython-37.pyc │ │ ├── kalman_filter.cpython-37.pyc │ │ ├── linear_assignment.cpython-37.pyc │ │ ├── nn_matching.cpython-37.pyc │ │ ├── track.cpython-37.pyc │ │ └── tracker.cpython-37.pyc │ │ ├── detection.py │ │ ├── iou_matching.py │ │ ├── kalman_filter.py │ │ ├── linear_assignment.py │ │ ├── nn_matching.py │ │ ├── preprocessing.py │ │ ├── track.py │ │ └── 
tracker.py └── iou_tracker │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── __init__.py │ ├── demo.py │ ├── iou_result_converter.py │ ├── iou_tracker.py │ ├── mot16.py │ ├── mot17.py │ ├── mta_track.py │ ├── run_tracker.m │ ├── saveStateInfo.m │ ├── seqmaps │ ├── frcnn-all.txt │ ├── frcnn-test.txt │ ├── frcnn-train.txt │ ├── sdp-all.txt │ ├── sdp-test.txt │ └── sdp-train.txt │ └── util_iou.py ├── util.py └── utilities ├── .idea ├── inspectionProfiles │ └── profiles_settings.xml ├── misc.xml ├── modules.xml ├── utilities.iml ├── vcs.xml └── workspace.xml ├── BoundingBoxTracked.py ├── __pycache__ ├── dataset_statistics.cpython-37.pyc ├── helper.cpython-37.pyc ├── joint.cpython-37.pyc ├── non_daemonic_pool.cpython-37.pyc ├── pandas_loader.cpython-37.pyc ├── pose.cpython-37.pyc ├── preprocessing.cpython-37.pyc ├── python_path_utility.cpython-37.pyc └── track_result_statistics.cpython-37.pyc ├── analyze_dataset.py ├── analyze_distances.py ├── arrange_single_camera_evaluation.py ├── calculate_brightness_over_time.py ├── convert_tracks_to_images.py ├── create_feature_graphic.py ├── create_mugshot_grid.py ├── create_reid_packed_image.py ├── dataset_statistics.py ├── download_pytorch_model.py ├── draw_multi_camera_tracks.py ├── draw_track_start_end.py ├── get_reid_dataset_data.py ├── glasbey ├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── glasbey.py ├── images │ ├── palette-set1-30.png │ └── palette-white-30.png ├── palettes │ ├── accent.txt │ ├── dark2.txt │ ├── paired.txt │ ├── pastel1.txt │ ├── pastel2.txt │ ├── set1.txt │ ├── set2.txt │ └── set3.txt ├── test │ └── test_glasbey.py └── view_palette.py ├── helper.py ├── joint.py ├── log_statistics.csv ├── log_statistics.py ├── non_daemonic_pool.py ├── other_dataset_statistics.py ├── pandas_loader.py ├── pose.py ├── preprocessing.py ├── python_path_utility.py ├── reduce_dataset_size.py ├── reid_dataset_statistics.py ├── rename_mugshot_images.py ├── sort_mot_eval_columns.py ├── track_result_statistics.py ├── 
try_env.py └── visualize_reid_query.py /.gitignore: -------------------------------------------------------------------------------- 1 | work_dirs 2 | *.pyc -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Philipp Koehl 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /__pycache__/util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/__pycache__/util.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__init__.py -------------------------------------------------------------------------------- /clustering/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/__pycache__/clustering_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__pycache__/clustering_utils.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/__pycache__/feature_extraction_track_results.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__pycache__/feature_extraction_track_results.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/__pycache__/heapq.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__pycache__/heapq.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/__pycache__/multi_cam_clustering.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__pycache__/multi_cam_clustering.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/__pycache__/track_visualization.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__pycache__/track_visualization.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/__pycache__/velocity_calculation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/__pycache__/velocity_calculation.cpython-37.pyc -------------------------------------------------------------------------------- /clustering/heapq.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/clustering/heapq.py -------------------------------------------------------------------------------- /configs/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /configs/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/configs/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /configs/clustering_configs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/configs/clustering_configs/__init__.py -------------------------------------------------------------------------------- /configs/evaluation_configs/evaluate_cascade_abdnet_mta_none_test_iosb.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | root = { 4 | 5 | "dataset_folder" : "/net/merkur/storage/deeplearning/users/koehl/gta/GTA_Dataset_22.07.2019/test" 6 | , "track_results_folder" : "/home/koehlp/Downloads/work_dirs/config_runs/cascade_abdnet_mta_test_cpt_iosb" 7 | , "cam_ids" : list(range(6)) 8 | , "working_dir" : "/home/koehlp/Downloads/work_dirs" 9 | , "n_parts" : 10 10 | , "config_basename" : os.path.basename(__file__).replace(".py","") 11 | , "evaluate_multi_cam" : True 12 | , "evaluate_single_cam" : True 13 | ,"config_run_path" : "/home/koehlp/Downloads/work_dirs/evaluation/config_runs" 14 | 15 | } -------------------------------------------------------------------------------- /configs/tracker_configs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/configs/tracker_configs/__init__.py -------------------------------------------------------------------------------- /datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/datasets/__init__.py 
class Base_dataset_image(metaclass=ABCMeta):
    """Abstract interface for a single image record yielded by a dataset.

    Concrete implementations expose the image pixels plus the camera/frame
    metadata needed by the multi-camera tracker as read-only properties.
    """

    @property
    @abstractmethod
    def cam_id(self):
        """Identifier of the camera this image was recorded by."""
        pass

    @property
    @abstractmethod
    def img(self):
        """The decoded image data."""
        pass

    @property
    @abstractmethod
    def image_path(self):
        """Filesystem path the image was loaded from."""
        pass

    @property
    @abstractmethod
    def frame_no_cam(self):
        """Frame number of this image within its camera's stream."""
        pass

    @property
    @abstractmethod
    def img_dims(self):
        """Dimensions of the image."""
        pass


class Base_dataset:
    """Base class for iterable datasets yielding ``Base_dataset_image``-like items.

    Subclasses are expected to implement the iterator protocol
    (``__iter__``/``__next__``) and ``__len__``.

    Bug fix: the original decorated ``__iter__``, ``__len__`` and ``__next__``
    with ``@property``, turning them into non-callable property objects on the
    class.  Python looks these dunders up on the type and *calls* them, so any
    subclass that did not override all three would crash with
    ``TypeError: 'property' object is not callable`` when passed to ``iter()``
    or ``len()``.  Protocol dunders must be plain methods.

    NOTE(review): ``@abstractmethod`` has no enforcing effect here because the
    class does not use ``metaclass=ABCMeta``; it is kept purely as
    documentation of intent, and the metaclass is deliberately not added so
    existing instantiation behavior is unchanged.
    """

    def __init__(self):
        pass

    @abstractmethod
    def __iter__(self):
        pass

    @abstractmethod
    def __len__(self):
        pass

    @abstractmethod
    def __next__(self):
        pass
-------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /detectors/mmdetection/.github/ISSUE_TEMPLATE/error-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Error report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | Thanks for your error report and we appreciate it a lot. 11 | 12 | **Checklist** 13 | 1. I have searched related issues but cannot get the expected help. 14 | 2. The bug has not been fixed in the latest version. 15 | 16 | **Describe the bug** 17 | A clear and concise description of what the bug is. 18 | 19 | **Reproduction** 20 | 1. What command or script did you run? 21 | ``` 22 | A placeholder for the command. 23 | ``` 24 | 2. Did you make any modifications on the code or config? Did you understand what you have modified? 25 | 3. What dataset did you use? 26 | 27 | **Environment** 28 | 29 | 1. Please run `python mmdet/utils/collect_env.py` to collect necessary environment infomation and paste it here. 30 | 2. You may add addition that may be helpful for locating the problem, such as 31 | - How you installed PyTorch [e.g., pip, conda, source] 32 | - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) 33 | 34 | **Error traceback** 35 | If applicable, paste the error trackback here. 36 | ``` 37 | A placeholder for trackback. 38 | ``` 39 | 40 | **Bug fix** 41 | If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated! 
42 | -------------------------------------------------------------------------------- /detectors/mmdetection/.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the feature** 11 | 12 | **Motivation** 13 | A clear and concise description of the motivation of the feature. 14 | Ex1. It is inconvenient when [....]. 15 | Ex2. There is a recent paper [....], which is very helpful for [....]. 16 | 17 | **Related resources** 18 | If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful. 19 | 20 | **Additional context** 21 | Add any other context or screenshots about the feature request here. 22 | If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated. 
23 | -------------------------------------------------------------------------------- /detectors/mmdetection/.github/ISSUE_TEMPLATE/general_questions.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General questions 3 | about: Ask general questions to get help 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /detectors/mmdetection/.isort.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length = 79 3 | multi_line_output = 0 4 | known_standard_library = setuptools 5 | known_first_party = mmdet 6 | known_third_party = asynctest,cv2,matplotlib,mmcv,numpy,pycocotools,robustness_eval,roi_align,roi_pool,seaborn,six,terminaltables,torch,torchvision 7 | no_lines_before = STDLIB,LOCALFOLDER 8 | default_section = THIRDPARTY 9 | -------------------------------------------------------------------------------- /detectors/mmdetection/.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/asottile/seed-isort-config 3 | rev: v1.9.3 4 | hooks: 5 | - id: seed-isort-config 6 | - repo: https://github.com/pre-commit/mirrors-isort 7 | rev: v4.3.21 8 | hooks: 9 | - id: isort 10 | - repo: https://github.com/pre-commit/mirrors-yapf 11 | rev: v0.29.0 12 | hooks: 13 | - id: yapf 14 | - repo: https://github.com/pre-commit/pre-commit-hooks 15 | rev: v2.4.0 16 | hooks: 17 | - id: flake8 18 | - id: trailing-whitespace 19 | - id: check-yaml 20 | - id: end-of-file-fixer 21 | - id: requirements-txt-fixer 22 | -------------------------------------------------------------------------------- /detectors/mmdetection/.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | BASED_ON_STYLE = pep8 3 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 4 | 
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 5 | -------------------------------------------------------------------------------- /detectors/mmdetection/.travis.yml: -------------------------------------------------------------------------------- 1 | dist: bionic # ubuntu 18.04 2 | language: python 3 | 4 | python: 5 | - "3.5" 6 | - "3.6" 7 | - "3.7" 8 | 9 | env: CUDA=10.1.105-1 CUDA_SHORT=10.1 UBUNTU_VERSION=ubuntu1804 FORCE_CUDA=1 10 | cache: pip 11 | 12 | # Ref to CUDA installation in Travis: https://github.com/jeremad/cuda-travis 13 | before_install: 14 | - INSTALLER=cuda-repo-${UBUNTU_VERSION}_${CUDA}_amd64.deb 15 | - wget http://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/${INSTALLER} 16 | - sudo dpkg -i ${INSTALLER} 17 | - wget https://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/7fa2af80.pub 18 | - sudo apt-key add 7fa2af80.pub 19 | - sudo apt update -qq 20 | - sudo apt install -y cuda-${CUDA_SHORT/./-} cuda-cufft-dev-${CUDA_SHORT/./-} 21 | - sudo apt clean 22 | - CUDA_HOME=/usr/local/cuda-${CUDA_SHORT} 23 | - LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${CUDA_HOME}/include:${LD_LIBRARY_PATH} 24 | - PATH=${CUDA_HOME}/bin:${PATH} 25 | 26 | install: 27 | - pip install Pillow==6.2.2 # remove this line when torchvision>=0.5 28 | - pip install torch==1.2 torchvision==0.4.0 # TODO: fix CI for pytorch>1.2 29 | - pip install "git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI" 30 | - pip install -r requirements.txt 31 | 32 | before_script: 33 | - flake8 . 
34 | - isort -rc --check-only --diff mmdet/ tools/ tests/ 35 | - yapf -r -d --style .style.yapf mmdet/ tools/ tests/ configs/ 36 | 37 | script: 38 | - python setup.py check -m -s 39 | - python setup.py build_ext --inplace 40 | - coverage run --source mmdet -m py.test -v --xdoctest-modules tests mmdet 41 | 42 | after_success: 43 | - coverage report 44 | -------------------------------------------------------------------------------- /detectors/mmdetection/configs/atss/README.md: -------------------------------------------------------------------------------- 1 | # Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection 2 | 3 | 4 | ## Introduction 5 | 6 | ``` 7 | @article{zhang2019bridging, 8 | title = {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection}, 9 | author = {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.}, 10 | journal = {arXiv preprint arXiv:1912.02424}, 11 | year = {2019} 12 | } 13 | ``` 14 | 15 | 16 | ## Results and Models 17 | 18 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 19 | |:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 20 | | R-50 | pytorch | 1x | 3.6 | 0.357 | 12.8 | 39.2 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/atss/atss_r50_fpn_1x_20200113-a7aa251e.pth)| 21 | -------------------------------------------------------------------------------- /detectors/mmdetection/configs/empirical_attention/README.md: -------------------------------------------------------------------------------- 1 | # An Empirical Study of Spatial Attention Mechanisms in Deep Networks 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @article{zhu2019empirical, 7 | title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks}, 8 | author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, 
Stephen and Dai, Jifeng}, 9 | journal={arXiv preprint arXiv:1904.05873}, 10 | year={2019} 11 | } 12 | ``` 13 | 14 | 15 | ## Results and Models 16 | 17 | | Backbone | Attention Component | DCN | Lr schd | box AP | Download | 18 | |:---------:|:-------------------:|:----:|:-------:|:------:|:--------:| 19 | | R-50 | 1111 | N | 1x | 38.6 | - | 20 | | R-50 | 0010 | N | 1x | 38.2 | - | 21 | | R-50 | 1111 | Y | 1x | 41.0 | - | 22 | | R-50 | 0010 | Y | 1x | 40.8 | - | 23 | 24 | -------------------------------------------------------------------------------- /detectors/mmdetection/configs/free_anchor/README.md: -------------------------------------------------------------------------------- 1 | # FreeAnchor: Learning to Match Anchors for Visual Object Detection 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{zhang2019freeanchor, 7 | title = {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection}, 8 | author = {Zhang, Xiaosong and Wan, Fang and Liu, Chang and Ji, Rongrong and Ye, Qixiang}, 9 | booktitle = {Neural Information Processing Systems}, 10 | year = {2019} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 17 | |:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 18 | | R-50 | pytorch | 1x | 4.7 | 0.322 | 12.0 | 38.4 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/free_anchor/retinanet_free_anchor_r50_fpn_1x_20190914-84db6585.pth) | 19 | | R-101 | pytorch | 1x | 6.6 | 0.437 | 9.7 | 40.3 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/free_anchor/retinanet_free_anchor_r101_fpn_1x_20190914-c4e4db81.pth) | 20 | | X-101-32x4d | pytorch | 1x | 7.8 | 0.640 | 8.4 | 42.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/free_anchor/retinanet_free_anchor_x101-32x4d_fpn_1x_20190914-eb73b804.pth) | 21 | 22 | 
**Notes:** 23 | - We use 8 GPUs with 2 images/GPU. 24 | - For more settings and models, please refer to the [official repo](https://github.com/zhangxiaosong18/FreeAnchor). 25 | -------------------------------------------------------------------------------- /detectors/mmdetection/configs/ghm/README.md: -------------------------------------------------------------------------------- 1 | # Gradient Harmonized Single-stage Detector 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{li2019gradient, 7 | title={Gradient Harmonized Single-stage Detector}, 8 | author={Li, Buyu and Liu, Yu and Wang, Xiaogang}, 9 | booktitle={AAAI Conference on Artificial Intelligence}, 10 | year={2019} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 17 | | :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :------: | 18 | | R-50-FPN | pytorch | 1x | 3.9 | 0.500 | 9.4 | 36.9 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_r50_fpn_1x_20190608-b9aa5862.pth) | 19 | | R-101-FPN | pytorch | 1x | 5.8 | 0.625 | 8.5 | 39.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_r101_fpn_1x_20190608-b885b74a.pth) | 20 | | X-101-32x4d-FPN | pytorch | 1x | 7.0 | 0.818 | 7.6 | 40.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_x101_32x4d_fpn_1x_20190608-ed295d22.pth) | 21 | | X-101-64x4d-FPN | pytorch | 1x | 9.9 | 1.191 | 6.1 | 41.6 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/ghm/retinanet_ghm_x101_64x4d_fpn_1x_20190608-7f2037ce.pth) | -------------------------------------------------------------------------------- /detectors/mmdetection/configs/nas_fpn/README.md: -------------------------------------------------------------------------------- 1 | # NAS-FPN: Learning 
Scalable Feature Pyramid Architecture for Object Detection 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{ghiasi2019fpn, 7 | title={Nas-fpn: Learning scalable feature pyramid architecture for object detection}, 8 | author={Ghiasi, Golnaz and Lin, Tsung-Yi and Le, Quoc V}, 9 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, 10 | pages={7036--7045}, 11 | year={2019} 12 | } 13 | ``` 14 | 15 | ## Results and Models 16 | 17 | We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. RetinaNet is used in the paper. 18 | 19 | | Backbone | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 20 | |:-----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| 21 | | R-50-FPN | 50e | 12.8 | 0.513 | 15.3 | 37.0 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/nas_fpn/retinanet_crop640_r50_fpn_50e_190824-4d75bfa0.pth) | 22 | | R-50-NASFPN | 50e | 14.8 | 0.662 | 13.1 | 39.8 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/nas_fpn/retinanet_crop640_r50_nasfpn_50e_20191225-b82d3a86.pth) | 23 | 24 | 25 | **Note**: We find that it is unstable to train NAS-FPN and there is a small chance that results can be 3% mAP lower. 
26 | -------------------------------------------------------------------------------- /detectors/mmdetection/configs/pascal_voc/README.md: -------------------------------------------------------------------------------- 1 | ### SSD 2 | 3 | | Backbone | Size | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | 4 | | :------: | :---: | :---: | :-----: | :------: | :-----------------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------: | 5 | | VGG16 | 300 | caffe | 240e | 2.5 | 0.159 | 35.7 / 53.6 | 77.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd300_voc_vgg16_caffe_240e_20190501-7160d09a.pth) | 6 | | VGG16 | 512 | caffe | 240e | 4.3 | 0.214 | 27.5 / 35.9 | 80.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd512_voc_vgg16_caffe_240e_20190501-ff194be1.pth) | -------------------------------------------------------------------------------- /detectors/mmdetection/configs/reppoints/reppoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/detectors/mmdetection/configs/reppoints/reppoints.png -------------------------------------------------------------------------------- /detectors/mmdetection/configs/scratch/README.md: -------------------------------------------------------------------------------- 1 | # Rethinking ImageNet Pre-training 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @article{he2018rethinking, 7 | title={Rethinking imagenet pre-training}, 8 | author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr}, 9 | journal={arXiv preprint arXiv:1811.08883}, 10 | year={2018} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Model | Backbone | Style | Lr schd | box AP | mask AP | Download | 17 | 
|:------------:|:---------:|:-------:|:-------:|:------:|:-------:|:--------:| 18 | | Faster R-CNN | R-50-FPN | pytorch | 6x | 40.1 | - | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/models/scratch/scratch_faster_rcnn_r50_fpn_gn_6x_20190515-ff554978.pth) | 19 | | Mask R-CNN | R-50-FPN | pytorch | 6x | 41.0 | 37.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/scratch/scratch_mask_rcnn_r50_fpn_gn_6x_20190515-96743f5e.pth) | 20 | 21 | Note: 22 | - The above models are trained with 16 GPUs. -------------------------------------------------------------------------------- /detectors/mmdetection/configs/wider_face/README.md: -------------------------------------------------------------------------------- 1 | ## WIDER Face Dataset 2 | 3 | To use the WIDER Face dataset you need to download it 4 | and extract to the `data/WIDERFace` folder. Annotation in the VOC format 5 | can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git). 6 | You should move the annotation files from `WIDER_train_annotations` and `WIDER_val_annotations` folders 7 | to the `Annotation` folders inside the corresponding directories `WIDER_train` and `WIDER_val`. 8 | Also annotation lists `val.txt` and `train.txt` should be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`. 9 | The directory should be like this: 10 | 11 | ``` 12 | mmdetection 13 | ├── mmdet 14 | ├── tools 15 | ├── configs 16 | ├── data 17 | │ ├── WIDERFace 18 | │ │ ├── WIDER_train 19 | │ | │ ├──0--Parade 20 | │ | │ ├── ... 21 | │ | │ ├── Annotations 22 | │ │ ├── WIDER_val 23 | │ | │ ├──0--Parade 24 | │ | │ ├── ... 25 | │ | │ ├── Annotations 26 | │ │ ├── val.txt 27 | │ │ ├── train.txt 28 | 29 | ``` 30 | 31 | After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or 32 | create your own config based on the presented one. 
33 | -------------------------------------------------------------------------------- /detectors/mmdetection/demo/coco_test_12510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/detectors/mmdetection/demo/coco_test_12510.jpg -------------------------------------------------------------------------------- /detectors/mmdetection/demo/corruptions_sev_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/detectors/mmdetection/demo/corruptions_sev_3.png -------------------------------------------------------------------------------- /detectors/mmdetection/demo/data_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/detectors/mmdetection/demo/data_pipeline.png -------------------------------------------------------------------------------- /detectors/mmdetection/demo/demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/detectors/mmdetection/demo/demo.jpg -------------------------------------------------------------------------------- /detectors/mmdetection/demo/loss_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/detectors/mmdetection/demo/loss_curve.png -------------------------------------------------------------------------------- /detectors/mmdetection/demo/webcam_demo.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import cv2 4 | import torch 5 | 
def parse_args():
    """Parse the command-line options of the MMDetection webcam demo.

    Returns:
        argparse.Namespace with ``config``, ``checkpoint``, ``device``,
        ``camera_id`` and ``score_thr`` attributes.
    """
    ap = argparse.ArgumentParser(description='MMDetection webcam demo')
    # Required positionals: model config and trained weights.
    ap.add_argument('config', help='test config file path')
    ap.add_argument('checkpoint', help='checkpoint file')
    # Optional runtime knobs.
    ap.add_argument('--device', type=int, default=0, help='CUDA device id')
    ap.add_argument('--camera-id', type=int, default=0,
                    help='camera device id')
    ap.add_argument('--score-thr', type=float, default=0.5,
                    help='bbox score threshold')
    return ap.parse_args()
https://github.com/open-mmlab/mmdetection.git /mmdetection 18 | WORKDIR /mmdetection 19 | ENV FORCE_CUDA="1" 20 | RUN pip install --no-cache-dir -e . 21 | -------------------------------------------------------------------------------- /detectors/mmdetection/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /detectors/mmdetection/docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to MMDetection's documentation! 2 | ======================================= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | INSTALL.md 8 | GETTING_STARTED.md 9 | MODEL_ZOO.md 10 | TECHNICAL_DETAILS.md 11 | CHANGELOG.md 12 | 13 | 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`search` 20 | -------------------------------------------------------------------------------- /detectors/mmdetection/docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 
11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /detectors/mmdetection/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx 3 | sphinx_markdown_tables 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__, short_version 2 | 3 | __all__ = ['__version__', 'short_version'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import (async_inference_detector, inference_detector, 2 | init_detector, show_result, show_result_pyplot) 3 | from .train import get_root_logger, set_random_seed, train_detector 4 | 5 | __all__ = [ 6 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 7 | 'async_inference_detector', 'inference_detector', 'show_result', 8 | 'show_result_pyplot' 9 | ] 10 | 
class PointGenerator(object):
    """Generates per-location points (x, y, stride) for a feature map.

    Used by anchor-free heads: every cell of an ``feat_h x feat_w`` feature
    map is mapped back to an image-space coordinate via the stride.
    """

    def _meshgrid(self, x, y, row_major=True):
        """Expand 1-D coordinate vectors into flattened 2-D grids.

        With ``row_major`` the x coordinate varies fastest.
        """
        grid_x = x.repeat(len(y))
        grid_y = y.view(-1, 1).repeat(1, len(x)).view(-1)
        return (grid_x, grid_y) if row_major else (grid_y, grid_x)

    def grid_points(self, featmap_size, stride=16, device='cuda'):
        """Return an ``(feat_h * feat_w, 3)`` tensor of (x, y, stride) rows."""
        feat_h, feat_w = featmap_size
        xs = torch.arange(0., feat_w, device=device) * stride
        ys = torch.arange(0., feat_h, device=device) * stride
        grid_xx, grid_yy = self._meshgrid(xs, ys)
        # Broadcast the scalar stride into a column matching the grid length.
        stride_col = xs.new_full((grid_xx.shape[0], ), stride)
        points = torch.stack([grid_xx, grid_yy, stride_col], dim=-1)
        return points.to(device)

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Flag the points that fall inside the valid (unpadded) region.

        Returns a flat uint8 mask of length ``feat_h * feat_w``.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        flag_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)
        flag_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)
        flag_x[:valid_w] = 1
        flag_y[:valid_h] = 1
        xx, yy = self._meshgrid(flag_x, flag_y)
        return xx & yy
def build_assigner(cfg, **kwargs):
    """Build a bbox assigner from ``cfg``.

    Accepts either an already-constructed ``BaseAssigner`` instance (returned
    unchanged) or a config dict that is instantiated via mmcv.

    Raises:
        TypeError: if ``cfg`` is neither of the above.
    """
    if isinstance(cfg, assigners.BaseAssigner):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs)
    else:
        # Bug fix: the message used to say "sampler" (copy-paste from
        # build_sampler below), which misled users about which config was bad.
        raise TypeError('Invalid type {} for building an assigner'.format(
            type(cfg)))


def build_sampler(cfg, **kwargs):
    """Build a bbox sampler from ``cfg``.

    Accepts either an already-constructed ``BaseSampler`` instance (returned
    unchanged) or a config dict that is instantiated via mmcv.

    Raises:
        TypeError: if ``cfg`` is neither of the above.
    """
    if isinstance(cfg, samplers.BaseSampler):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
    else:
        raise TypeError('Invalid type {} for building a sampler'.format(
            type(cfg)))


def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg):
    """Assign candidate boxes to ground truth, then sample from the result.

    Builds the assigner and sampler from ``cfg.assigner`` / ``cfg.sampler``
    and returns the ``(assign_result, sampling_result)`` pair.
    """
    bbox_assigner = build_assigner(cfg.assigner)
    bbox_sampler = build_sampler(cfg.sampler)
    assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore,
                                         gt_labels)
    sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes,
                                          gt_labels)
    return assign_result, sampling_result
from abc import ABCMeta, abstractmethod


class BaseAssigner(metaclass=ABCMeta):
    """Abstract interface for bbox assigners.

    Concrete assigners implement :meth:`assign`, which maps each candidate
    box to a ground-truth box (or to background).
    """

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign ground-truth boxes (and optionally labels) to *bboxes*."""
        pass
import torch

from .base_sampler import BaseSampler
from .sampling_result import SamplingResult


class PseudoSampler(BaseSampler):
    """A no-op sampler that keeps every assigned box instead of subsampling."""

    def __init__(self, **kwargs):
        # Deliberately skip BaseSampler.__init__: no sampling params needed.
        pass

    def _sample_pos(self, **kwargs):
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
        """Take all positives (gt_inds > 0) and all negatives (gt_inds == 0)."""
        gt_inds = assign_result.gt_inds
        pos_inds = torch.nonzero(gt_inds > 0).squeeze(-1).unique()
        neg_inds = torch.nonzero(gt_inds == 0).squeeze(-1).unique()
        # No GT boxes are injected, so every flag stays 0.
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, gt_flags)
from collections import abc

import numpy as np
import torch


def cast_tensor_type(inputs, src_type, dst_type):
    """Recursively cast every torch tensor inside *inputs* to *dst_type*.

    Strings and numpy arrays pass through untouched; mappings and iterables
    are rebuilt with their original container type. Anything else is
    returned as-is.
    """
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    # str must be handled before the generic Iterable branch below.
    if isinstance(inputs, str):
        return inputs
    if isinstance(inputs, np.ndarray):
        return inputs
    if isinstance(inputs, abc.Mapping):
        return type(inputs)({
            key: cast_tensor_type(value, src_type, dst_type)
            for key, value in inputs.items()
        })
    if isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(elem, src_type, dst_type) for elem in inputs)
    return inputs
import mmcv


def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys into masks.

    A mask is represented as a list of polys, and a poly is a 1-D array.
    In the dataset, all masks are concatenated into a single 1-D tensor;
    this function restores the original nested representation.

    Args:
        polys (list): a list (length = image num) of 1-D tensors
        poly_lens (list): a list (length = image num) of poly length
        polys_per_mask (list): a list (length = image num) of poly number
            of each mask

    Returns:
        list: a list (length = image num) of list (length = mask num) of
            list (length = poly num) of numpy array
    """
    mask_polys_list = []
    for polys_single, lens_single, per_mask_single in zip(
            polys, poly_lens, polys_per_mask):
        # First cut the flat tensor into individual polys, then group the
        # resulting polys into per-mask lists.
        split_polys = mmcv.slice_list(polys_single, lens_single.tolist())
        mask_polys = mmcv.slice_list(split_polys, per_mask_single.tolist())
        mask_polys_list.append(mask_polys)
    return mask_polys_list
import inspect

import torch

from mmdet.utils import Registry

OPTIMIZERS = Registry('optimizer')


def register_torch_optimizers():
    """Register every optimizer class exposed by torch.optim.

    Returns:
        list[str]: names of the registered optimizer classes.
    """
    registered = []
    for name in dir(torch.optim):
        if name.startswith('__'):
            continue
        candidate = getattr(torch.optim, name)
        # Only real Optimizer subclasses; skip modules/functions re-exported
        # from torch.optim.
        if inspect.isclass(candidate) and issubclass(candidate,
                                                     torch.optim.Optimizer):
            OPTIMIZERS.register_module(candidate)
            registered.append(name)
    return registered


TORCH_OPTIMIZERS = register_torch_optimizers()
from functools import partial

import numpy as np

# NOTE(fix): the original imported map/zip from six.moves, a third-party
# shim that is redundant on Python 3 — the builtins are already lazy
# iterators with identical behavior, so the dependency is dropped.


def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert a batched, normalized image tensor back to uint8 numpy images.

    Args:
        tensor (Tensor): shape (N, C, H, W), normalized with *mean*/*std*.
        mean (tuple): per-channel mean that was subtracted.
        std (tuple): per-channel std that was divided by.
        to_rgb (bool): whether the tensor is RGB (converted back to BGR).

    Returns:
        list[np.ndarray]: N contiguous HWC uint8 images.
    """
    import mmcv  # local import keeps the pure helpers below importable

    num_imgs = tensor.size(0)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for img_id in range(num_imgs):
        # CHW float tensor -> HWC numpy image, then undo the normalization.
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        img = mmcv.imdenormalize(
            img, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs


def multi_apply(func, *args, **kwargs):
    """Apply *func* over parallel argument lists and transpose the results.

    ``multi_apply(f, [a1, a2], [b1, b2])`` calls ``f(a1, b1)`` and
    ``f(a2, b2)`` and regroups the tuple results into per-field lists.

    Returns:
        tuple[list]: one list per field returned by *func*.
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))


def unmap(data, count, inds, fill=0):
    """Unmap a subset of items (*data*) back to the original set of items
    (of size *count*), writing *fill* everywhere else.

    Args:
        data (Tensor): values for the kept subset.
        count (int): size of the full set.
        inds (Tensor): indices of *data* within the full set.
        fill (scalar): value for positions not in *inds*.

    Returns:
        Tensor: tensor of leading size *count* with *data* scattered in.
    """
    if data.dim() == 1:
        ret = data.new_full((count, ), fill)
        ret[inds] = data
    else:
        new_size = (count, ) + data.size()[1:]
        ret = data.new_full(new_size, fill)
        ret[inds, :] = data
    return ret
import copy

from mmdet.utils import build_from_cfg
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .registry import DATASETS


def _concat_dataset(cfg, default_args=None):
    """Build one dataset per annotation file and wrap them in a ConcatDataset."""
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)

    datasets = []
    for idx in range(len(ann_files)):
        # Deep-copy so each per-file config can be specialized independently.
        per_cfg = copy.deepcopy(cfg)
        per_cfg['ann_file'] = ann_files[idx]
        if isinstance(img_prefixes, (list, tuple)):
            per_cfg['img_prefix'] = img_prefixes[idx]
        if isinstance(seg_prefixes, (list, tuple)):
            per_cfg['seg_prefix'] = seg_prefixes[idx]
        if isinstance(proposal_files, (list, tuple)):
            per_cfg['proposal_file'] = proposal_files[idx]
        datasets.append(build_dataset(per_cfg, default_args))

    return ConcatDataset(datasets)


def build_dataset(cfg, default_args=None):
    """Build a dataset (possibly concatenated or repeated) from its config.

    Args:
        cfg (dict | list | tuple): dataset config; a list/tuple yields a
            ConcatDataset, type 'RepeatDataset' wraps a nested dataset,
            and a list of annotation files is concatenated per file.
        default_args (dict, optional): defaults forwarded to the constructor.

    Returns:
        Dataset: the constructed dataset object.
    """
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if isinstance(cfg['ann_file'], (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
-------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/datasets/cityscapes.py: -------------------------------------------------------------------------------- 1 | from .coco import CocoDataset 2 | from .registry import DATASETS 3 | 4 | 5 | @DATASETS.register_module 6 | class CityscapesDataset(CocoDataset): 7 | 8 | CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 9 | 'bicycle') 10 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/datasets/loader/__init__.py: -------------------------------------------------------------------------------- 1 | from .build_loader import build_dataloader 2 | from .sampler import DistributedGroupSampler, GroupSampler 3 | 4 | __all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader'] 5 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from .compose import Compose 2 | from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, 3 | Transpose, to_tensor) 4 | from .instaboost import InstaBoost 5 | from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals 6 | from .test_aug import MultiScaleFlipAug 7 | from .transforms import (Albu, Expand, MinIoURandomCrop, Normalize, Pad, 8 | PhotoMetricDistortion, RandomCrop, RandomFlip, Resize, 9 | SegRescale) 10 | 11 | __all__ = [ 12 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 13 | 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', 14 | 'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 15 | 'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand', 16 | 'PhotoMetricDistortion', 'Albu', 'InstaBoost' 17 | ] 18 | 
import collections

from mmdet.utils import build_from_cfg
from ..registry import PIPELINES


@PIPELINES.register_module
class Compose(object):
    """Chain several data-pipeline transforms into one callable.

    Each entry may be a config dict (built via the PIPELINES registry) or
    any callable. A transform returning None aborts the whole pipeline.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for transform in transforms:
            if isinstance(transform, dict):
                # Dict entries are pipeline configs; build the real transform.
                self.transforms.append(build_from_cfg(transform, PIPELINES))
            elif callable(transform):
                self.transforms.append(transform)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                # A transform may veto the sample (e.g. it was filtered out).
                return None
        return data

    def __repr__(self):
        body = ''.join('\n    {0}'.format(t) for t in self.transforms)
        return '{0}({1}\n)'.format(self.__class__.__name__, body)
import os.path as osp
import xml.etree.ElementTree as ET

import mmcv

from .registry import DATASETS
from .xml_style import XMLDataset


@DATASETS.register_module
class WIDERFaceDataset(XMLDataset):
    """Reader for the WIDER Face dataset in PASCAL VOC format.

    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """

    CLASSES = ('face', )

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Read image ids from *ann_file* and collect size/path info from
        the per-image annotation XMLs.

        Args:
            ann_file (str): file listing one image id per line.

        Returns:
            list[dict]: one info dict (id, filename, width, height) per image.
        """
        img_infos = []
        for img_id in mmcv.list_from_file(ann_file):
            filename = '{}.jpg'.format(img_id)
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                '{}.xml'.format(img_id))
            root = ET.parse(xml_path).getroot()
            size = root.find('size')
            # Images live in per-event subfolders recorded in the XML.
            folder = root.find('folder').text
            img_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(folder, filename),
                    width=int(size.find('width').text),
                    height=int(size.find('height').text)))

        return img_infos
'build_detector' 19 | ] 20 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/anchor_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_head import AnchorHead 2 | from .atss_head import ATSSHead 3 | from .fcos_head import FCOSHead 4 | from .fovea_head import FoveaHead 5 | from .free_anchor_retina_head import FreeAnchorRetinaHead 6 | from .ga_retina_head import GARetinaHead 7 | from .ga_rpn_head import GARPNHead 8 | from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead 9 | from .reppoints_head import RepPointsHead 10 | from .retina_head import RetinaHead 11 | from .retina_sepbn_head import RetinaSepBNHead 12 | from .rpn_head import RPNHead 13 | from .ssd_head import SSDHead 14 | 15 | __all__ = [ 16 | 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead', 17 | 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', 'SSDHead', 18 | 'FCOSHead', 'RepPointsHead', 'FoveaHead', 'FreeAnchorRetinaHead', 19 | 'ATSSHead' 20 | ] 21 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .hrnet import HRNet 2 | from .resnet import ResNet, make_res_layer 3 | from .resnext import ResNeXt 4 | from .ssd_vgg import SSDVGG 5 | 6 | __all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet'] 7 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_head import BBoxHead 2 | from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead 3 | from .double_bbox_head import DoubleConvFCBBoxHead 4 | 5 | __all__ = [ 6 | 'BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead', 
from torch import nn

from mmdet.utils import build_from_cfg
from .registry import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
                       ROI_EXTRACTORS, SHARED_HEADS)


def build(cfg, registry, default_args=None):
    """Build a module from config; a list of configs yields an nn.Sequential."""
    if isinstance(cfg, list):
        return nn.Sequential(
            *[build_from_cfg(sub_cfg, registry, default_args)
              for sub_cfg in cfg])
    return build_from_cfg(cfg, registry, default_args)


def build_backbone(cfg):
    """Build a backbone network."""
    return build(cfg, BACKBONES)


def build_neck(cfg):
    """Build a neck (e.g. FPN)."""
    return build(cfg, NECKS)


def build_roi_extractor(cfg):
    """Build an RoI feature extractor."""
    return build(cfg, ROI_EXTRACTORS)


def build_shared_head(cfg):
    """Build a shared head."""
    return build(cfg, SHARED_HEADS)


def build_head(cfg):
    """Build a task head (bbox/mask/anchor head)."""
    return build(cfg, HEADS)


def build_loss(cfg):
    """Build a loss module."""
    return build(cfg, LOSSES)


def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a full detector, injecting train/test configs as default args."""
    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
from ..registry import DETECTORS
from .two_stage import TwoStageDetector


@DETECTORS.register_module
class FasterRCNN(TwoStageDetector):
    """Faster R-CNN: a two-stage detector with an RPN and an RoI bbox head."""

    def __init__(self,
                 backbone,
                 rpn_head,
                 bbox_roi_extractor,
                 bbox_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 shared_head=None,
                 pretrained=None):
        # Pure forwarding constructor: all logic lives in TwoStageDetector.
        super(FasterRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            shared_head=shared_head,
            rpn_head=rpn_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)
from ..registry import DETECTORS
from .single_stage import SingleStageDetector


@DETECTORS.register_module
class FCOS(SingleStageDetector):
    """FCOS: a fully convolutional one-stage, anchor-free detector."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        # Pure forwarding constructor: all logic lives in SingleStageDetector.
        super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
                                   test_cfg, pretrained)
from ..registry import DETECTORS
from .single_stage import SingleStageDetector


@DETECTORS.register_module
class RetinaNet(SingleStageDetector):
    """RetinaNet: a one-stage detector with focal-loss classification."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        # Pure forwarding constructor: all logic lives in SingleStageDetector.
        super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg,
                                        test_cfg, pretrained)
import torch.nn as nn


def accuracy(pred, target, topk=1):
    """Compute top-k classification accuracy as a percentage.

    Args:
        pred (Tensor): class scores of shape (N, C).
        target (Tensor): ground-truth labels of shape (N,).
        topk (int | tuple[int]): which top-k accuracies to compute.

    Returns:
        Tensor | list[Tensor]: a single (1,)-shaped accuracy tensor if
        *topk* is an int, otherwise one per requested k.
    """
    assert isinstance(topk, (int, tuple))
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False

    # BUGFIX: an empty batch made `100.0 / pred.size(0)` raise
    # ZeroDivisionError; report 0% accuracy instead.
    if pred.size(0) == 0:
        res = [pred.new_zeros(1) for _ in topk]
        return res[0] if return_single else res

    maxk = max(topk)
    _, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.t()
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))

    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): safe even if the slice of the
        # transposed comparison ever becomes non-contiguous.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res


class Accuracy(nn.Module):
    """nn.Module wrapper around :func:`accuracy` with a fixed *topk*."""

    def __init__(self, topk=(1, )):
        super().__init__()
        self.topk = topk

    def forward(self, pred, target):
        """Return the accuracy of *pred* against *target* for self.topk."""
        return accuracy(pred, target, self.topk)
avg_factor=avg_factor) 25 | return loss 26 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/losses/smooth_l1_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from ..registry import LOSSES 5 | from .utils import weighted_loss 6 | 7 | 8 | @weighted_loss 9 | def smooth_l1_loss(pred, target, beta=1.0): 10 | assert beta > 0 11 | assert pred.size() == target.size() and target.numel() > 0 12 | diff = torch.abs(pred - target) 13 | loss = torch.where(diff < beta, 0.5 * diff * diff / beta, 14 | diff - 0.5 * beta) 15 | return loss 16 | 17 | 18 | @LOSSES.register_module 19 | class SmoothL1Loss(nn.Module): 20 | 21 | def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): 22 | super(SmoothL1Loss, self).__init__() 23 | self.beta = beta 24 | self.reduction = reduction 25 | self.loss_weight = loss_weight 26 | 27 | def forward(self, 28 | pred, 29 | target, 30 | weight=None, 31 | avg_factor=None, 32 | reduction_override=None, 33 | **kwargs): 34 | assert reduction_override in (None, 'none', 'mean', 'sum') 35 | reduction = ( 36 | reduction_override if reduction_override else self.reduction) 37 | loss_bbox = self.loss_weight * smooth_l1_loss( 38 | pred, 39 | target, 40 | weight, 41 | beta=self.beta, 42 | reduction=reduction, 43 | avg_factor=avg_factor, 44 | **kwargs) 45 | return loss_bbox 46 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .fcn_mask_head import FCNMaskHead 2 | from .fused_semantic_head import FusedSemanticHead 3 | from .grid_head import GridHead 4 | from .htc_mask_head import HTCMaskHead 5 | from .maskiou_head import MaskIoUHead 6 | 7 | __all__ = [ 8 | 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 9 | 
'MaskIoUHead' 10 | ] 11 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/mask_heads/htc_mask_head.py: -------------------------------------------------------------------------------- 1 | from ..registry import HEADS 2 | from ..utils import ConvModule 3 | from .fcn_mask_head import FCNMaskHead 4 | 5 | 6 | @HEADS.register_module 7 | class HTCMaskHead(FCNMaskHead): 8 | 9 | def __init__(self, with_conv_res=True, *args, **kwargs): 10 | super(HTCMaskHead, self).__init__(*args, **kwargs) 11 | self.with_conv_res = with_conv_res 12 | if self.with_conv_res: 13 | self.conv_res = ConvModule( 14 | self.conv_out_channels, 15 | self.conv_out_channels, 16 | 1, 17 | conv_cfg=self.conv_cfg, 18 | norm_cfg=self.norm_cfg) 19 | 20 | def init_weights(self): 21 | super(HTCMaskHead, self).init_weights() 22 | if self.with_conv_res: 23 | self.conv_res.init_weights() 24 | 25 | def forward(self, x, res_feat=None, return_logits=True, return_feat=True): 26 | if res_feat is not None: 27 | assert self.with_conv_res 28 | res_feat = self.conv_res(res_feat) 29 | x = x + res_feat 30 | for conv in self.convs: 31 | x = conv(x) 32 | res_feat = x 33 | outs = [] 34 | if return_logits: 35 | x = self.upsample(x) 36 | if self.upsample_method == 'deconv': 37 | x = self.relu(x) 38 | mask_pred = self.conv_logits(x) 39 | outs.append(mask_pred) 40 | if return_feat: 41 | outs.append(res_feat) 42 | return outs if len(outs) > 1 else outs[0] 43 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .bfp import BFP 2 | from .fpn import FPN 3 | from .fpn_carafe import FPN_CARAFE 4 | from .hrfpn import HRFPN 5 | from .nas_fpn import NASFPN 6 | 7 | __all__ = ['FPN', 'BFP', 'HRFPN', 'NASFPN', 'FPN_CARAFE'] 8 | 
-------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | from .generalized_attention import GeneralizedAttention 2 | from .non_local import NonLocal2D 3 | 4 | __all__ = ['NonLocal2D', 'GeneralizedAttention'] 5 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/registry.py: -------------------------------------------------------------------------------- 1 | from mmdet.utils import Registry 2 | 3 | BACKBONES = Registry('backbone') 4 | NECKS = Registry('neck') 5 | ROI_EXTRACTORS = Registry('roi_extractor') 6 | SHARED_HEADS = Registry('shared_head') 7 | HEADS = Registry('head') 8 | LOSSES = Registry('loss') 9 | DETECTORS = Registry('detector') 10 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .single_level import SingleRoIExtractor 2 | 3 | __all__ = ['SingleRoIExtractor'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .conv_module import ConvModule, build_conv_layer 2 | from .conv_ws import ConvWS2d, conv_ws_2d 3 | from .norm import build_norm_layer 4 | from .scale import Scale 5 | from .upsample import build_upsample_layer 6 | from .weight_init import (bias_init_with_prob, 
kaiming_init, normal_init, 7 | uniform_init, xavier_init) 8 | 9 | __all__ = [ 10 | 'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule', 11 | 'build_norm_layer', 'build_upsample_layer', 'xavier_init', 'normal_init', 12 | 'uniform_init', 'kaiming_init', 'bias_init_with_prob', 'Scale' 13 | ] 14 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/utils/conv_ws.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | def conv_ws_2d(input, 6 | weight, 7 | bias=None, 8 | stride=1, 9 | padding=0, 10 | dilation=1, 11 | groups=1, 12 | eps=1e-5): 13 | c_in = weight.size(0) 14 | weight_flat = weight.view(c_in, -1) 15 | mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) 16 | std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) 17 | weight = (weight - mean) / (std + eps) 18 | return F.conv2d(input, weight, bias, stride, padding, dilation, groups) 19 | 20 | 21 | class ConvWS2d(nn.Conv2d): 22 | 23 | def __init__(self, 24 | in_channels, 25 | out_channels, 26 | kernel_size, 27 | stride=1, 28 | padding=0, 29 | dilation=1, 30 | groups=1, 31 | bias=True, 32 | eps=1e-5): 33 | super(ConvWS2d, self).__init__( 34 | in_channels, 35 | out_channels, 36 | kernel_size, 37 | stride=stride, 38 | padding=padding, 39 | dilation=dilation, 40 | groups=groups, 41 | bias=bias) 42 | self.eps = eps 43 | 44 | def forward(self, x): 45 | return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, 46 | self.dilation, self.groups, self.eps) 47 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/utils/scale.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class Scale(nn.Module): 6 | """ 7 | A learnable scale parameter 8 | """ 9 | 10 | def 
__init__(self, scale=1.0): 11 | super(Scale, self).__init__() 12 | self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) 13 | 14 | def forward(self, x): 15 | return x * self.scale 16 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/models/utils/weight_init.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | 4 | 5 | def xavier_init(module, gain=1, bias=0, distribution='normal'): 6 | assert distribution in ['uniform', 'normal'] 7 | if distribution == 'uniform': 8 | nn.init.xavier_uniform_(module.weight, gain=gain) 9 | else: 10 | nn.init.xavier_normal_(module.weight, gain=gain) 11 | if hasattr(module, 'bias'): 12 | nn.init.constant_(module.bias, bias) 13 | 14 | 15 | def normal_init(module, mean=0, std=1, bias=0): 16 | nn.init.normal_(module.weight, mean, std) 17 | if hasattr(module, 'bias'): 18 | nn.init.constant_(module.bias, bias) 19 | 20 | 21 | def uniform_init(module, a=0, b=1, bias=0): 22 | nn.init.uniform_(module.weight, a, b) 23 | if hasattr(module, 'bias'): 24 | nn.init.constant_(module.bias, bias) 25 | 26 | 27 | def kaiming_init(module, 28 | mode='fan_out', 29 | nonlinearity='relu', 30 | bias=0, 31 | distribution='normal'): 32 | assert distribution in ['uniform', 'normal'] 33 | if distribution == 'uniform': 34 | nn.init.kaiming_uniform_( 35 | module.weight, mode=mode, nonlinearity=nonlinearity) 36 | else: 37 | nn.init.kaiming_normal_( 38 | module.weight, mode=mode, nonlinearity=nonlinearity) 39 | if hasattr(module, 'bias'): 40 | nn.init.constant_(module.bias, bias) 41 | 42 | 43 | def bias_init_with_prob(prior_prob): 44 | """ initialize conv/fc bias value according to giving probablity""" 45 | bias_init = float(-np.log((1 - prior_prob) / prior_prob)) 46 | return bias_init 47 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .context_block import ContextBlock 2 | from .dcn import (DeformConv, DeformConvPack, DeformRoIPooling, 3 | DeformRoIPoolingPack, ModulatedDeformConv, 4 | ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack, 5 | deform_conv, deform_roi_pooling, modulated_deform_conv) 6 | from .masked_conv import MaskedConv2d 7 | from .nms import nms, soft_nms 8 | from .roi_align import RoIAlign, roi_align 9 | from .roi_pool import RoIPool, roi_pool 10 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 11 | from .utils import get_compiler_version, get_compiling_cuda_version 12 | 13 | __all__ = [ 14 | 'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 15 | 'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 16 | 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv', 17 | 'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv', 18 | 'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss', 19 | 'MaskedConv2d', 'ContextBlock', 'get_compiler_version', 20 | 'get_compiling_cuda_version' 21 | ] 22 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/carafe/__init__.py: -------------------------------------------------------------------------------- 1 | from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive 2 | 3 | __all__ = ['carafe', 'carafe_naive', 'CARAFE', 'CARAFENaive', 'CARAFEPack'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/carafe/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 4 | 5 | NVCC_ARGS = [ 6 | '-D__CUDA_NO_HALF_OPERATORS__', 7 | '-D__CUDA_NO_HALF_CONVERSIONS__', 8 | '-D__CUDA_NO_HALF2_OPERATORS__', 9 | ] 
10 | 11 | setup( 12 | name='carafe', 13 | ext_modules=[ 14 | CUDAExtension( 15 | 'carafe_cuda', 16 | ['src/carafe_cuda.cpp', 'src/carafe_cuda_kernel.cu'], 17 | extra_compile_args={ 18 | 'cxx': [], 19 | 'nvcc': NVCC_ARGS 20 | }), 21 | CUDAExtension( 22 | 'carafe_naive_cuda', 23 | ['src/carafe_naive_cuda.cpp', 'src/carafe_naive_cuda_kernel.cu'], 24 | extra_compile_args={ 25 | 'cxx': [], 26 | 'nvcc': NVCC_ARGS 27 | }) 28 | ], 29 | cmdclass={'build_ext': BuildExtension}) 30 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/dcn/__init__.py: -------------------------------------------------------------------------------- 1 | from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, 2 | ModulatedDeformConvPack, deform_conv, 3 | modulated_deform_conv) 4 | from .deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, 5 | ModulatedDeformRoIPoolingPack, deform_roi_pooling) 6 | 7 | __all__ = [ 8 | 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 9 | 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 10 | 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv', 11 | 'deform_roi_pooling' 12 | ] 13 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/masked_conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .masked_conv import MaskedConv2d, masked_conv2d 2 | 3 | __all__ = ['masked_conv2d', 'MaskedConv2d'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/nms/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_wrapper import nms, soft_nms 2 | 3 | __all__ = ['nms', 'soft_nms'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/nms/src/nms_cuda.cpp: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | #include 3 | 4 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 5 | 6 | at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); 7 | 8 | at::Tensor nms(const at::Tensor& dets, const float threshold) { 9 | CHECK_CUDA(dets); 10 | if (dets.numel() == 0) 11 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); 12 | return nms_cuda(dets, threshold); 13 | } 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("nms", &nms, "non-maximum suppression"); 17 | } -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/roi_align/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_align import RoIAlign, roi_align 2 | 3 | __all__ = ['roi_align', 'RoIAlign'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/roi_align/gradcheck.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | import numpy as np 5 | import torch 6 | from torch.autograd import gradcheck 7 | 8 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 9 | from roi_align import RoIAlign # noqa: E402, isort:skip 10 | 11 | feat_size = 15 12 | spatial_scale = 1.0 / 8 13 | img_size = feat_size / spatial_scale 14 | num_imgs = 2 15 | num_rois = 20 16 | 17 | batch_ind = np.random.randint(num_imgs, size=(num_rois, 1)) 18 | rois = np.random.rand(num_rois, 4) * img_size * 0.5 19 | rois[:, 2:] += img_size * 0.5 20 | rois = np.hstack((batch_ind, rois)) 21 | 22 | feat = torch.randn( 23 | num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0') 24 | rois = torch.from_numpy(rois).float().cuda() 25 | inputs = (feat, 
rois) 26 | print('Gradcheck for roi align...') 27 | test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3) 28 | print(test) 29 | test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3) 30 | print(test) 31 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/roi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_pool import RoIPool, roi_pool 2 | 3 | __all__ = ['roi_pool', 'RoIPool'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/roi_pool/gradcheck.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | import torch 5 | from torch.autograd import gradcheck 6 | 7 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 8 | from roi_pool import RoIPool # noqa: E402, isort:skip 9 | 10 | feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda() 11 | rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55], 12 | [1, 67, 40, 110, 120]]).cuda() 13 | inputs = (feat, rois) 14 | print('Gradcheck for roi pooling...') 15 | test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3) 16 | print(test) 17 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/sigmoid_focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 2 | 3 | __all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss'] 4 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # from . 
import compiling_info 2 | from .compiling_info import get_compiler_version, get_compiling_cuda_version 3 | 4 | # get_compiler_version = compiling_info.get_compiler_version 5 | # get_compiling_cuda_version = compiling_info.get_compiling_cuda_version 6 | 7 | __all__ = ['get_compiler_version', 'get_compiling_cuda_version'] 8 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/ops/utils/src/compiling_info.cpp: -------------------------------------------------------------------------------- 1 | // modified from 2 | // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/vision.cpp 3 | #include 4 | #include 5 | 6 | #ifdef WITH_CUDA 7 | int get_cudart_version() { return CUDART_VERSION; } 8 | #endif 9 | 10 | std::string get_compiling_cuda_version() { 11 | #ifdef WITH_CUDA 12 | std::ostringstream oss; 13 | 14 | // copied from 15 | // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 16 | auto printCudaStyleVersion = [&](int v) { 17 | oss << (v / 1000) << "." << (v / 10 % 100); 18 | if (v % 10 != 0) { 19 | oss << "." << (v % 10); 20 | } 21 | }; 22 | printCudaStyleVersion(get_cudart_version()); 23 | return oss.str(); 24 | #else 25 | return std::string("not available"); 26 | #endif 27 | } 28 | 29 | // similar to 30 | // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp 31 | std::string get_compiler_version() { 32 | std::ostringstream ss; 33 | #if defined(__GNUC__) 34 | #ifndef __clang__ 35 | { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } 36 | #endif 37 | #endif 38 | 39 | #if defined(__clang_major__) 40 | { 41 | ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." 
42 | << __clang_patchlevel__; 43 | } 44 | #endif 45 | 46 | #if defined(_MSC_VER) 47 | { ss << "MSVC " << _MSC_FULL_VER; } 48 | #endif 49 | return ss.str(); 50 | } 51 | 52 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 53 | m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); 54 | m.def("get_compiling_cuda_version", &get_compiling_cuda_version, 55 | "get_compiling_cuda_version"); 56 | } 57 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .flops_counter import get_model_complexity_info 3 | from .logger import get_root_logger, print_log 4 | from .registry import Registry, build_from_cfg 5 | 6 | __all__ = [ 7 | 'Registry', 'build_from_cfg', 'get_model_complexity_info', 8 | 'get_root_logger', 'print_log', 'collect_env' 9 | ] 10 | -------------------------------------------------------------------------------- /detectors/mmdetection/mmdet/utils/profiling.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import sys 3 | import time 4 | 5 | import torch 6 | 7 | if sys.version_info >= (3, 7): 8 | 9 | @contextlib.contextmanager 10 | def profile_time(trace_name, 11 | name, 12 | enabled=True, 13 | stream=None, 14 | end_stream=None): 15 | """Print time spent by CPU and GPU. 16 | 17 | Useful as a temporary context manager to find sweet spots of 18 | code suitable for async implementation. 
19 | 20 | """ 21 | if (not enabled) or not torch.cuda.is_available(): 22 | yield 23 | return 24 | stream = stream if stream else torch.cuda.current_stream() 25 | end_stream = end_stream if end_stream else stream 26 | start = torch.cuda.Event(enable_timing=True) 27 | end = torch.cuda.Event(enable_timing=True) 28 | stream.record_event(start) 29 | try: 30 | cpu_start = time.monotonic() 31 | yield 32 | finally: 33 | cpu_end = time.monotonic() 34 | end_stream.record_event(end) 35 | end.synchronize() 36 | cpu_time = (cpu_end - cpu_start) * 1000 37 | gpu_time = start.elapsed_time(end) 38 | msg = "{} {} cpu_time {:.2f} ms ".format(trace_name, name, 39 | cpu_time) 40 | msg += "gpu_time {:.2f} ms stream {}".format(gpu_time, stream) 41 | print(msg, end_stream) 42 | -------------------------------------------------------------------------------- /detectors/mmdetection/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /detectors/mmdetection/requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/runtime.txt 2 | -r requirements/optional.txt 3 | -r requirements/tests.txt 4 | -r requirements/build.txt 5 | -------------------------------------------------------------------------------- /detectors/mmdetection/requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building mmdetection 2 | numpy 3 | torch>=1.1 4 | -------------------------------------------------------------------------------- 
/detectors/mmdetection/requirements/optional.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 2 | imagecorruptions 3 | -------------------------------------------------------------------------------- /detectors/mmdetection/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | mmcv>=0.3.1 3 | numpy 4 | # need older pillow until torchvision is fixed 5 | Pillow<=6.2.2 6 | six 7 | terminaltables 8 | torch>=1.1 9 | torchvision 10 | -------------------------------------------------------------------------------- /detectors/mmdetection/requirements/tests.txt: -------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 3 | flake8 4 | isort 5 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 6 | kwarray 7 | pytest 8 | pytest-cov 9 | pytest-runner 10 | ubelt 11 | xdoctest >= 0.10.0 12 | yapf 13 | -------------------------------------------------------------------------------- /detectors/mmdetection/tests/test_soft_nms.py: -------------------------------------------------------------------------------- 1 | """ 2 | CommandLine: 3 | pytest tests/test_soft_nms.py 4 | """ 5 | import numpy as np 6 | import torch 7 | 8 | from mmdet.ops.nms.nms_wrapper import soft_nms 9 | 10 | 11 | def test_soft_nms_device_and_dtypes_cpu(): 12 | """ 13 | CommandLine: 14 | xdoctest -m tests/test_soft_nms.py test_soft_nms_device_and_dtypes_cpu 15 | """ 16 | iou_thr = 0.7 17 | base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9], 18 | [49.3, 32.9, 51.0, 35.3, 0.9], 19 | [35.3, 11.5, 39.9, 14.5, 0.4], 20 | [35.2, 11.7, 39.7, 15.7, 0.3]]) 21 | 22 | # CPU can handle float32 and float64 23 | dets = base_dets.astype(np.float32) 24 | new_dets, inds = soft_nms(dets, iou_thr) 25 | assert dets.dtype == new_dets.dtype 26 | assert len(inds) == len(new_dets) == 4 27 | 28 | dets = 
torch.FloatTensor(base_dets) 29 | new_dets, inds = soft_nms(dets, iou_thr) 30 | assert dets.dtype == new_dets.dtype 31 | assert len(inds) == len(new_dets) == 4 32 | 33 | dets = base_dets.astype(np.float64) 34 | new_dets, inds = soft_nms(dets, iou_thr) 35 | assert dets.dtype == new_dets.dtype 36 | assert len(inds) == len(new_dets) == 4 37 | 38 | dets = torch.DoubleTensor(base_dets) 39 | new_dets, inds = soft_nms(dets, iou_thr) 40 | assert dets.dtype == new_dets.dtype 41 | assert len(inds) == len(new_dets) == 4 42 | -------------------------------------------------------------------------------- /detectors/mmdetection/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import numpy.testing as npt 2 | 3 | from mmdet.utils.flops_counter import params_to_string 4 | 5 | 6 | def test_params_to_string(): 7 | npt.assert_equal(params_to_string(1e9), '1000.0 M') 8 | npt.assert_equal(params_to_string(2e5), '200.0 k') 9 | npt.assert_equal(params_to_string(3e-9), '3e-09') 10 | -------------------------------------------------------------------------------- /detectors/mmdetection/tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | CONFIG=$1 6 | CHECKPOINT=$2 7 | GPUS=$3 8 | PORT=${PORT:-29500} 9 | 10 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 11 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 12 | -------------------------------------------------------------------------------- /detectors/mmdetection/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | CONFIG=$1 6 | GPUS=$2 7 | PORT=${PORT:-29500} 8 | 9 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 10 | $(dirname "$0")/train.py $CONFIG --launcher 
pytorch ${@:3} 11 | -------------------------------------------------------------------------------- /detectors/mmdetection/tools/publish_model.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import subprocess 3 | 4 | import torch 5 | 6 | 7 | def parse_args(): 8 | parser = argparse.ArgumentParser( 9 | description='Process a checkpoint to be published') 10 | parser.add_argument('in_file', help='input checkpoint filename') 11 | parser.add_argument('out_file', help='output checkpoint filename') 12 | args = parser.parse_args() 13 | return args 14 | 15 | 16 | def process_checkpoint(in_file, out_file): 17 | checkpoint = torch.load(in_file, map_location='cpu') 18 | # remove optimizer for smaller file size 19 | if 'optimizer' in checkpoint: 20 | del checkpoint['optimizer'] 21 | # if it is necessary to remove some sensitive data in checkpoint['meta'], 22 | # add the code here. 23 | torch.save(checkpoint, out_file) 24 | sha = subprocess.check_output(['sha256sum', out_file]).decode() 25 | final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) 26 | subprocess.Popen(['mv', out_file, final_file]) 27 | 28 | 29 | def main(): 30 | args = parse_args() 31 | process_checkpoint(args.in_file, args.out_file) 32 | 33 | 34 | if __name__ == '__main__': 35 | main() 36 | -------------------------------------------------------------------------------- /detectors/mmdetection/tools/reshape_faster_rcnn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | pretrained_weights = torch.load('/home/koehlp/Downloads/work_dirs/detector/faster_rcnn_x101/faster_rcnn_x101_64x4d_fpn_1x_20181218-c9c69c8f.pth') 5 | 6 | num_class = 2 7 | pretrained_weights['state_dict']['bbox_head.fc_cls.weight'].resize_(num_class, 1024) 8 | pretrained_weights['state_dict']['bbox_head.fc_cls.bias'].resize_(num_class) 9 | 
pretrained_weights['state_dict']['bbox_head.fc_reg.weight'].resize_(num_class*4, 1024) 10 | pretrained_weights['state_dict']['bbox_head.fc_reg.bias'].resize_(num_class*4) 11 | 12 | #You still need to add a hash 13 | torch.save(pretrained_weights, "/home/koehlp/Downloads/work_dirs/detector/faster_rcnn_x101/faster_rcnn_x101_64x4d_fpn_1x_20181218-c9c69c8f_2cls.pth") -------------------------------------------------------------------------------- /detectors/mmdetection/tools/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 24 | -------------------------------------------------------------------------------- /detectors/mmdetection/tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${5:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | PY_ARGS=${PY_ARGS:-"--validate"} 14 | 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 24 | 
-------------------------------------------------------------------------------- /detectors/mmdetection/tools/upgrade_model_version.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import re 3 | from collections import OrderedDict 4 | 5 | import torch 6 | 7 | 8 | def convert(in_file, out_file): 9 | """Convert keys in checkpoints. 10 | 11 | There can be some breaking changes during the development of mmdetection, 12 | and this tool is used for upgrading checkpoints trained with old versions 13 | to the latest one. 14 | """ 15 | checkpoint = torch.load(in_file) 16 | in_state_dict = checkpoint.pop('state_dict') 17 | out_state_dict = OrderedDict() 18 | for key, val in in_state_dict.items(): 19 | # Use ConvModule instead of nn.Conv2d in RetinaNet 20 | # cls_convs.0.weight -> cls_convs.0.conv.weight 21 | m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) 22 | if m is not None: 23 | param = m.groups()[1] 24 | new_key = key.replace(param, 'conv.{}'.format(param)) 25 | out_state_dict[new_key] = val 26 | continue 27 | 28 | out_state_dict[key] = val 29 | checkpoint['state_dict'] = out_state_dict 30 | torch.save(checkpoint, out_file) 31 | 32 | 33 | def main(): 34 | parser = argparse.ArgumentParser(description='Upgrade model version') 35 | parser.add_argument('in_file', help='input checkpoint file') 36 | parser.add_argument('out_file', help='output checkpoint file') 37 | args = parser.parse_args() 38 | convert(args.in_file, args.out_file) 39 | 40 | 41 | if __name__ == '__main__': 42 | main() 43 | -------------------------------------------------------------------------------- /evaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/evaluation/__init__.py -------------------------------------------------------------------------------- 
/evaluation/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/evaluation/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /evaluation/__pycache__/motmetrics_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/evaluation/__pycache__/motmetrics_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /evaluation/__pycache__/multicam_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/evaluation/__pycache__/multicam_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /evaluation/py_motmetrics/.gitignore: -------------------------------------------------------------------------------- 1 | # Windows image file caches 2 | Thumbs.db 3 | ehthumbs.db 4 | 5 | # Folder config file 6 | Desktop.ini 7 | 8 | # Recycle Bin used on file shares 9 | $RECYCLE.BIN/ 10 | 11 | # Windows Installer files 12 | *.cab 13 | *.msi 14 | *.msm 15 | *.msp 16 | 17 | # Windows shortcuts 18 | *.lnk 19 | 20 | # ========================= 21 | # Operating System Files 22 | # ========================= 23 | 24 | # OSX 25 | # ========================= 26 | 27 | .DS_Store 28 | .AppleDouble 29 | .LSOverride 30 | 31 | # Thumbnails 32 | ._* 33 | 34 | # Files that might appear in the root of a volume 35 | .DocumentRevisions-V100 36 | .fseventsd 37 | .Spotlight-V100 38 | .TemporaryItems 39 | .Trashes 40 | .VolumeIcon.icns 41 | 42 | # Directories potentially created on remote AFP share 43 | 
.AppleDB 44 | .AppleDesktop 45 | Network Trash Folder 46 | Temporary Items 47 | .apdisk 48 | *.pyc 49 | .cache/v/cache/lastfailed 50 | *.egg-info/ 51 | build/ 52 | dist/ 53 | -------------------------------------------------------------------------------- /evaluation/py_motmetrics/.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | env: 3 | - PYTHON=3.5 PANDAS>=0.19.2 4 | - PYTHON=3.6 PANDAS>=0.19.2 5 | install: 6 | # Install conda 7 | - if [[ "$PYTHON" == "2.7" ]]; then 8 | wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; 9 | else 10 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; 11 | fi 12 | - bash miniconda.sh -b -p $HOME/miniconda 13 | - export PATH="$HOME/miniconda/bin:$PATH" 14 | - hash -r 15 | - conda config --set always_yes yes --set changeps1 no 16 | - conda config --add channels pandas 17 | - conda update -q conda 18 | - conda info -a 19 | 20 | # Install deps 21 | - deps='pip numpy scipy cython' 22 | - conda create -q -n pyenv python=$PYTHON pandas=$PANDAS $deps 23 | - source activate pyenv 24 | - python -m pip install -U pip 25 | - pip install pytest 26 | - pip install . 
# Evaluation image for py-motmetrics: installs Python 3 with a pinned pandas,
# copies the package and data in, and runs the MOT Challenge evaluator.
FROM ubuntu:latest

# NOTE(review): MAINTAINER is deprecated in favour of `LABEL maintainer=...`;
# kept as-is to avoid any behavioural change.
MAINTAINER Avgerinos Christos

#ARG GT_DIR
#ARG TEST_DIR

# Install the Python 3 toolchain and make plain `python` resolve to python3.
RUN apt-get update \
    && apt-get install -y python3-pip python3-dev vim \
    && cd /usr/local/bin \
    && ln -s /usr/bin/python3 python \
    && pip3 install --upgrade pip

RUN pip3 install --no-cache-dir numpy scipy
# -I forces a clean install of exactly this pandas version.
RUN pip install -Iv pandas==0.21.0
RUN mkdir -p /motmetrics/py-motmetrics
RUN mkdir -p /motmetrics/2DMOT2015

COPY ./py-motmetrics /motmetrics/py-motmetrics
COPY ./data /motmetrics/data

#RUN pip install motmetrics
# Editable install of the local checkout copied above.
RUN pip install -e ./motmetrics/py-motmetrics/

#RUN pip install -r motmetrics/py-motmetrics/requirements.txt

# Default ground-truth/test data locations; override with `docker run -e`.
ENV GT_DIR motmetrics/data/train/
ENV TEST_DIR motmetrics/data/test/

#ENTRYPOINT python3 -m motmetrics.apps.eval_motchallenge motmetrics/data/train/ motmetrics/data/test/ && /bin/bash
# `sh -c` so ${GT_DIR}/${TEST_DIR} expand at container start, not build time.
CMD ["sh", "-c", "python3 -m motmetrics.apps.eval_motchallenge ${GT_DIR} ${TEST_DIR} && /bin/bash"]
following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /evaluation/py_motmetrics/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md LICENSE requirements.txt 2 | recursive-include motmetrics/data * 3 | recursive-include motmetrics/etc * -------------------------------------------------------------------------------- /evaluation/py_motmetrics/Release.md: -------------------------------------------------------------------------------- 1 | 2 | ## Release 3 | 4 | - git flow release start 5 | - version bump motmetrics/__init__.py 6 | - conda env create -f environment.yml 7 | - activate motmetrics-env 8 | - [pip install lapsolver] 9 | - pip install . 
10 | - pytest 11 | - deactivate 12 | - conda env remove -n motmetrics-env 13 | - git add, commit 14 | - git flow release finish 15 | - git push 16 | - git push --tags 17 | - git checkout master 18 | - git push 19 | - git checkout develop 20 | - check appveyor, travis and pypi -------------------------------------------------------------------------------- /evaluation/py_motmetrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/evaluation/py_motmetrics/__init__.py -------------------------------------------------------------------------------- /evaluation/py_motmetrics/environment.yml: -------------------------------------------------------------------------------- 1 | # conda env create -f environment.yml 2 | name: motmetrics-env 3 | dependencies: 4 | - python=3.6 5 | - numpy 6 | - scipy 7 | - pandas 8 | - pip 9 | - pip: 10 | - pytest -------------------------------------------------------------------------------- /evaluation/py_motmetrics/motmetrics/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .mot import MOTAccumulator 3 | import motmetrics.lap 4 | import motmetrics.metrics 5 | import motmetrics.distances 6 | import motmetrics.io 7 | import motmetrics.utils 8 | 9 | 10 | # Needs to be last line 11 | __version__ = '1.1.3' -------------------------------------------------------------------------------- /evaluation/py_motmetrics/motmetrics/apps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/evaluation/py_motmetrics/motmetrics/apps/__init__.py -------------------------------------------------------------------------------- /evaluation/py_motmetrics/motmetrics/apps/list_metrics.py: 
"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.

Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics

Command-line helper: prints a markdown table describing every metric the
default metrics host exposes.
"""

if __name__ == '__main__':
    import motmetrics

    # create() builds the default metrics host; list_metrics_markdown()
    # renders its registered metrics as a markdown table on stdout.
    mh = motmetrics.metrics.create()
    print(mh.list_metrics_markdown())
def test_norm2squared():
    """norm2squared_matrix yields pairwise squared L2 distances; entries
    above ``max_d2`` are masked to NaN."""
    # Three 2-D object positions vs. two hypothesis positions.
    a = np.array([
        [1., 2],
        [2., 2],
        [3., 2],
    ])

    b = np.array([
        [0., 0],
        [1., 1],
    ])

    # Unmasked: plain squared Euclidean distance matrix (3 x 2).
    C = mm.distances.norm2squared_matrix(a, b)
    np.testing.assert_allclose(
        C,
        [
            [5, 1],
            [8, 2],
            [13, 5]
        ]
    )

    # With max_d2=5: distances strictly above the cutoff become NaN
    # (values equal to the cutoff are kept).
    C = mm.distances.norm2squared_matrix(a, b, max_d2=5)
    np.testing.assert_allclose(
        C,
        [
            [5, 1],
            [np.nan, 2],
            [np.nan, 5]
        ]
    )
(0,0,412,0,430,124,0,0,0,'worker',0,0,0,0), 13 | (1,0,412,10,430,114,0,0,1,'pc',1,0,1,0), 14 | (1,1,412,0,430,124,0,0,1,'pc',0,1,0,0), 15 | (2,2,412,0,430,124,0,0,1,'worker',1,1,0,1) 16 | ]) 17 | 18 | assert (df.reset_index().values == expected.values).all() 19 | 20 | def test_load_motchallenge(): 21 | df = mm.io.loadtxt(os.path.join(DATA_DIR, 'iotest/motchallenge.txt'), fmt=mm.io.Format.MOT15_2D) 22 | 23 | expected = pd.DataFrame([ 24 | (1,1,398,181,121,229,1,-1,-1), #Note -1 on x and y for correcting matlab 25 | (1,2,281,200,92,184,1,-1,-1), 26 | (2,2,268,201,87,182,1,-1,-1), 27 | (2,3,70,150,100,284,1,-1,-1), 28 | (2,4,199,205,55,137,1,-1,-1), 29 | ]) 30 | 31 | assert (df.reset_index().values == expected.values).all() -------------------------------------------------------------------------------- /evaluation/py_motmetrics/motmetrics/tests/test_lap.py: -------------------------------------------------------------------------------- 1 | from pytest import approx 2 | import numpy as np 3 | import motmetrics.lap as lap 4 | 5 | 6 | def test_lap_solvers(): 7 | assert len(lap.available_solvers) > 0 8 | print(lap.available_solvers) 9 | 10 | costs = np.array([[6, 9, 1],[10, 3, 2],[8, 7, 4.]]) 11 | costs_copy = costs.copy() 12 | results = [lap.linear_sum_assignment(costs, solver=s) for s in lap.available_solvers] 13 | expected = np.array([[0, 1, 2], [2, 1, 0]]) 14 | [np.testing.assert_allclose(r, expected) for r in results] 15 | np.testing.assert_allclose(costs, costs_copy) 16 | 17 | 18 | costs = np.array([[5, 9, np.nan],[10, np.nan, 2],[8, 7, 4.]]) 19 | costs_copy = costs.copy() 20 | results = [lap.linear_sum_assignment(costs, solver=s) for s in lap.available_solvers] 21 | expected = np.array([[0, 1, 2], [0, 2, 1]]) 22 | [np.testing.assert_allclose(r, expected) for r in results] 23 | np.testing.assert_allclose(costs, costs_copy) 24 | 25 | def test_change_solver(): 26 | 27 | def mysolver(x): 28 | mysolver.called += 1 29 | return None, None 30 | mysolver.called = 0 31 | 
"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.

Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
"""

import os
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Runtime dependencies live in requirements.txt so pip and setup.py share a
# single source of truth.
with open('requirements.txt') as f:
    required = f.read().splitlines()

# Read __version__ from the package without importing it.  The version
# assignment is expected to be the LAST line of motmetrics/__init__.py.
# Using a context manager closes the handle deterministically (the previous
# inline open() leaked it).
with open('motmetrics/__init__.py') as f:
    version = f.readlines()[-1].split()[-1].strip('\'')

setup(
    name='motmetrics',
    version=version,
    description='Metrics for multiple object tracker benchmarking.',
    author='Christoph Heindl',
    url='https://github.com/cheind/py-motmetrics',
    license='MIT',
    install_requires=required,
    packages=['motmetrics', 'motmetrics.tests', 'motmetrics.apps'],
    include_package_data=True,
    keywords='tracker MOT evaluation metrics compare'
)
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Kaiyang Zhou 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /feature_extractors/ABD_Net/doc_images/Arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/feature_extractors/ABD_Net/doc_images/Arch.png -------------------------------------------------------------------------------- /feature_extractors/ABD_Net/doc_images/JET_VIS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/feature_extractors/ABD_Net/doc_images/JET_VIS.png -------------------------------------------------------------------------------- /feature_extractors/ABD_Net/doc_images/att.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/feature_extractors/ABD_Net/doc_images/att.png -------------------------------------------------------------------------------- /feature_extractors/ABD_Net/doc_images/qr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/feature_extractors/ABD_Net/doc_images/qr.png -------------------------------------------------------------------------------- /feature_extractors/ABD_Net/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython 2 | h5py 3 | numpy 4 | Pillow 5 | scipy>=1.0.0 6 | torch>=0.4.1 7 | torchvision>=0.2.1 -------------------------------------------------------------------------------- /feature_extractors/ABD_Net/torchreid/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | deep-person-reid 3 | == 4 | 5 | Description: PyTorch 
class ShallowCAM(nn.Module):
    """Optional channel-attention stage applied to shallow backbone features.

    When ``args['shallow_cam']`` is truthy a :class:`CAM_Module` is attached
    and applied in :meth:`forward`; otherwise the module is a pass-through.
    """

    def __init__(self, args, feature_dim: int):
        super().__init__()
        self.input_feature_dim = feature_dim

        if args['shallow_cam']:
            self._cam_module = CAM_Module(self.input_feature_dim)
            if args['compatibility']:
                # Forward Compatibility: older checkpoints expect this alias.
                self._cam_module_abc = self._cam_module
        else:
            self._cam_module = None

    def forward(self, x):
        """Apply the attention module if configured, else return x unchanged."""
        return x if self._cam_module is None else self._cam_module(x)
def DeepSupervision(criterion, xs, y):
    """Average *criterion* over several prediction branches.

    Args:
    - criterion: loss function
    - xs: tuple of inputs
    - y: ground truth
    """
    total = 0.
    for branch in xs:
        total += criterion(branch, y)
    return total / len(xs)
class LowRankLoss(nn.Module):
    """Cross-entropy plus a low-rank/orthogonality penalty on feature maps.

    The penalty is ``beta * sum_b ||W_b W_b^T - I||_F^2``, computed on the
    feature map ``x`` per batch element, or on the 2-D ``weights`` tensor when
    the ``constraint_weights`` environment variable is set.

    Args:
        num_classes: number of identity classes for the cross-entropy term.
        use_gpu: forwarded to :class:`CrossEntropyLoss`.
        label_smooth: forwarded to :class:`CrossEntropyLoss`.
        beta: penalty weight; the ``beta`` environment variable, when set,
            overrides this argument.

    Raises:
        RuntimeError: if the ``beta`` env var is malformed, or no beta is
            available from either source.
    """

    def __init__(self, num_classes, *, use_gpu=True, label_smooth=True, beta=None):
        super().__init__()

        os_beta = None
        env_beta = os.environ.get('beta')
        if env_beta is not None:
            try:
                os_beta = float(env_beta)
            except ValueError:
                # Previously float(None) raised a bare TypeError whenever the
                # env var was unset; now only a malformed value aborts here.
                raise RuntimeError('No beta specified. ABORTED.')

        # Environment override wins.  Comparing against None (rather than
        # truthiness) keeps an explicit beta of 0.0 from being discarded.
        self.beta = os_beta if os_beta is not None else beta
        if self.beta is None:
            raise RuntimeError('No beta specified. ABORTED.')

        self.xent_loss = CrossEntropyLoss(num_classes, use_gpu, label_smooth)

    def forward(self, inputs, pids):
        """Compute the combined loss.

        Args:
            inputs: 4-tuple ``(x, y, _, weights)`` -- x is assumed to be a
                (B, C, H, W) feature map and y the logits fed to the
                cross-entropy term; TODO confirm against the caller.
            pids: ground-truth identity labels.
        """
        x, y, _, weights = inputs

        if CONSTRAINT_WEIGHTS:
            # Treat the 2-D weight matrix as a single-element batch.
            height, width = weights.size()
            batches = 1
            channels = height
            W = weights.view(1, height, width)
        else:
            batches, channels, height, width = x.size()
            W = x.view(batches, channels, -1)
        WT = W.permute(0, 2, 1)
        WWT = torch.bmm(W, WT)
        I = torch.eye(channels).expand(batches, channels, channels).cuda()  # noqa
        delta = WWT - I
        # Squared Frobenius norm of (WW^T - I) per batch element.
        norm = torch.norm(delta.view(batches, -1), 2, 1) ** 2
        return norm.sum() * self.beta + self.xent_loss(y, pids)
15 | """ 16 | def __init__(self): 17 | super(RingLoss, self).__init__() 18 | warnings.warn("This method is deprecated") 19 | self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) 20 | 21 | def forward(self, x): 22 | loss = ((x.norm(p=2, dim=1) - self.radius)**2).mean() 23 | return loss -------------------------------------------------------------------------------- /feature_extractors/ABD_Net/torchreid/losses/sa_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | 4 | 5 | def sa_loss(features_dict): 6 | 7 | if os.environ.get('sa'): 8 | layer3, layer4_1, layer4_2 = features_dict['layers'] 9 | 10 | layer3 = torch.norm(layer3, dim=1, p=2) ** 2 / 1024 11 | layer3 = layer3.view(layer3.size(0), -1) 12 | layer4_1 = torch.norm(layer4_1, dim=1, p=2) ** 2 / 2048 13 | layer4_1 = layer4_1.view(layer4_1.size(0), -1) 14 | # layer4_2 = torch.norm(layer4_2, dim=1, p=2) ** 2 / 2048 15 | # layer4_2 = layer4_2.view(layer4_2.size(0), -1) 16 | 17 | as_loss = (((layer3 - layer4_1) ** 2).sum()) * .1 18 | print(as_loss) 19 | else: 20 | as_loss = 0. 
def init_optimizer(params,
                   optim='adam',
                   lr=0.003,
                   weight_decay=5e-4,
                   momentum=0.9,  # momentum factor for sgd and rmsprop
                   sgd_dampening=0,  # sgd's dampening for momentum
                   sgd_nesterov=False,  # whether to enable sgd's Nesterov momentum
                   rmsprop_alpha=0.99,  # rmsprop's smoothing constant
                   adam_beta1=0.9,  # exponential decay rate for adam's first moment
                   adam_beta2=0.999  # exponential decay rate for adam's second moment
                   ):
    """Build a torch optimizer over ``params`` selected by the ``optim`` string.

    Supported values: ``'adam'``, ``'amsgrad'``, ``'sgd'``, ``'rmsprop'``.

    Raises:
        ValueError: if ``optim`` names an unsupported optimizer.
    """
    if optim in ('adam', 'amsgrad'):
        # Adam and its AMSGrad variant share every knob except the flag.
        return torch.optim.Adam(params, lr=lr, weight_decay=weight_decay,
                                betas=(adam_beta1, adam_beta2),
                                amsgrad=(optim == 'amsgrad'))

    if optim == 'sgd':
        return torch.optim.SGD(params, lr=lr, momentum=momentum,
                               weight_decay=weight_decay,
                               dampening=sgd_dampening, nesterov=sgd_nesterov)

    if optim == 'rmsprop':
        return torch.optim.RMSprop(params, lr=lr, momentum=momentum,
                                   weight_decay=weight_decay,
                                   alpha=rmsprop_alpha)

    raise ValueError("Unsupported optimizer: {}".format(optim))
class ConvRegularizer(nn.Module):
    """Sums a weight-regularization penalty over every Conv2d in a network.

    The concrete penalty (``klass``, e.g. an SVMO/SO regularizer) is
    instantiated once and applied to the weight of each convolution layer
    found in the network's backbone.
    """

    def __init__(self, klass, controller):
        super().__init__()
        # Instantiate the concrete regularizer with its parameter controller.
        self.reg_instance = klass(controller)

    def get_all_conv_layers(self, module):
        """Recursively yield every ``nn.Conv2d`` contained in ``module``.

        Generalized over the original implementation: besides
        ``nn.Sequential`` and plain ``list``, this also descends into
        ``nn.ModuleList`` and ``tuple`` containers, which are common in
        torchreid-style models; previously any conv inside a ModuleList was
        silently skipped and never regularized.
        """
        if isinstance(module, (nn.Sequential, nn.ModuleList, list, tuple)):
            for child in module:
                yield from self.get_all_conv_layers(child)

        if isinstance(module, nn.Conv2d):
            yield module

    def forward(self, net, ignore=False):
        """Return the summed penalty over all conv weights of ``net``.

        Args:
            net: wrapped model (e.g. DataParallel) exposing
                ``net.module.backbone_modules()`` — TODO confirm against callers.
            ignore: when True, skip the computation and return zero.

        Returns:
            A scalar CUDA tensor (NOTE(review): hard-codes CUDA).
        """
        accumulator = torch.tensor(0.0).cuda()

        if ignore:
            return accumulator

        for conv in self.get_all_conv_layers(net.module.backbone_modules()):
            accumulator += self.reg_instance(conv.weight)

        return accumulator
class ParamController:
    """Schedules the regularizer weight as a function of the training epoch."""

    def __init__(self, initial_value=0.01):
        self._value = initial_value
        self._epoch = 0

    def set_epoch(self, epoch):
        # Called by the training loop at the start of each epoch.
        self._epoch = epoch

    def get_value(self):
        import os

        # Presence of the reg_const env var disables the schedule and pins
        # the weight to its initial value for the whole run.
        if os.environ.get('reg_const') is not None:
            return self._value

        # Epochs 21..60 use a strongly damped weight; outside that window
        # the initial value is used unchanged.
        if 20 < self._epoch <= 60:
            return self._value * 1e-3
        return self._value


class HtriParamController:
    """Linearly anneals the triplet-loss weight towards an env-configured target."""

    def __init__(self, initial_value=1.):
        self._value = initial_value
        self._epoch = 0

    def set_epoch(self, epoch):
        # Called by the training loop at the start of each epoch.
        self._epoch = epoch

    def get_value(self):
        import os

        raw = os.environ.get('htri_decay')
        try:
            decay_to = float(raw)
        except (TypeError, ValueError):
            # No (or malformed) decay target configured: weight stays constant.
            return self._value

        if self._epoch > 100:
            return decay_to
        # Linear interpolation from the initial value to decay_to over the
        # first 100 epochs.
        fraction = self._epoch / 100
        return self._value + (decay_to - self._value) * fraction
class AverageMeter(object):
    """Tracks the most recent value and the running average of a series.

    Code imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        # val holds the latest sample; sum/count back the running average.
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val``, weighted as an average over ``n`` samples."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def write_json(obj, fpath):
    """Serialize ``obj`` to ``fpath`` as pretty-printed JSON, creating parent dirs."""
    mkdir_if_missing(osp.dirname(fpath))
    with open(fpath, 'w') as f:
        json.dump(obj, f, indent=4, separators=(',', ': '))


def save_checkpoint(state, is_best=False, fpath='checkpoint.pth.tar'):
    """Persist a training checkpoint.

    Args:
        state: picklable checkpoint payload (model/optimizer state dicts, ...).
        is_best: when True, also copy the file to 'best_model.pth.tar'
            alongside ``fpath`` as a stable alias for the best checkpoint.
        fpath: destination path; parent directories are created on demand.
    """
    target_dir = osp.dirname(fpath)
    # A bare filename has no directory component — nothing to create then.
    if len(target_dir) != 0:
        mkdir_if_missing(target_dir)
    torch.save(state, fpath)
    if is_best:
        shutil.copy(fpath, osp.join(target_dir, 'best_model.pth.tar'))
-------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-all_tricks-tri_center-duke.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks with center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005 2 | # Dataset 2: dukemtmc 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | python3 tools/train.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('3')" DATASETS.NAMES "('dukemtmc')" DATASETS.ROOT_DIR "('/home/haoluo/data')" OUTPUT_DIR "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/dukemtmc/Experiment-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-all_tricks-tri_center-market.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks with center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | python3 tools/train.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('2')" DATASETS.NAMES "('market1501')" DATASETS.ROOT_DIR "('/home/haoluo/data')" OUTPUT_DIR "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005')" 
-------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-all_tricks-without_center-duke.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 2: dukemtmc 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | python3 tools/train.py --config_file='configs/softmax_triplet.yml' MODEL.DEVICE_ID "('1')" DATASETS.NAMES "('dukemtmc')" DATASETS.ROOT_DIR "('/home/haoluo/data')" OUTPUT_DIR "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/dukemtmc/Experiment-all-tricks-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-all_tricks-without_center-gta-clean.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | python3 tools/train.py --config_file='/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/feature_extractors/reid-strong-baseline/configs/softmax_triplet.yml' MODEL.DEVICE_ID "('5')" DATASETS.NAMES "('gta2207clean')" DATASETS.ROOT_DIR "('/home/koehlp/Downloads/work_dirs/datasets/')" OUTPUT_DIR "('/home/koehlp/Downloads/work_dirs/feature_extractor/strong_reid_baseline/logs/gta2207/Experiment-all-tricks-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-clean')" 
-------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-all_tricks-without_center-gta.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | python3 tools/train.py --config_file='/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/feature_extractors/reid-strong-baseline/configs/softmax_triplet.yml' MODEL.DEVICE_ID "('1')" DATASETS.NAMES "('gta2207')" DATASETS.ROOT_DIR "('/home/koehlp/Downloads/work_dirs/datasets/')" OUTPUT_DIR "('/home/koehlp/Downloads/work_dirs/feature_extractor/strong_reid_baseline/logs/gta2207/Experiment-all-tricks-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-all_tricks-without_center-gta_track_images.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | python3 tools/train.py \ 12 | --config_file=configs/softmax_triplet.yml \ 13 | MODEL.PRETRAIN_PATH "('/media/philipp/philippkoehl_ssd/work_dirs/feature_extractor/pretrained/resnet50-19c8e357.pth')" \ 14 | MODEL.DEVICE_ID "('0')" \ 15 | DATASETS.NAMES "('gta_track_images')" \ 16 | DATASETS.ROOT_DIR 
"('/media/philipp/philippkoehl_ssd/work_dirs/datasets')" \ 17 | OUTPUT_DIR "('/media/philipp/philippkoehl_ssd/work_dirs/track_images/logs/gta2207/Experiment-all-tricks-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-clean')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-all_tricks-without_center-gta_track_images_iosb.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | python3 tools/train.py \ 12 | --config_file=configs/softmax_triplet.yml \ 13 | MODEL.PRETRAIN_PATH "('/home/koehlp/Downloads/work_dirs/feature_extractor/pretrained/resnet50-19c8e357.pth')" \ 14 | MODEL.DEVICE_ID "('0')" \ 15 | DATASETS.NAMES "('gta_track_images')" \ 16 | DATASETS.ROOT_DIR "('/home/koehlp/Downloads/work_dirs/datasets')" \ 17 | OUTPUT_DIR "('/home/koehlp/Downloads/work_dirs/track_images/logs/image_tracks/Experiment-all-tricks-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-clean-track_images')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-pretrain_choice-all_tricks-tri_center-market.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks with center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | 
python3 tools/train.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('2')" DATASETS.NAMES "('market1501')" DATASETS.ROOT_DIR "('/home/haoluo/data')" MODEL.PRETRAIN_CHOICE "('self')" MODEL.PRETRAIN_PATH "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-pretrain_choice_all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005/resnet50_model_2.pth')" OUTPUT_DIR "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-pretrain_choice_all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Experiment-seresnext50-all_tricks-tri_center-market.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks with center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | python3 tools/train.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('0')" MODEL.NAME "('se_resnext50')" MODEL.PRETRAIN_PATH "('/home/haoluo/.torch/models/se_resnext50_32x4d-a260b3a4.pth')" DATASETS.NAMES "('market1501')" DATASETS.ROOT_DIR "('/home/haoluo/data')" OUTPUT_DIR "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-seresnext50-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/LICENCE.md: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) [year] [fullname] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-tri_center-feat_after_bn-cos-duke.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss without re-ranking: 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 2: dukemtmc 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | # without re-ranking 12 | python3 tools/test.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('1')" DATASETS.NAMES "('dukemtmc')" DATASETS.ROOT_DIR "('/home/haoluo/data')" MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/dukemtmc/Experiment-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005/resnet50_model_120.pth')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-tri_center-feat_after_bn-cos-market.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | # without re-ranking 12 | python3 tools/test.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('0')" DATASETS.NAMES "('market1501')" DATASETS.ROOT_DIR "('/home/haoluo/data')" MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT 
"('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005/resnet50_model_120.pth')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-without_center-feat_after_bn-cos-duke-downloaded_model.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss without re-ranking: 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 2: dukemtmc 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | # without re-ranking 12 | python3 tools/test.py --config_file='configs/softmax_triplet.yml' MODEL.DEVICE_ID "('1')" DATASETS.NAMES "('dukemtmc')" DATASETS.ROOT_DIR "('/home/haoluo/data')" MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('/home/philipp/work_dirs/feature_extractor/strong_reid_baseline/market_resnet50_model_120_rank1_945.pth')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-without_center-feat_after_bn-cos-duke.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss without re-ranking: 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on 2 | # Dataset 2: dukemtmc 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | # without re-ranking 12 | python3 tools/test.py --config_file='configs/softmax_triplet.yml' MODEL.DEVICE_ID "('1')" DATASETS.NAMES "('dukemtmc')" DATASETS.ROOT_DIR "('/home/haoluo/data')" 
MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/dukemtmc/Experiment-all-tricks-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on/resnet50_model_120.pth')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-without_center-feat_after_bn-cos-gta2207-rzr.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss without re-ranking: 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on (=raw all trick, softmax_triplet.yml) 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | # without re-ranking 12 | python3 tools/test.py \ 13 | --config_file=configs/softmax_triplet.yml \ 14 | MODEL.DEVICE_ID "('0')" \ 15 | DATASETS.NAMES "('gta2207')" \ 16 | DATASETS.ROOT_DIR "('/media/philipp/philippkoehl_ssd/work_dirs/datasets')" \ 17 | MODEL.PRETRAIN_CHOICE "('self')" \ 18 | TEST.WEIGHT "('/media/philipp/philippkoehl_ssd/work_dirs/feature_extractor/strong_reid_baseline/market_resnet50_model_120_rank1_945.pth')" \ 19 | OUTPUT_DIR "('/media/philipp/philippkoehl_ssd/work_dirs/feature_extractor/strong_reid_baseline/logs')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-without_center-feat_after_bn-cos-gta2207.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss without re-ranking: 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on (=raw all trick, softmax_triplet.yml) 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | 
# labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | # without re-ranking 12 | python3 tools/test.py \ 13 | --config_file=/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/feature_extractors/reid-strong-baseline/configs/softmax_triplet.yml \ 14 | MODEL.DEVICE_ID "('4')" \ 15 | DATASETS.NAMES "('gta2207')" \ 16 | DATASETS.ROOT_DIR "('/home/koehlp/Downloads/work_dirs/datasets/')" \ 17 | MODEL.PRETRAIN_CHOICE "('self')" \ 18 | TEST.WEIGHT "('/home/koehlp/Downloads/work_dirs/feature_extractor/strong_reid_baseline/resnet50_model_reid_GTA_softmax_triplet.pth')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-without_center-feat_after_bn-cos-gta_track_images-rzr.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss without re-ranking: 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on (=raw all trick, softmax_triplet.yml) 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | # without re-ranking 12 | python3 tools/test.py \ 13 | --config_file=configs/softmax_triplet.yml \ 14 | MODEL.DEVICE_ID "('0')" \ 15 | DATASETS.NAMES "('gta_track_images')" \ 16 | DATASETS.ROOT_DIR "('/media/philipp/philippkoehl_ssd/work_dirs/datasets')" \ 17 | MODEL.PRETRAIN_CHOICE "('self')" \ 18 | TEST.WEIGHT "('/media/philipp/philippkoehl_ssd/work_dirs/feature_extractor/strong_reid_baseline/market_resnet50_model_120_rank1_945.pth')" \ 19 | OUTPUT_DIR "('/media/philipp/philippkoehl_ssd/work_dirs/track_images/logs')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-all_tricks-without_center-feat_after_bn-cos-market.sh: 
-------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss without re-ranking: 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on (=raw all trick, softmax_triplet.yml) 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # without center loss 11 | # without re-ranking 12 | python3 tools/test.py --config_file='configs/softmax_triplet.yml' MODEL.DEVICE_ID "('0')" DATASETS.NAMES "('market1501')" DATASETS.ROOT_DIR "('/home/koehlp/Downloads/work_dirs/datasets/')" MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('/home/koehlp/Downloads/work_dirs/feature_extractor/strong_reid_baseline/market_resnet50_model_120_rank1_945.pth')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-reranking-all_tricks-tri_center-feat_after_bn-cos-duke.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss with re-ranking : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005 2 | # Dataset 2: dukemtmc 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | # with re-ranking 12 | python3 tools/test.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('1')" DATASETS.NAMES "('dukemtmc')" TEST.RE_RANKING "('yes')" DATASETS.ROOT_DIR "('/home/haoluo/data')" MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/dukemtmc/Experiment-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005/resnet50_model_120.pth')" 
-------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/Test-reranking-all_tricks-tri_center-feat_after_bn-cos-market.sh: -------------------------------------------------------------------------------- 1 | # Experiment all tricks without center loss with re-ranking : 256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005 2 | # Dataset 1: market1501 3 | # imagesize: 256x128 4 | # batchsize: 16x4 5 | # warmup_step 10 6 | # random erase prob 0.5 7 | # labelsmooth: on 8 | # last stride 1 9 | # bnneck on 10 | # with center loss 11 | # with re-ranking 12 | python3 tools/test.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('0')" DATASETS.NAMES "('market1501')" DATASETS.ROOT_DIR "('/home/haoluo/data')" TEST.RE_RANKING "('yes')" MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005/resnet50_model_120.pth')" -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/feature_extractors/reid_strong_baseline/__init__.py -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/config/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .defaults import _C as cfg 8 | -------------------------------------------------------------------------------- 
/feature_extractors/reid_strong_baseline/configs/baseline.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/haoluo/.torch/models/resnet50-19c8e357.pth' 4 | LAST_STRIDE: 2 5 | NECK: 'no' 6 | METRIC_LOSS_TYPE: 'triplet' 7 | IF_LABELSMOOTH: 'off' 8 | IF_WITH_CENTER: 'no' 9 | 10 | 11 | INPUT: 12 | SIZE_TRAIN: [256, 128] 13 | SIZE_TEST: [256, 128] 14 | PROB: 0.5 # random horizontal flip 15 | RE_PROB: 0.0 # random erasing 16 | PADDING: 10 17 | 18 | DATASETS: 19 | NAMES: ('market1501') 20 | 21 | DATALOADER: 22 | SAMPLER: 'softmax_triplet' 23 | NUM_INSTANCE: 4 24 | NUM_WORKERS: 8 25 | 26 | SOLVER: 27 | OPTIMIZER_NAME: 'Adam' 28 | MAX_EPOCHS: 120 29 | BASE_LR: 0.00035 30 | 31 | CLUSTER_MARGIN: 0.3 32 | 33 | CENTER_LR: 0.5 34 | CENTER_LOSS_WEIGHT: 0.0005 35 | 36 | RANGE_K: 2 37 | RANGE_MARGIN: 0.3 38 | RANGE_ALPHA: 0 39 | RANGE_BETA: 1 40 | RANGE_LOSS_WEIGHT: 1 41 | 42 | BIAS_LR_FACTOR: 1 43 | WEIGHT_DECAY: 0.0005 44 | WEIGHT_DECAY_BIAS: 0.0005 45 | IMS_PER_BATCH: 64 46 | 47 | STEPS: [40, 70] 48 | GAMMA: 0.1 49 | 50 | WARMUP_FACTOR: 0.01 51 | WARMUP_ITERS: 0 52 | WARMUP_METHOD: 'linear' 53 | 54 | CHECKPOINT_PERIOD: 40 55 | LOG_PERIOD: 20 56 | EVAL_PERIOD: 40 57 | 58 | TEST: 59 | IMS_PER_BATCH: 128 60 | RE_RANKING: 'no' 61 | WEIGHT: "path" 62 | NECK_FEAT: 'after' 63 | FEAT_NORM: 'yes' 64 | 65 | OUTPUT_DIR: "/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-all-tricks-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on" 66 | 67 | 68 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/configs/softmax.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_PATH: '/home/philipp/work_dirs/feature_extractor/pretrained/resnet50-19c8e357.pth' 3 | 4 | 5 | INPUT: 6 | SIZE_TRAIN: [256, 128] 7 | SIZE_TEST: [256, 
128] 8 | PROB: 0.5 # random horizontal flip 9 | RE_PROB: 0.5 # random erasing 10 | PADDING: 10 11 | 12 | DATASETS: 13 | NAMES: ('market1501') 14 | 15 | DATALOADER: 16 | SAMPLER: 'softmax' 17 | NUM_WORKERS: 8 18 | 19 | SOLVER: 20 | OPTIMIZER_NAME: 'Adam' 21 | MAX_EPOCHS: 120 22 | BASE_LR: 0.00035 23 | BIAS_LR_FACTOR: 1 24 | WEIGHT_DECAY: 0.0005 25 | WEIGHT_DECAY_BIAS: 0.0005 26 | IMS_PER_BATCH: 64 27 | 28 | STEPS: [30, 55] 29 | GAMMA: 0.1 30 | 31 | WARMUP_FACTOR: 0.01 32 | WARMUP_ITERS: 5 33 | WARMUP_METHOD: 'linear' 34 | 35 | CHECKPOINT_PERIOD: 20 36 | LOG_PERIOD: 20 37 | EVAL_PERIOD: 20 38 | 39 | TEST: 40 | IMS_PER_BATCH: 128 41 | 42 | OUTPUT_DIR: "/home/haoluo/log/reid/market1501/softmax_bs64_256x128" 43 | 44 | 45 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/configs/softmax_triplet.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/koehlp/Downloads/work_dirs/feature_extractor/pretrained/resnet50-19c8e357.pth' 4 | METRIC_LOSS_TYPE: 'triplet' 5 | IF_LABELSMOOTH: 'on' 6 | IF_WITH_CENTER: 'no' 7 | 8 | 9 | 10 | 11 | INPUT: 12 | SIZE_TRAIN: [256, 128] 13 | SIZE_TEST: [256, 128] 14 | PROB: 0.5 # random horizontal flip 15 | RE_PROB: 0.5 # random erasing 16 | PADDING: 10 17 | 18 | DATASETS: 19 | NAMES: ('market1501') 20 | 21 | DATALOADER: 22 | SAMPLER: 'softmax_triplet' 23 | NUM_INSTANCE: 4 24 | NUM_WORKERS: 8 25 | 26 | SOLVER: 27 | OPTIMIZER_NAME: 'Adam' 28 | MAX_EPOCHS: 120 29 | BASE_LR: 0.00035 30 | 31 | CLUSTER_MARGIN: 0.3 32 | 33 | CENTER_LR: 0.5 34 | CENTER_LOSS_WEIGHT: 0.0005 35 | 36 | RANGE_K: 2 37 | RANGE_MARGIN: 0.3 38 | RANGE_ALPHA: 0 39 | RANGE_BETA: 1 40 | RANGE_LOSS_WEIGHT: 1 41 | 42 | BIAS_LR_FACTOR: 1 43 | WEIGHT_DECAY: 0.0005 44 | WEIGHT_DECAY_BIAS: 0.0005 45 | IMS_PER_BATCH: 64 46 | 47 | STEPS: [40, 70] 48 | GAMMA: 0.1 49 | 50 | WARMUP_FACTOR: 0.01 51 | WARMUP_ITERS: 10 52 | 
WARMUP_METHOD: 'linear' 53 | 54 | CHECKPOINT_PERIOD: 40 55 | LOG_PERIOD: 20 56 | EVAL_PERIOD: 40 57 | 58 | TEST: 59 | IMS_PER_BATCH: 64 60 | RE_RANKING: 'no' 61 | WEIGHT: "path" 62 | NECK_FEAT: 'after' 63 | FEAT_NORM: 'yes' 64 | 65 | OUTPUT_DIR: "/home/koehlp/Downloads/work_dirs/feature_extractor/strong_reid_baseline/logs" 66 | 67 | 68 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/configs/softmax_triplet_with_center.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/haoluo/.torch/models/resnet50-19c8e357.pth' 4 | METRIC_LOSS_TYPE: 'triplet_center' 5 | IF_LABELSMOOTH: 'on' 6 | IF_WITH_CENTER: 'yes' 7 | 8 | 9 | 10 | 11 | INPUT: 12 | SIZE_TRAIN: [256, 128] 13 | SIZE_TEST: [256, 128] 14 | PROB: 0.5 # random horizontal flip 15 | RE_PROB: 0.5 # random erasing 16 | PADDING: 10 17 | 18 | DATASETS: 19 | NAMES: ('market1501') 20 | 21 | DATALOADER: 22 | SAMPLER: 'softmax_triplet' 23 | NUM_INSTANCE: 4 24 | NUM_WORKERS: 8 25 | 26 | SOLVER: 27 | OPTIMIZER_NAME: 'Adam' 28 | MAX_EPOCHS: 120 29 | BASE_LR: 0.00035 30 | 31 | CLUSTER_MARGIN: 0.3 32 | 33 | CENTER_LR: 0.5 34 | CENTER_LOSS_WEIGHT: 0.0005 35 | 36 | RANGE_K: 2 37 | RANGE_MARGIN: 0.3 38 | RANGE_ALPHA: 0 39 | RANGE_BETA: 1 40 | RANGE_LOSS_WEIGHT: 1 41 | 42 | BIAS_LR_FACTOR: 1 43 | WEIGHT_DECAY: 0.0005 44 | WEIGHT_DECAY_BIAS: 0.0005 45 | IMS_PER_BATCH: 64 46 | 47 | STEPS: [40, 70] 48 | GAMMA: 0.1 49 | 50 | WARMUP_FACTOR: 0.01 51 | WARMUP_ITERS: 10 52 | WARMUP_METHOD: 'linear' 53 | 54 | CHECKPOINT_PERIOD: 40 55 | LOG_PERIOD: 20 56 | EVAL_PERIOD: 40 57 | 58 | TEST: 59 | IMS_PER_BATCH: 128 60 | RE_RANKING: 'no' 61 | WEIGHT: "path" 62 | NECK_FEAT: 'after' 63 | FEAT_NORM: 'yes' 64 | 65 | OUTPUT_DIR: 
"/home/haoluo/log/gu/reid_baseline_review/Opensource_test/market1501/Experiment-all-tricks-tri_center-256x128-bs16x4-warmup10-erase0_5-labelsmooth_on-laststride1-bnneck_on-triplet_centerloss0_0005" 66 | 67 | 68 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/data/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import make_data_loader 8 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/data/collate_batch.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | 9 | 10 | def train_collate_fn(batch): 11 | imgs, pids, _, _, = zip(*batch) 12 | pids = torch.tensor(pids, dtype=torch.int64) 13 | return torch.stack(imgs, dim=0), pids 14 | 15 | 16 | def val_collate_fn(batch): 17 | imgs, pids, camids, _ = zip(*batch) 18 | return torch.stack(imgs, dim=0), pids, camids 19 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | # from .cuhk03 import CUHK03 7 | from .gta2207 import Gta2207 8 | from .gta2207clean import Gta2207clean 9 | from .gta2207largeimgs import Gta2207largeimgs 10 | from .gta2207daynight import Gta2207daynight 11 | from .dukemtmcreid import DukeMTMCreID 12 | from .market1501 import Market1501 13 | from .gta_track_images import Gta_track_images 14 | from .msmt17 import MSMT17 15 | from .veri import VeRi 16 | from 
.dataset_loader import ImageDataset 17 | 18 | __factory = { 19 | 'market1501': Market1501, 20 | # 'cuhk03': CUHK03, 21 | 'dukemtmc': DukeMTMCreID, 22 | 'msmt17': MSMT17, 23 | 'veri': VeRi, 24 | 'gta2207' : Gta2207, 25 | 'gta2207clean' : Gta2207clean, 26 | 'gta2207largeimgs' : Gta2207largeimgs, 27 | 'gta2207daynight' : Gta2207daynight, 28 | 'gta_track_images' : Gta_track_images 29 | } 30 | 31 | 32 | def get_names(): 33 | return __factory.keys() 34 | 35 | 36 | def init_dataset(name, *args, **kwargs): 37 | if name not in __factory.keys(): 38 | raise KeyError("Unknown datasets: {}".format(name)) 39 | return __factory[name](*args, **kwargs) 40 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/data/datasets/dataset_loader.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os.path as osp 8 | from PIL import Image 9 | from torch.utils.data import Dataset 10 | 11 | 12 | def read_image(img_path): 13 | """Keep reading image until succeed. 14 | This can avoid IOError incurred by heavy IO process.""" 15 | got_img = False 16 | if not osp.exists(img_path): 17 | raise IOError("{} does not exist".format(img_path)) 18 | while not got_img: 19 | try: 20 | img = Image.open(img_path).convert('RGB') 21 | got_img = True 22 | except IOError: 23 | print("IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.".format(img_path)) 24 | pass 25 | return img 26 | 27 | 28 | class ImageDataset(Dataset): 29 | """Image Person ReID Dataset""" 30 | 31 | def __init__(self, dataset, transform=None): 32 | self.dataset = dataset 33 | self.transform = transform 34 | 35 | def __len__(self): 36 | return len(self.dataset) 37 | 38 | def __getitem__(self, index): 39 | img_path, pid, camid = self.dataset[index] 40 | img = read_image(img_path) 41 | 42 | if self.transform is not None: 43 | img = self.transform(img) 44 | 45 | return img, pid, camid, img_path 46 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .triplet_sampler import RandomIdentitySampler, RandomIdentitySampler_alignedreid # new add by gu 8 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import build_transforms 8 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/data/transforms/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: liaoxingyu2@jd.com 5 | """ 6 | 7 | import torchvision.transforms as T 8 | 9 | from .transforms import RandomErasing 10 | 11 | 12 | def build_transforms(cfg, is_train=True): 13 | normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD) 14 | if is_train: 15 | transform = T.Compose([ 16 | 
T.Resize(cfg.INPUT.SIZE_TRAIN), 17 | T.RandomHorizontalFlip(p=cfg.INPUT.PROB), 18 | T.Pad(cfg.INPUT.PADDING), 19 | T.RandomCrop(cfg.INPUT.SIZE_TRAIN), 20 | T.ToTensor(), 21 | normalize_transform, 22 | RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN) 23 | ]) 24 | else: 25 | transform = T.Compose([ 26 | T.Resize(cfg.INPUT.SIZE_TEST), 27 | T.ToTensor(), 28 | normalize_transform 29 | ]) 30 | 31 | return transform 32 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/imgs/pipeline.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/feature_extractors/reid_strong_baseline/imgs/pipeline.jpg -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .baseline import Baseline 8 | 9 | 10 | def build_model(cfg, num_classes): 11 | # if cfg.MODEL.NAME == 'resnet50': 12 | # model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK, cfg.TEST.NECK_FEAT) 13 | model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK, cfg.TEST.NECK_FEAT, cfg.MODEL.NAME, cfg.MODEL.PRETRAIN_CHOICE) 14 | return model 15 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/modeling/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | -------------------------------------------------------------------------------- 
/feature_extractors/reid_strong_baseline/solver/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import make_optimizer, make_optimizer_with_center 8 | from .lr_scheduler import WarmupMultiStepLR -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/tests/lr_scheduler_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | 4 | import torch 5 | from torch import nn 6 | 7 | sys.path.append('.') 8 | from solver.lr_scheduler import WarmupMultiStepLR 9 | from solver.build import make_optimizer 10 | from config import cfg 11 | 12 | 13 | class MyTestCase(unittest.TestCase): 14 | def test_something(self): 15 | net = nn.Linear(10, 10) 16 | optimizer = make_optimizer(cfg, net) 17 | lr_scheduler = WarmupMultiStepLR(optimizer, [20, 40], warmup_iters=10) 18 | for i in range(50): 19 | lr_scheduler.step() 20 | for j in range(3): 21 | print(i, lr_scheduler.get_lr()[0]) 22 | optimizer.step() 23 | 24 | 25 | if __name__ == '__main__': 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/tools/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | -------------------------------------------------------------------------------- 
/feature_extractors/reid_strong_baseline/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/utils/iotools.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import errno 8 | import json 9 | import os 10 | 11 | import os.path as osp 12 | 13 | 14 | def mkdir_if_missing(directory): 15 | if not osp.exists(directory): 16 | try: 17 | os.makedirs(directory) 18 | except OSError as e: 19 | if e.errno != errno.EEXIST: 20 | raise 21 | 22 | 23 | def check_isfile(path): 24 | isfile = osp.isfile(path) 25 | if not isfile: 26 | print("=> Warning: no file found at '{}' (ignored)".format(path)) 27 | return isfile 28 | 29 | 30 | def read_json(fpath): 31 | with open(fpath, 'r') as f: 32 | obj = json.load(f) 33 | return obj 34 | 35 | 36 | def write_json(obj, fpath): 37 | mkdir_if_missing(osp.dirname(fpath)) 38 | with open(fpath, 'w') as f: 39 | json.dump(obj, f, indent=4, separators=(',', ': ')) 40 | -------------------------------------------------------------------------------- /feature_extractors/reid_strong_baseline/utils/logger.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import logging 8 | import os 9 | import sys 10 | 11 | 12 | def setup_logger(name, save_dir, distributed_rank): 13 | logger = logging.getLogger(name) 14 | logger.setLevel(logging.DEBUG) 15 | # don't log results for the non-master process 16 | if distributed_rank > 0: 17 | return logger 18 | ch = logging.StreamHandler(stream=sys.stdout) 19 | 
ch.setLevel(logging.DEBUG) 20 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 21 | ch.setFormatter(formatter) 22 | logger.addHandler(ch) 23 | 24 | if save_dir: 25 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 26 | fh.setLevel(logging.DEBUG) 27 | fh.setFormatter(formatter) 28 | logger.addHandler(fh) 29 | 30 | return logger 31 | -------------------------------------------------------------------------------- /readme_files/feature_graphics_all_cams_500_frames.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/readme_files/feature_graphics_all_cams_500_frames.jpg -------------------------------------------------------------------------------- /readme_files/img_hid_31_oid_1934.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/readme_files/img_hid_31_oid_1934.jpg -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | addict==2.2.1 2 | certifi==2019.11.28 3 | cycler==0.10.0 4 | Cython==0.29.16 5 | joblib==0.14.1 6 | kiwisolver==1.1.0 7 | lapsolver==1.1.0 8 | matplotlib==3.2.1 9 | mkl-fft==1.0.15 10 | mkl-random==1.1.0 11 | mkl-service==2.3.0 12 | mmcv==0.4.2 13 | numpy==1.22.0 14 | olefile==0.46 15 | opencv-python==4.2.0.32 16 | pandas==1.0.3 17 | Pillow==9.0.1 18 | pycocotools==2.0 19 | pyparsing==2.4.6 20 | python-dateutil==2.8.1 21 | pytz==2019.3 22 | PyYAML==5.4 23 | scikit-learn==0.22.2.post1 24 | scipy==1.4.1 25 | Shapely==1.7.0 26 | six==1.14.0 27 | sklearn==0.0 28 | terminaltables==3.1.0 29 | torch==1.4.0 30 | torchvision==0.5.0 31 | tqdm==4.44.1 32 | yacs==0.1.6 33 | 
-------------------------------------------------------------------------------- /start_run_tracker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | python run_tracker.py --config configs/tracker_configs/frcnn50_new_abd.py -------------------------------------------------------------------------------- /trackers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/__init__.py -------------------------------------------------------------------------------- /trackers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/__init__.py: -------------------------------------------------------------------------------- 1 | from .deep_sort import DeepSort -------------------------------------------------------------------------------- /trackers/deep_sort/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/__pycache__/deep_sort.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/__pycache__/deep_sort.cpython-37.pyc -------------------------------------------------------------------------------- 
/trackers/deep_sort/sort/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__init__.py -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/detection.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/detection.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/iou_matching.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/iou_matching.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/kalman_filter.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/kalman_filter.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/linear_assignment.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/linear_assignment.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/nn_matching.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/nn_matching.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/track.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/track.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/__pycache__/tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/deep_sort/sort/__pycache__/tracker.cpython-37.pyc -------------------------------------------------------------------------------- /trackers/deep_sort/sort/detection.py: -------------------------------------------------------------------------------- 1 | # vim: expandtab:ts=4:sw=4 2 | import numpy as np 3 | 4 | 5 | class Detection(object): 6 | """ 7 | This class represents a bounding box detection in a single image. 8 | 9 | Parameters 10 | ---------- 11 | tlwh : array_like 12 | Bounding box in format `(x, y, w, h)`. 13 | confidence : float 14 | Detector confidence score. 
15 | feature : array_like 16 | A feature vector that describes the object contained in this image. 17 | 18 | Attributes 19 | ---------- 20 | tlwh : ndarray 21 | Bounding box in format `(top left x, top left y, width, height)`. 22 | confidence : ndarray 23 | Detector confidence score. 24 | feature : ndarray | NoneType 25 | A feature vector that describes the object contained in this image. 26 | 27 | """ 28 | 29 | def __init__(self, tlwh, confidence, feature): 30 | self.tlwh = np.asarray(tlwh, dtype=np.float) 31 | self.confidence = float(confidence) 32 | self.feature = np.asarray(feature, dtype=np.float32) 33 | 34 | 35 | def to_tlbr(self): 36 | """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., 37 | `(top left, bottom right)`. 38 | """ 39 | ret = self.tlwh.copy() 40 | ret[2:] += ret[:2] 41 | return ret 42 | 43 | def to_xyah(self): 44 | """Convert bounding box to format `(center x, center y, aspect ratio, 45 | height)`, where the aspect ratio is `width / height`. 46 | """ 47 | ret = self.tlwh.copy() 48 | ret[:2] += ret[2:] / 2 49 | ret[2] /= ret[3] 50 | return ret 51 | -------------------------------------------------------------------------------- /trackers/iou_tracker/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | # tilde 104 | *~ 105 | -------------------------------------------------------------------------------- /trackers/iou_tracker/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 TU Berlin, Communication Systems Group 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /trackers/iou_tracker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/trackers/iou_tracker/__init__.py -------------------------------------------------------------------------------- /trackers/iou_tracker/run_tracker.m: -------------------------------------------------------------------------------- 1 | function [stateInfo, speed] = run_tracker(curSequence, baselinedetections) 2 | %% tracker configuration 3 | 4 | %% Mask R-CNN (frcnn) 5 | sigma_l = 0; 6 | sigma_h = 0.95; 7 | sigma_iou = 0.6; 8 | t_min = 7; 9 | 10 | % %% R-CNN 11 | % sigma_l = 0; 12 | % sigma_h = 0.7; 13 | % sigma_iou = 0.5; 14 | % t_min = 2; 15 | 16 | % %% ACF 17 | % sigma_l = 0; 18 | % sigma_h = 0.3; 19 | % sigma_iou = 0.5; 20 | % t_min = 3; 21 | 22 | % %% CompACT 23 | % sigma_l = 0; 24 | % sigma_h = 0.2; 25 | % sigma_iou = 0.4; 26 | % t_min = 2; 27 | 28 | % %% EB 29 | % sigma_l = 0; 30 | % sigma_h = 0.8; 31 | % sigma_iou = 0.5; 32 | % t_min = 2; 33 | 34 | %% running tracking algorithm 35 | try 36 | ret = py.iou_tracker.track_iou_matlab_wrapper(py.numpy.array(baselinedetections(:).'), sigma_l, sigma_h, sigma_iou, t_min); 37 | 38 | catch exception 39 | disp('error while calling the python tracking module: ') 40 | disp(' ') 41 | disp(getReport(exception)) 42 | end 43 | speed = ret{1}; 
44 | track_result = cell2mat(reshape(ret{2}.cell.', 6, []).'); 45 | 46 | %% convert and save the mot style track_result 47 | stateInfo = saveStateInfo(track_result, numel(curSequence.frameNums)); 48 | -------------------------------------------------------------------------------- /trackers/iou_tracker/saveStateInfo.m: -------------------------------------------------------------------------------- 1 | function stateInfo = saveStateInfo(track_result, frame_num) 2 | 3 | try 4 | sorted_result = sortrows(track_result,6); 5 | detect_num = size(sorted_result,1); 6 | catch 7 | detect_num = 0; 8 | 9 | stateInfo.X(:,1) = zeros(frame_num,1); 10 | stateInfo.Y(:,1) = zeros(frame_num,1); 11 | stateInfo.Xi(:,1) = zeros(frame_num,1); 12 | stateInfo.Yi(:,1) = zeros(frame_num,1); 13 | stateInfo.W(:,1) = zeros(frame_num,1); 14 | stateInfo.H(:,1) = zeros(frame_num,1); 15 | end 16 | 17 | stateInfo.F = frame_num; 18 | stateInfo.frameNums = 1:frame_num; 19 | index = 0; 20 | cur_id = -1; 21 | 22 | for i = 1:detect_num 23 | if (cur_id ~= sorted_result(i,6)) 24 | cur_id = sorted_result(i,6); 25 | index = index + 1; 26 | stateInfo.X(:,index) = zeros(frame_num,1); 27 | stateInfo.Y(:,index) = zeros(frame_num,1); 28 | stateInfo.Xi(:,index) = zeros(frame_num,1); 29 | stateInfo.Yi(:,index) = zeros(frame_num,1); 30 | stateInfo.W(:,index) = zeros(frame_num,1); 31 | stateInfo.H(:,index) = zeros(frame_num,1); 32 | end 33 | bbox = sorted_result(i,:); 34 | n = bbox(1,5); 35 | stateInfo.X(n,index) = bbox(1,1)+0.5*bbox(1,3); 36 | stateInfo.Y(n,index) = bbox(1,2)+bbox(1,4); 37 | stateInfo.Xi(n,index) = stateInfo.X(n,index); 38 | stateInfo.Yi(n,index) = stateInfo.Y(n,index); 39 | stateInfo.W(n,index) = bbox(1,3); 40 | stateInfo.H(n,index) = bbox(1,4); 41 | end 42 | -------------------------------------------------------------------------------- /trackers/iou_tracker/seqmaps/frcnn-all.txt: -------------------------------------------------------------------------------- 1 | name 2 | MOT17-01-FRCNN 3 | 
MOT17-02-FRCNN 4 | MOT17-03-FRCNN 5 | MOT17-04-FRCNN 6 | MOT17-05-FRCNN 7 | MOT17-06-FRCNN 8 | MOT17-07-FRCNN 9 | MOT17-08-FRCNN 10 | MOT17-09-FRCNN 11 | MOT17-10-FRCNN 12 | MOT17-11-FRCNN 13 | MOT17-12-FRCNN 14 | MOT17-13-FRCNN 15 | MOT17-14-FRCNN 16 | -------------------------------------------------------------------------------- /trackers/iou_tracker/seqmaps/frcnn-test.txt: -------------------------------------------------------------------------------- 1 | name 2 | MOT17-01-FRCNN 3 | MOT17-03-FRCNN 4 | MOT17-06-FRCNN 5 | MOT17-07-FRCNN 6 | MOT17-08-FRCNN 7 | MOT17-12-FRCNN 8 | MOT17-14-FRCNN -------------------------------------------------------------------------------- /trackers/iou_tracker/seqmaps/frcnn-train.txt: -------------------------------------------------------------------------------- 1 | name 2 | MOT17-02-FRCNN 3 | MOT17-04-FRCNN 4 | MOT17-05-FRCNN 5 | MOT17-09-FRCNN 6 | MOT17-10-FRCNN 7 | MOT17-11-FRCNN 8 | MOT17-13-FRCNN 9 | -------------------------------------------------------------------------------- /trackers/iou_tracker/seqmaps/sdp-all.txt: -------------------------------------------------------------------------------- 1 | name 2 | MOT17-01-SDP 3 | MOT17-02-SDP 4 | MOT17-03-SDP 5 | MOT17-04-SDP 6 | MOT17-05-SDP 7 | MOT17-06-SDP 8 | MOT17-07-SDP 9 | MOT17-08-SDP 10 | MOT17-09-SDP 11 | MOT17-10-SDP 12 | MOT17-11-SDP 13 | MOT17-12-SDP 14 | MOT17-13-SDP 15 | MOT17-14-SDP 16 | -------------------------------------------------------------------------------- /trackers/iou_tracker/seqmaps/sdp-test.txt: -------------------------------------------------------------------------------- 1 | name 2 | MOT17-01-SDP 3 | MOT17-03-SDP 4 | MOT17-06-SDP 5 | MOT17-07-SDP 6 | MOT17-08-SDP 7 | MOT17-12-SDP 8 | MOT17-14-SDP -------------------------------------------------------------------------------- /trackers/iou_tracker/seqmaps/sdp-train.txt: -------------------------------------------------------------------------------- 1 | name 2 | MOT17-02-SDP 3 | 
MOT17-04-SDP 4 | MOT17-05-SDP 5 | MOT17-09-SDP 6 | MOT17-10-SDP 7 | MOT17-11-SDP 8 | MOT17-13-SDP 9 | -------------------------------------------------------------------------------- /utilities/.idea/inspectionProfiles/profiles_settings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | -------------------------------------------------------------------------------- /utilities/.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | -------------------------------------------------------------------------------- /utilities/.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /utilities/.idea/utilities.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 11 | -------------------------------------------------------------------------------- /utilities/.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /utilities/BoundingBoxTracked.py: -------------------------------------------------------------------------------- 1 | class BoundingBoxTracked: 2 | topLeft = None 3 | topRight = None 4 | bottomRight = None 5 | bottomLeft = None -------------------------------------------------------------------------------- /utilities/__pycache__/dataset_statistics.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/dataset_statistics.cpython-37.pyc -------------------------------------------------------------------------------- 
/utilities/__pycache__/helper.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/helper.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/__pycache__/joint.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/joint.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/__pycache__/non_daemonic_pool.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/non_daemonic_pool.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/__pycache__/pandas_loader.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/pandas_loader.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/__pycache__/pose.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/pose.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/__pycache__/preprocessing.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/preprocessing.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/__pycache__/python_path_utility.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/python_path_utility.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/__pycache__/track_result_statistics.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/__pycache__/track_result_statistics.cpython-37.pyc -------------------------------------------------------------------------------- /utilities/download_pytorch_model.py: -------------------------------------------------------------------------------- 1 | from torchvision import models 2 | 3 | if __name__ == "__main__": 4 | 5 | 6 | resnet50 = models.resnet50(pretrained=True) 7 | print(resnet50) -------------------------------------------------------------------------------- /utilities/glasbey/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | bin/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # Installer logs 26 | pip-log.txt 27 | pip-delete-this-directory.txt 28 | 29 | # Unit test / coverage reports 30 | htmlcov/ 31 | .tox/ 32 | .coverage 33 | .cache 34 | nosetests.xml 35 | coverage.xml 36 | 37 | # 
Translations 38 | *.mo 39 | 40 | # Mr Developer 41 | .mr.developer.cfg 42 | .project 43 | .pydevproject 44 | 45 | # Rope 46 | .ropeproject 47 | 48 | # Django stuff: 49 | *.log 50 | *.pot 51 | 52 | # Sphinx documentation 53 | docs/_build/ 54 | 55 | # Generated color lookup table 56 | *_lut.npz 57 | -------------------------------------------------------------------------------- /utilities/glasbey/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Sergey Alexandrov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /utilities/glasbey/__init__.py: -------------------------------------------------------------------------------- 1 | from .glasbey import Glasbey 2 | -------------------------------------------------------------------------------- /utilities/glasbey/images/palette-set1-30.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/glasbey/images/palette-set1-30.png -------------------------------------------------------------------------------- /utilities/glasbey/images/palette-white-30.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/koehlp/wda_tracker/1a6209cbbf1b8b2ea423eca8de4ac33e6ec8b815/utilities/glasbey/images/palette-white-30.png -------------------------------------------------------------------------------- /utilities/glasbey/palettes/accent.txt: -------------------------------------------------------------------------------- 1 | 127,201,127 2 | 190,174,212 3 | 253,192,134 4 | 255,255,153 5 | 56,108,176 6 | 240,2,127 7 | 191,91,23 8 | 102,102,102 9 | -------------------------------------------------------------------------------- /utilities/glasbey/palettes/dark2.txt: -------------------------------------------------------------------------------- 1 | 27,158,119 2 | 217,95,2 3 | 117,112,179 4 | 231,41,138 5 | 102,166,30 6 | 230,171,2 7 | 166,118,29 8 | 102,102,102 9 | -------------------------------------------------------------------------------- /utilities/glasbey/palettes/paired.txt: -------------------------------------------------------------------------------- 1 | 166,206,227 2 | 31,120,180 3 | 178,223,138 4 | 51,160,44 5 | 251,154,153 6 | 227,26,28 7 | 253,191,111 8 | 255,127,0 9 | 202,178,214 10 | 106,61,154 11 | 255,255,153 12 | 177,89,40 13 | 
-------------------------------------------------------------------------------- /utilities/glasbey/palettes/pastel1.txt: -------------------------------------------------------------------------------- 1 | 251,180,174 2 | 179,205,227 3 | 204,235,197 4 | 222,203,228 5 | 254,217,166 6 | 255,255,204 7 | 229,216,189 8 | 253,218,236 9 | 242,242,242 10 | -------------------------------------------------------------------------------- /utilities/glasbey/palettes/pastel2.txt: -------------------------------------------------------------------------------- 1 | 179,226,205 2 | 253,205,172 3 | 203,213,232 4 | 244,202,228 5 | 230,245,201 6 | 255,242,174 7 | 241,226,204 8 | 204,204,204 9 | -------------------------------------------------------------------------------- /utilities/glasbey/palettes/set1.txt: -------------------------------------------------------------------------------- 1 | 228,26,28 2 | 55,126,184 3 | 77,175,74 4 | 152,78,163 5 | 255,127,0 6 | 255,255,51 7 | 166,86,40 8 | 247,129,191 9 | 153,153,153 10 | -------------------------------------------------------------------------------- /utilities/glasbey/palettes/set2.txt: -------------------------------------------------------------------------------- 1 | 102,194,165 2 | 252,141,98 3 | 141,160,203 4 | 231,138,195 5 | 166,216,84 6 | 255,217,47 7 | 229,196,148 8 | 179,179,179 9 | -------------------------------------------------------------------------------- /utilities/glasbey/palettes/set3.txt: -------------------------------------------------------------------------------- 1 | 141,211,199 2 | 255,255,179 3 | 190,186,218 4 | 251,128,114 5 | 128,177,211 6 | 253,180,98 7 | 179,222,105 8 | 252,205,229 9 | 217,217,217 10 | 188,128,189 11 | 204,235,197 12 | 255,237,111 13 | -------------------------------------------------------------------------------- /utilities/glasbey/view_palette.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 
3 | 4 | import argparse 5 | 6 | import numpy as np 7 | 8 | 9 | def palette_to_image(palette): 10 | from PIL import Image 11 | 12 | WIDTH = 180 13 | HEIGHT_SEGMENT = 20 14 | img = Image.new("RGB", (WIDTH, HEIGHT_SEGMENT * len(palette)), "black") 15 | pixels = img.load() 16 | for i, color in enumerate(palette): 17 | if isinstance(color, int): 18 | b = (color >> 0) % 256 19 | g = (color >> 8) % 256 20 | r = (color >> 16) % 256 21 | color = (r, g, b) 22 | elif isinstance(color, np.ndarray): 23 | color = tuple(int(round(k * 255)) for k in color) 24 | for x in range(WIDTH): 25 | for y in range(HEIGHT_SEGMENT): 26 | pixels[x, y + i * HEIGHT_SEGMENT] = color 27 | return img 28 | 29 | 30 | if __name__ == "__main__": 31 | parser = argparse.ArgumentParser( 32 | description=""" 33 | View a palette stored in a given file. The script requires PIL (Python 34 | Imaging Library). 35 | """, 36 | formatter_class=argparse.RawDescriptionHelpFormatter, 37 | ) 38 | parser.add_argument("palette", type=argparse.FileType("r"), help="palette filename") 39 | parser.add_argument("--save", type=str, help="save as a PNG file") 40 | args = parser.parse_args() 41 | 42 | palette = list() 43 | for line in args.palette.readlines(): 44 | rgb = [int(c) for c in line.strip().split(",")] 45 | palette.append((rgb[0], rgb[1], rgb[2])) 46 | 47 | img = palette_to_image(palette) 48 | if args.save: 49 | img.save(args.save) 50 | else: 51 | img.show() 52 | -------------------------------------------------------------------------------- /utilities/log_statistics.csv: -------------------------------------------------------------------------------- 1 | utc_time,swap_percent,virtual_percent,cpu_percent,process_count 2 | 2019-09-20 16:43:21.278714,0.0,29.4,12.8,390 3 | 2019-09-20 16:43:21.796309,0.0,29.4,9.1,390 4 | 2019-09-20 16:43:22.324699,0.0,29.4,12.3,390 5 | 2019-09-20 16:43:22.874904,0.0,29.4,9.0,390 6 | 2019-09-20 16:43:23.425410,0.0,29.4,9.1,390 7 | 2019-09-20 16:43:23.979368,0.0,29.4,11.8,390 8 | 2019-09-20 
16:43:24.527770,0.0,29.4,12.7,390 9 | 2019-09-20 16:43:25.064347,0.0,29.4,5.2,390 10 | 2019-09-20 16:43:25.586090,0.0,29.4,14.9,390 11 | 2019-09-20 16:43:26.135196,0.0,29.4,9.0,390 12 | 2019-09-20 16:43:26.676088,0.0,29.4,15.8,390 13 | 2019-09-20 16:43:27.222991,0.0,29.4,7.0,390 14 | 2019-09-20 16:43:27.774605,0.0,29.4,9.6,390 15 | 2019-09-20 16:43:28.297189,0.0,29.4,15.2,390 16 | 2019-09-20 16:43:28.846550,0.0,29.4,14.4,390 17 | -------------------------------------------------------------------------------- /utilities/log_statistics.py: -------------------------------------------------------------------------------- 1 | import time 2 | import threading 3 | import psutil 4 | # gives a single float value 5 | import sys 6 | from datetime import datetime 7 | 8 | 9 | def count_processes(): 10 | count = 0 11 | for proc in psutil.process_iter(): 12 | try: 13 | # Get process name & pid from process object. 14 | processName = proc.name() 15 | processID = proc.pid 16 | 17 | 18 | #print(processName, ' ::: ', processID) 19 | count += 1 20 | except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): 21 | pass 22 | return count 23 | 24 | 25 | class Log_init: 26 | 27 | def __init__(self): 28 | log_file_path = "./log_statistics.csv" 29 | import os 30 | print(os.path.abspath(log_file_path)) 31 | self.log_file = open(log_file_path, "w") 32 | 33 | print("utc_time,swap_percent,virtual_percent,cpu_percent,process_count", file=self.log_file) 34 | 35 | def log(self): 36 | 37 | while True: 38 | time.sleep(0.5) 39 | print("{},{},{},{},{}".format(datetime.utcnow(), 40 | psutil.swap_memory().percent 41 | ,psutil.virtual_memory().percent 42 | ,psutil.cpu_percent() 43 | ,count_processes()),file=self.log_file) 44 | 45 | 46 | if __name__ == "__main__": 47 | Log_init().log() 48 | -------------------------------------------------------------------------------- /utilities/non_daemonic_pool.py: -------------------------------------------------------------------------------- 1 | 2 | 
import multiprocessing 3 | # We must import this explicitly, it is not imported by the top-level 4 | # multiprocessing module. 5 | import multiprocessing.pool 6 | import time 7 | 8 | from random import randint 9 | 10 | 11 | class NoDaemonProcess(multiprocessing.Process): 12 | # make 'daemon' attribute always return False 13 | def _get_daemon(self): 14 | return False 15 | def _set_daemon(self, value): 16 | pass 17 | daemon = property(_get_daemon, _set_daemon) 18 | 19 | # We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool 20 | # because the latter is only a wrapper function, not a proper class. 21 | class NonDeamonicPool(multiprocessing.pool.Pool): 22 | Process = NoDaemonProcess -------------------------------------------------------------------------------- /utilities/pandas_loader.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | import os 4 | import os.path as osp 5 | 6 | 7 | def load_csv(working_dir,csv_path): 8 | 9 | cache_folder_name = "cache/" 10 | cache_folder_path = osp.join(working_dir,cache_folder_name) 11 | os.makedirs(cache_folder_path,exist_ok=True) 12 | 13 | pickle_file_name = os.path.basename(csv_path) + ".pkl" 14 | 15 | csv_folder = os.path.dirname(csv_path) 16 | 17 | csv_folder = csv_folder[1:] 18 | 19 | cache_pickle_folder = osp.join(cache_folder_path,csv_folder) 20 | 21 | os.makedirs(cache_pickle_folder,exist_ok=True) 22 | 23 | pickle_path = osp.join(cache_pickle_folder,pickle_file_name) 24 | 25 | 26 | 27 | if not os.path.isfile(pickle_path): 28 | print("pkl not found: {}".format(pickle_path)) 29 | dataframe = pd.read_csv(csv_path) 30 | dataframe.to_pickle(pickle_path) 31 | else: 32 | print("pkl found: {}".format(pickle_path)) 33 | dataframe = pd.read_pickle(pickle_path) 34 | 35 | return dataframe 36 | 37 | 38 | if __name__ == "__main__": 39 | 
load_csv("/home/koehlp/Downloads/work_dirs","/net/merkur/storage/deeplearning/users/koehl/gta/GTA_Dataset_22.07.2019/test/cam_0/coords_cam_0.csv" ) -------------------------------------------------------------------------------- /utilities/python_path_utility.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import os 4 | 5 | def append_to_pythonpath(paths,run_file_path): 6 | for path in paths: 7 | my_path = os.path.abspath(os.path.dirname(run_file_path)) 8 | abs_path = os.path.join(my_path,path) 9 | sys.path.append(abs_path) 10 | -------------------------------------------------------------------------------- /utilities/reid_dataset_statistics.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import scipy.io as sio 3 | 4 | import os 5 | 6 | class Reid_dataset_statistics: 7 | def __init__(self,dataset_path): 8 | self.dataset_path = dataset_path 9 | 10 | def draw_bbox_height_histogram(self): 11 | 12 | test_mat = "0791_c6s3_092792_00_good.mat" 13 | 14 | test_mat_path = os.path.join(self.dataset_path,test_mat) 15 | 16 | print(sio.loadmat(test_mat_path)) 17 | pass 18 | 19 | 20 | if __name__ == "__main__": 21 | 22 | rds = Reid_dataset_statistics(dataset_path="/net/merkur/storage/deeplearning/datasets/reid/market-1501/gt_query") 23 | 24 | rds.draw_bbox_height_histogram() 25 | pass -------------------------------------------------------------------------------- /utilities/sort_mot_eval_columns.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | 4 | 5 | 6 | def sort_mot_eval_columns(in_path_or_dataframe,out_path): 7 | 8 | 9 | if isinstance(in_path_or_dataframe,str): 10 | in_path_or_dataframe = pd.read_csv(in_path_or_dataframe) 11 | 12 | column_names_sorted = ["Method", 'IDF1', 'IDP', 'IDR', 'Rcll', 'Prcn' 13 | , 'GT', 'MT', 'PT', 'ML' 14 | , 'FP', 'FN', 'IDs', 'FM', 'MOTA', 
'MOTP'] 15 | 16 | in_path_or_dataframe = in_path_or_dataframe[column_names_sorted] 17 | 18 | in_path_or_dataframe.to_csv(out_path) 19 | 20 | 21 | 22 | 23 | 24 | 25 | if __name__ == "__main__": 26 | sort_mot_eval_columns( 27 | in_path_or_dataframe="/media/philipp/philippkoehl_ssd/work_dirs/evaluation_results_mta/MTA_FRCNN.csv" 28 | , out_path="/media/philipp/philippkoehl_ssd/work_dirs/evaluation_results_mta/MTA_FRCNN_sorted.csv") -------------------------------------------------------------------------------- /utilities/track_result_statistics.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | import pandas as pd 5 | import sys 6 | 7 | def count_tracks(track_results_path): 8 | 9 | if isinstance(track_results_path,str): 10 | track_results = pd.read_csv(track_results_path) 11 | 12 | 13 | if len(track_results) > 0: 14 | track_results = track_results.groupby("person_id",as_index=False).mean() 15 | 16 | number_of_tracks = len(track_results) 17 | 18 | result = "Track results from: {} \n" \ 19 | "Number of tracks: {}".format(track_results_path,number_of_tracks) 20 | 21 | return result 22 | 23 | 24 | def print_track_lengths(track_results_path): 25 | 26 | if isinstance(track_results_path,str): 27 | track_results = pd.read_csv(track_results_path) 28 | 29 | 30 | 31 | track_results = track_results.groupby("person_id",as_index=False).count() 32 | 33 | for index, row in track_results.iterrows(): 34 | 35 | print("person_id: {} track_length: {}".format(row[0],row[1])) 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | if __name__ == "__main__": 44 | result = print_track_lengths("/home/philipp/work_dirs/clustering/single_camera_refinement/track_results_2.txt") 45 | 46 | print(result) --------------------------------------------------------------------------------