├── INSTALL_pytracking.md ├── LICENSE ├── README.md ├── README_pytracking.md ├── install.sh ├── install_pytracking.sh ├── ltr ├── README.md ├── __init__.py ├── __pycache__ │ └── __init__.cpython-37.pyc ├── actors │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── base_actor.cpython-37.pyc │ │ ├── bbreg.cpython-37.pyc │ │ └── siam_sel.cpython-37.pyc │ ├── base_actor.py │ ├── bbreg.py │ ├── siam_sel.py │ └── siam_sel2.py ├── admin │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── environment.cpython-37.pyc │ │ ├── loading.cpython-37.pyc │ │ ├── local.cpython-37.pyc │ │ ├── model_constructor.cpython-37.pyc │ │ ├── settings.cpython-37.pyc │ │ ├── stats.cpython-37.pyc │ │ └── tensorboard.cpython-37.pyc │ ├── environment.py │ ├── loading.py │ ├── local.py │ ├── model_constructor.py │ ├── settings.py │ ├── stats.py │ └── tensorboard.py ├── data │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── image_loader.cpython-37.pyc │ │ ├── loader.cpython-37.pyc │ │ ├── processing.cpython-37.pyc │ │ ├── processing_utils.cpython-37.pyc │ │ ├── sampler.cpython-37.pyc │ │ └── transforms.cpython-37.pyc │ ├── image_loader.py │ ├── loader.py │ ├── processing.py │ ├── processing_utils.py │ ├── sampler.py │ └── transforms.py ├── data_specs │ ├── got10k_train_split.txt │ ├── got10k_val_split.txt │ └── lasot_train_split.txt ├── dataset │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── base_dataset.cpython-37.pyc │ │ ├── coco_seq.cpython-37.pyc │ │ ├── got10k.cpython-37.pyc │ │ ├── imagenetvid.cpython-37.pyc │ │ ├── lasot.cpython-37.pyc │ │ └── tracking_net.cpython-37.pyc │ ├── base_dataset.py │ ├── coco_seq.py │ ├── got10k.py │ ├── imagenetvid.py │ ├── lasot.py │ └── tracking_net.py ├── external │ └── PreciseRoIPooling │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── _assets │ │ └── prroi_visualization.png │ │ ├── pytorch │ │ ├── prroi_pool │ │ │ ├── .gitignore │ │ │ ├── 
__init__.py │ │ │ ├── functional.py │ │ │ ├── prroi_pool.py │ │ │ └── src │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ └── tests │ │ │ └── test_prroi_pooling2d.py │ │ └── src │ │ ├── prroi_pooling_gpu_impl.cu │ │ └── prroi_pooling_gpu_impl.cuh ├── models │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-37.pyc │ ├── backbone │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── resnet.cpython-37.pyc │ │ │ └── resnet18_vggm.cpython-37.pyc │ │ ├── resnet.py │ │ └── resnet18_vggm.py │ ├── bbreg │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── atom.cpython-37.pyc │ │ │ └── atom_iou_net.cpython-37.pyc │ │ ├── atom.py │ │ └── atom_iou_net.py │ ├── layers │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ └── blocks.cpython-37.pyc │ │ ├── batch_norm.py │ │ └── blocks.py │ └── siam_sel │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ └── siam_sel.cpython-37.pyc │ │ ├── siam_sel.py │ │ ├── siam_sel1_3.py │ │ ├── siam_sel1_4.py │ │ └── siam_sel2.py ├── run_training.py ├── run_training2.py ├── train_settings │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-37.pyc │ ├── bbreg │ │ ├── __init__.py │ │ └── atom_default.py │ └── siam_sel │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── bounding_box.cpython-37.pyc │ │ └── default3_1.cpython-37.pyc │ │ ├── bounding_box.py │ │ ├── default.py │ │ ├── default1_1.py │ │ ├── default1_2.py │ │ ├── default1_3.py │ │ ├── default1_4.py │ │ ├── default2.py │ │ └── default3_1.py └── trainers │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── base_trainer.cpython-37.pyc │ └── ltr_trainer.cpython-37.pyc │ ├── base_trainer.py │ └── ltr_trainer.py ├── py37.yaml ├── pytracking ├── README.md ├── __init__.py ├── evaluation │ ├── __init__.py │ ├── data.py │ ├── 
environment.py │ ├── got10kdataset.py │ ├── lasotdataset.py │ ├── local.py │ ├── nfsdataset.py │ ├── otbdataset.py │ ├── running.py │ ├── tpldataset.py │ ├── tracker.py │ ├── trackingnetdataset.py │ ├── uavdataset.py │ └── votdataset.py ├── experiments │ ├── __init__.py │ └── myexperiments.py ├── features │ ├── __init__.py │ ├── augmentation.py │ ├── color.py │ ├── deep.py │ ├── extractor.py │ ├── featurebase.py │ ├── preprocessing.py │ └── util.py ├── libs │ ├── __init__.py │ ├── complex.py │ ├── dcf.py │ ├── fourier.py │ ├── operation.py │ ├── optimization.py │ ├── tensordict.py │ └── tensorlist.py ├── mmdetection │ ├── .gitignore │ ├── .style.yapf │ ├── .travis.yml │ ├── CONTRIBUTING.md │ ├── GETTING_STARTED.md │ ├── INSTALL.md │ ├── LICENSE │ ├── MODEL_ZOO.md │ ├── README.md │ ├── TECHNICAL_DETAILS.md │ ├── compile.sh │ ├── configs │ │ ├── cascade_mask_rcnn_r101_fpn_1x.py │ │ ├── cascade_mask_rcnn_r50_caffe_c4_1x.py │ │ ├── cascade_mask_rcnn_r50_fpn_1x.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x.py │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_1x.py │ │ ├── cascade_rcnn_r101_fpn_1x.py │ │ ├── cascade_rcnn_r50_caffe_c4_1x.py │ │ ├── cascade_rcnn_r50_fpn_1x.py │ │ ├── cascade_rcnn_x101_32x4d_fpn_1x.py │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x.py │ │ ├── dcn │ │ │ ├── README.md │ │ │ ├── cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ │ ├── cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ │ ├── faster_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ │ ├── faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py │ │ │ ├── faster_rcnn_dpool_r50_fpn_1x.py │ │ │ ├── faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py │ │ │ ├── faster_rcnn_mdpool_r50_fpn_1x.py │ │ │ └── mask_rcnn_dconv_c3-c5_r50_fpn_1x.py │ │ ├── fast_mask_rcnn_r101_fpn_1x.py │ │ ├── fast_mask_rcnn_r50_caffe_c4_1x.py │ │ ├── fast_mask_rcnn_r50_fpn_1x.py │ │ ├── fast_rcnn_r101_fpn_1x.py │ │ ├── fast_rcnn_r50_caffe_c4_1x.py │ │ ├── fast_rcnn_r50_fpn_1x.py │ │ ├── faster_rcnn_ohem_r50_fpn_1x.py │ │ ├── faster_rcnn_r101_fpn_1x.py │ │ ├── 
faster_rcnn_r50_caffe_c4_1x.py │ │ ├── faster_rcnn_r50_fpn_1x.py │ │ ├── faster_rcnn_x101_32x4d_fpn_1x.py │ │ ├── faster_rcnn_x101_64x4d_fpn_1x.py │ │ ├── fcos │ │ │ ├── README.md │ │ │ ├── fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py │ │ │ ├── fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py │ │ │ └── fcos_r50_caffe_fpn_gn_1x_4gpu.py │ │ ├── gn+ws │ │ │ ├── README.md │ │ │ ├── faster_rcnn_r50_fpn_gn_ws_1x.py │ │ │ ├── mask_rcnn_r50_fpn_gn_ws_20_23_24e.py │ │ │ ├── mask_rcnn_r50_fpn_gn_ws_2x.py │ │ │ └── mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py │ │ ├── gn │ │ │ ├── README.md │ │ │ ├── mask_rcnn_r101_fpn_gn_2x.py │ │ │ ├── mask_rcnn_r50_fpn_gn_2x.py │ │ │ └── mask_rcnn_r50_fpn_gn_contrib_2x.py │ │ ├── guided_anchoring │ │ │ ├── README.md │ │ │ ├── ga_fast_r50_caffe_fpn_1x.py │ │ │ ├── ga_faster_r50_caffe_fpn_1x.py │ │ │ ├── ga_faster_x101_32x4d_fpn_1x.py │ │ │ ├── ga_retinanet_r50_caffe_fpn_1x.py │ │ │ ├── ga_retinanet_x101_32x4d_fpn_1x.py │ │ │ ├── ga_rpn_r101_caffe_rpn_1x.py │ │ │ ├── ga_rpn_r50_caffe_fpn_1x.py │ │ │ └── ga_rpn_x101_32x4d_fpn_1x.py │ │ ├── hrnet │ │ │ ├── README.md │ │ │ ├── cascade_rcnn_hrnetv2p_w32_20e.py │ │ │ ├── faster_rcnn_hrnetv2p_w18_1x.py │ │ │ ├── faster_rcnn_hrnetv2p_w32_1x.py │ │ │ ├── faster_rcnn_hrnetv2p_w40_1x.py │ │ │ ├── mask_rcnn_hrnetv2p_w18_1x.py │ │ │ └── mask_rcnn_hrnetv2p_w32_1x.py │ │ ├── htc │ │ │ ├── README.md │ │ │ ├── htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e (copy).py │ │ │ ├── htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py │ │ │ ├── htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_train.py │ │ │ ├── htc_r101_fpn_20e.py │ │ │ ├── htc_r50_fpn_1x.py │ │ │ ├── htc_r50_fpn_20e.py │ │ │ ├── htc_without_semantic_r50_fpn_1x.py │ │ │ ├── htc_x101_32x4d_fpn_20e_16gpu.py │ │ │ └── htc_x101_64x4d_fpn_20e_16gpu.py │ │ ├── mask_rcnn_r101_fpn_1x.py │ │ ├── mask_rcnn_r50_caffe_c4_1x.py │ │ ├── mask_rcnn_r50_fpn_1x.py │ │ ├── mask_rcnn_x101_32x4d_fpn_1x.py │ │ ├── mask_rcnn_x101_64x4d_fpn_1x.py │ │ ├── pascal_voc │ │ 
│ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ │ │ ├── ssd300_voc.py │ │ │ └── ssd512_voc.py │ │ ├── retinanet_r101_fpn_1x.py │ │ ├── retinanet_r50_fpn_1x.py │ │ ├── retinanet_x101_32x4d_fpn_1x.py │ │ ├── retinanet_x101_64x4d_fpn_1x.py │ │ ├── rpn_r101_fpn_1x.py │ │ ├── rpn_r50_caffe_c4_1x.py │ │ ├── rpn_r50_fpn_1x.py │ │ ├── rpn_x101_32x4d_fpn_1x.py │ │ ├── rpn_x101_64x4d_fpn_1x.py │ │ ├── scratch │ │ │ ├── README.md │ │ │ ├── scratch_faster_rcnn_r50_fpn_gn_6x.py │ │ │ └── scratch_mask_rcnn_r50_fpn_gn_6x.py │ │ ├── ssd300_coco.py │ │ └── ssd512_coco.py │ ├── demo │ │ └── coco_test_12510.jpg │ ├── mmdet │ │ ├── __init__.py │ │ ├── apis │ │ │ ├── __init__.py │ │ │ ├── env.py │ │ │ ├── inference.py │ │ │ └── train.py │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── anchor │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_generator.py │ │ │ │ ├── anchor_target.py │ │ │ │ └── guided_anchor_target.py │ │ │ ├── bbox │ │ │ │ ├── __init__.py │ │ │ │ ├── assign_sampling.py │ │ │ │ ├── assigners │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ │ ├── assign_result.py │ │ │ │ │ ├── base_assigner.py │ │ │ │ │ └── max_iou_assigner.py │ │ │ │ ├── bbox_target.py │ │ │ │ ├── geometry.py │ │ │ │ ├── samplers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_sampler.py │ │ │ │ │ ├── combined_sampler.py │ │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ │ ├── ohem_sampler.py │ │ │ │ │ ├── pseudo_sampler.py │ │ │ │ │ ├── random_sampler.py │ │ │ │ │ └── sampling_result.py │ │ │ │ └── transforms.py │ │ │ ├── evaluation │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_overlaps.py │ │ │ │ ├── class_names.py │ │ │ │ ├── coco_utils.py │ │ │ │ ├── eval_hooks.py │ │ │ │ ├── mean_ap.py │ │ │ │ └── recall.py │ │ │ ├── loss │ │ │ │ ├── __init__.py │ │ │ │ └── losses.py │ │ │ ├── mask │ │ │ │ ├── __init__.py │ │ │ │ ├── mask_target.py │ │ │ │ └── utils.py │ │ │ ├── post_processing │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_nms.py │ │ │ │ └── merge_augs.py │ 
│ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── dist_utils.py │ │ │ │ └── misc.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── coco.py │ │ │ ├── concat_dataset.py │ │ │ ├── custom.py │ │ │ ├── extra_aug.py │ │ │ ├── loader │ │ │ │ ├── __init__.py │ │ │ │ ├── build_loader.py │ │ │ │ └── sampler.py │ │ │ ├── repeat_dataset.py │ │ │ ├── transforms.py │ │ │ ├── utils.py │ │ │ ├── voc.py │ │ │ └── xml_style.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── anchor_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_head.py │ │ │ │ ├── fcos_head.py │ │ │ │ ├── ga_retina_head.py │ │ │ │ ├── ga_rpn_head.py │ │ │ │ ├── guided_anchor_head.py │ │ │ │ ├── retina_head.py │ │ │ │ ├── rpn_head.py │ │ │ │ └── ssd_head.py │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── hrnet.py │ │ │ │ ├── resnet.py │ │ │ │ ├── resnext.py │ │ │ │ └── ssd_vgg.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_head.py │ │ │ │ └── convfc_bbox_head.py │ │ │ ├── builder.py │ │ │ ├── detectors │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── cascade_rcnn.py │ │ │ │ ├── fast_rcnn.py │ │ │ │ ├── faster_rcnn.py │ │ │ │ ├── fcos.py │ │ │ │ ├── htc.py │ │ │ │ ├── mask_rcnn.py │ │ │ │ ├── retinanet.py │ │ │ │ ├── rpn.py │ │ │ │ ├── single_stage.py │ │ │ │ ├── test_mixins.py │ │ │ │ └── two_stage.py │ │ │ ├── losses │ │ │ │ ├── __init__.py │ │ │ │ ├── cross_entropy_loss.py │ │ │ │ ├── focal_loss.py │ │ │ │ ├── iou_loss.py │ │ │ │ └── smooth_l1_loss.py │ │ │ ├── mask_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── fcn_mask_head.py │ │ │ │ ├── fused_semantic_head.py │ │ │ │ └── htc_mask_head.py │ │ │ ├── necks │ │ │ │ ├── __init__.py │ │ │ │ ├── fpn.py │ │ │ │ └── hrfpn.py │ │ │ ├── registry.py │ │ │ ├── roi_extractors │ │ │ │ ├── __init__.py │ │ │ │ └── single_level.py │ │ │ ├── shared_heads │ │ │ │ ├── __init__.py │ │ │ │ └── res_layer.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── conv_module.py │ │ │ │ ├── conv_ws.py │ │ │ │ ├── norm.py │ │ │ │ ├── scale.py │ │ │ │ └── 
weight_init.py │ │ └── ops │ │ │ ├── __init__.py │ │ │ ├── dcn │ │ │ ├── __init__.py │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ ├── deform_conv.py │ │ │ │ └── deform_pool.py │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ ├── deform_conv.py │ │ │ │ └── deform_pool.py │ │ │ ├── setup.py │ │ │ └── src │ │ │ │ ├── deform_conv_cuda.cpp │ │ │ │ ├── deform_conv_cuda_kernel.cu │ │ │ │ ├── deform_pool_cuda.cpp │ │ │ │ └── deform_pool_cuda_kernel.cu │ │ │ ├── masked_conv │ │ │ ├── __init__.py │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ └── masked_conv.py │ │ │ ├── modules │ │ │ │ ├── __Init__.py │ │ │ │ └── masked_conv.py │ │ │ ├── setup.py │ │ │ └── src │ │ │ │ ├── masked_conv2d_cuda.cpp │ │ │ │ └── masked_conv2d_kernel.cu │ │ │ ├── nms │ │ │ ├── __init__.py │ │ │ ├── nms_wrapper.py │ │ │ ├── setup.py │ │ │ └── src │ │ │ │ ├── nms_cpu.cpp │ │ │ │ ├── nms_cuda.cpp │ │ │ │ ├── nms_kernel.cu │ │ │ │ └── soft_nms_cpu.pyx │ │ │ ├── roi_align │ │ │ ├── __init__.py │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ └── roi_align.py │ │ │ ├── gradcheck.py │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ └── roi_align.py │ │ │ ├── setup.py │ │ │ └── src │ │ │ │ ├── roi_align_cuda.cpp │ │ │ │ └── roi_align_kernel.cu │ │ │ ├── roi_pool │ │ │ ├── __init__.py │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ └── roi_pool.py │ │ │ ├── gradcheck.py │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ └── roi_pool.py │ │ │ ├── setup.py │ │ │ └── src │ │ │ │ ├── roi_pool_cuda.cpp │ │ │ │ └── roi_pool_kernel.cu │ │ │ └── sigmoid_focal_loss │ │ │ ├── __init__.py │ │ │ ├── functions │ │ │ ├── __init__.py │ │ │ └── sigmoid_focal_loss.py │ │ │ ├── modules │ │ │ ├── __init__.py │ │ │ └── sigmoid_focal_loss.py │ │ │ ├── setup.py │ │ │ └── src │ │ │ ├── sigmoid_focal_loss.cpp │ │ │ └── sigmoid_focal_loss_cuda.cu │ ├── setup.py │ └── tools │ │ ├── analyze_logs.py │ │ ├── coco_eval.py │ │ ├── convert_datasets │ │ └── pascal_voc.py │ │ ├── detectron2pytorch.py │ │ ├── dist_test.sh 
│ │ ├── dist_train.sh │ │ ├── publish_model.py │ │ ├── slurm_test.sh │ │ ├── slurm_train.sh │ │ ├── test.py │ │ ├── train.py │ │ ├── upgrade_model_version.py │ │ ├── voc_eval.py │ │ └── work_dirs │ │ └── htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e │ │ └── 20190609_193702.log.json ├── parameter │ ├── ATCAIS │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ └── default.cpython-37.pyc │ │ ├── default.py │ │ └── default_vot.py │ ├── ATCAIS_cpu │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ └── default.cpython-37.pyc │ │ ├── default.py │ │ └── default_vot.py │ ├── __init__.py │ ├── atom │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ └── default.cpython-37.pyc │ │ ├── default.py │ │ └── default_vot.py │ └── eco │ │ ├── __init__.py │ │ └── default.py ├── run_experiment.py ├── run_tracker.py ├── run_tracker2.py ├── run_video.py ├── run_webcam.py ├── tracker │ ├── ATCAIS │ │ ├── __init__.py │ │ ├── atom.py │ │ ├── bounding_box.py │ │ ├── det.py │ │ ├── det_mmdet.py │ │ └── optim.py │ ├── ATCAIS_cpu │ │ ├── __init__.py │ │ ├── atom.py │ │ ├── bounding_box.py │ │ ├── det.py │ │ ├── det_mmdet.py │ │ └── optim.py │ ├── __init__.py │ ├── atom │ │ ├── __init__.py │ │ ├── atom.py │ │ └── optim.py │ ├── base │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ └── basetracker.cpython-37.pyc │ │ └── basetracker.py │ └── eco │ │ ├── __init__.py │ │ ├── eco.py │ │ └── optim.py ├── utils │ ├── __init__.py │ ├── atom_overview.png │ ├── gdrive_download │ ├── params.py │ ├── plotting.py │ └── stat_depth.py └── vot │ ├── vot.py │ ├── vot_ATCAIS.py │ └── vot_ATCAIS_cpu.py ├── tracker_ATCAIS.m ├── tracker_ATCAIS_cpu.m └── vot_rgbd2019_result └── ATCAIS.zip /README_pytracking.md: -------------------------------------------------------------------------------- 1 | # PyTracking 2 | A general python framework for training and running visual object trackers, based on 
**PyTorch**. 3 | 4 | **News:** Upgraded to latest version of PyTorch (v1.x). 5 | 6 | ## Highlights 7 | 8 | ### [ATOM](https://arxiv.org/pdf/1811.07628.pdf) 9 | 10 | Official implementation of the **ATOM** tracker (CVPR 2019), including complete **training code** and trained models. 11 | 12 | ### [Tracking Libraries](pytracking) 13 | 14 | Libraries for implementing and evaluating visual trackers. Including: 15 | 16 | * All common tracking datasets. 17 | * General building blocks, including **optimization**, **feature extraction** and utilities for **correlation filter** tracking. 18 | 19 | ### [Training Code](ltr) 20 | 21 | General framework for training networks for visual tracking. 22 | 23 | * All common training datasets for visual tracking. 24 | * Functions for data sampling, processing etc. 25 | * Integration of ATOM models 26 | * More to come ... ;) 27 | 28 | 29 | ## Installation 30 | 31 | #### Clone the GIT repository. 32 | ```bash 33 | git clone https://github.com/visionml/pytracking.git 34 | ``` 35 | 36 | #### Clone the submodules. 37 | In the repository directory, run the commands: 38 | ```bash 39 | git submodule update --init 40 | ``` 41 | #### Install dependencies 42 | Run the installation script to install all the dependencies. You need to provide the conda install path (e.g. ~/anaconda3) and the name for the created conda environment (here ```pytracking```). 43 | ```bash 44 | bash install.sh conda_install_path pytracking 45 | ``` 46 | This script will also download the default networks and set-up the environment. 47 | 48 | **Note:** The install script has been tested on an Ubuntu 18.04 system. In case of issues, check the [detailed installation instructions](INSTALL.md). 49 | 50 | 51 | #### Let's test it! 52 | Activate the conda environment and run the script pytracking/run_webcam.py to run ATOM using the webcam input. 
53 | ```bash 54 | conda activate pytracking 55 | cd pytracking 56 | python run_webcam.py atom default 57 | ``` 58 | 59 | ## What's next? 60 | 61 | #### [pytracking](pytracking) - for implementing your tracker 62 | 63 | #### [ltr](ltr) - for training your tracker 64 | 65 | ## Contributors 66 | 67 | * [Martin Danelljan](https://martin-danelljan.github.io/) 68 | * [Goutam Bhat](https://www.vision.ee.ethz.ch/en/members/detail/407/) 69 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | conda env create -n py37 -f py37.yaml 4 | conda activate py37 5 | pip install cython 6 | pip install tqdm 7 | 8 | cd pytracking/mmdetection 9 | sh ./compile.sh 10 | python setup.py develop 11 | cd ../.. 12 | 13 | 14 | 15 | 16 | echo "" 17 | echo "" 18 | echo "****************** Installing jpeg4py ******************" 19 | while true; do 20 | read -p "Install jpeg4py for reading images? This step required sudo privilege. Installing jpeg4py is optional, however recommended. 
[y,n] " install_flag 21 | case $install_flag in 22 | [Yy]* ) sudo apt-get install libturbojpeg; break;; 23 | [Nn]* ) echo "Skipping jpeg4py installation!"; break;; 24 | * ) echo "Please answer y or n ";; 25 | esac 26 | done 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /ltr/__init__.py: -------------------------------------------------------------------------------- 1 | from .admin.loading import load_network 2 | from .admin.model_constructor import model_constructor -------------------------------------------------------------------------------- /ltr/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/actors/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_actor import BaseActor 2 | from .bbreg import AtomActor 3 | from .siam_sel import SiamSelActor 4 | -------------------------------------------------------------------------------- /ltr/actors/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/actors/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/actors/__pycache__/base_actor.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/actors/__pycache__/base_actor.cpython-37.pyc -------------------------------------------------------------------------------- 
/ltr/actors/__pycache__/bbreg.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/actors/__pycache__/bbreg.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/actors/__pycache__/siam_sel.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/actors/__pycache__/siam_sel.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/actors/base_actor.py: -------------------------------------------------------------------------------- 1 | from pytracking import TensorDict 2 | 3 | 4 | class BaseActor: 5 | """ Base class for actor. The actor class handles the passing of the data through the network 6 | and calculation the loss""" 7 | def __init__(self, net, objective): 8 | """ 9 | args: 10 | net - The network to train 11 | objective - The loss function 12 | """ 13 | self.net = net 14 | self.objective = objective 15 | 16 | def __call__(self, data: TensorDict): 17 | """ Called in each training iteration. Should pass in input data through the network, calculate the loss, and 18 | return the training stats for the input data 19 | args: 20 | data - A TensorDict containing all the necessary data blocks. 21 | 22 | returns: 23 | loss - loss for the input data 24 | stats - a dict containing detailed losses 25 | """ 26 | raise NotImplementedError 27 | 28 | def to(self, device): 29 | """ Move the network to device 30 | args: 31 | device - device to use. 'cpu' or 'cuda' 32 | """ 33 | self.net.to(device) 34 | 35 | def train(self, mode=True): 36 | """ Set whether the network is in train mode. 37 | args: 38 | mode (True) - Bool specifying whether in training mode. 
39 | """ 40 | self.net.train(mode) 41 | 42 | def eval(self): 43 | """ Set network to eval mode""" 44 | self.train(False) -------------------------------------------------------------------------------- /ltr/actors/bbreg.py: -------------------------------------------------------------------------------- 1 | from . import BaseActor 2 | 3 | 4 | class AtomActor(BaseActor): 5 | """ Actor for training the IoU-Net in ATOM""" 6 | def __call__(self, data): 7 | """ 8 | args: 9 | data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 10 | 'test_proposals' and 'proposal_iou'. 11 | 12 | returns: 13 | loss - the training loss 14 | states - dict containing detailed losses 15 | """ 16 | # Run network to obtain IoU prediction for each proposal in 'test_proposals' 17 | iou_pred = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals']) 18 | 19 | iou_pred = iou_pred.view(-1, iou_pred.shape[2]) 20 | iou_gt = data['proposal_iou'].view(-1, data['proposal_iou'].shape[2]) 21 | 22 | # Compute loss 23 | loss = self.objective(iou_pred, iou_gt) 24 | 25 | # Return training stats 26 | stats = {'Loss/total': loss.item(), 27 | 'Loss/iou': loss.item()} 28 | 29 | return loss, stats -------------------------------------------------------------------------------- /ltr/actors/siam_sel.py: -------------------------------------------------------------------------------- 1 | from . import BaseActor 2 | 3 | 4 | class SiamSelActor(BaseActor): 5 | """ Actor for training the IoU-Net in ATOM""" 6 | def __call__(self, data): 7 | """ 8 | args: 9 | data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 10 | 'test_proposals' and 'proposal_iou'. 
11 | 12 | returns: 13 | loss - the training loss 14 | states - dict containing detailed losses 15 | """ 16 | # Run network to obtain IoU prediction for each proposal in 'test_proposals' 17 | predict_scores1,labels1,predict_scores2,labels2 = self.net(**data) 18 | 19 | 20 | # # Compute loss 21 | loss1 = self.objective(predict_scores1.reshape(-1), labels1.reshape(-1)) 22 | loss2 = self.objective(predict_scores2.reshape(-1), labels2.reshape(-1)) 23 | loss=loss1+loss2 24 | 25 | stats={'loss':loss} 26 | 27 | return loss, stats -------------------------------------------------------------------------------- /ltr/actors/siam_sel2.py: -------------------------------------------------------------------------------- 1 | from . import BaseActor 2 | import torch 3 | from collections import OrderedDict 4 | class SiamSelActor(BaseActor): 5 | """ Actor for training the IoU-Net in ATOM""" 6 | def __call__(self, data): 7 | """ 8 | args: 9 | data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 10 | 'test_proposals' and 'proposal_iou'. 
11 | 12 | returns: 13 | loss - the training loss 14 | states - dict containing detailed losses 15 | """ 16 | # Run network to obtain IoU prediction for each proposal in 'test_proposals' 17 | predict_scores1,labels1,predict_scores2,labels2,loss_det1,loss_det2 = self.net(**data) 18 | 19 | 20 | # # Compute loss 21 | loss1 = self.objective(predict_scores1.reshape(-1), labels1.reshape(-1)) 22 | loss2 = self.objective(predict_scores2.reshape(-1), labels2.reshape(-1)) 23 | 24 | loss_cls1,_=self.parse_losses(loss_det1) 25 | loss_cls2,_=self.parse_losses(loss_det2) 26 | # loss_det=0 27 | # for key in loss_det1: 28 | # if key.find("loss")>=0 and key.find("cls")>=0: 29 | # if isinstance(loss_det1[key],torch.Tensor): 30 | # loss_det+=loss_det1[key] 31 | # else: 32 | # for value in loss_det1[key]: 33 | # loss_det+=value 34 | # 35 | # for key in loss_det2: 36 | # if key.find("loss")>=0 and key.find("cls")>=0: 37 | # if isinstance(loss_det2[key],torch.Tensor): 38 | # loss_det+=loss_det2[key] 39 | # else: 40 | # for value in loss_det2[key]: 41 | # loss_det+=value 42 | 43 | loss=0.1*loss_cls1+0.1*loss_cls2+loss1+loss2 44 | 45 | stats={'loss_sum':loss.item(),'loss_cls':0.1*loss_cls1.item()+0.1*loss_cls2.item(),'loss_sel':loss1.item()+loss2.item()} 46 | 47 | return loss, stats 48 | 49 | def parse_losses(self,losses): 50 | log_vars = OrderedDict() 51 | for loss_name, loss_value in losses.items(): 52 | if isinstance(loss_value, torch.Tensor): 53 | log_vars[loss_name] = loss_value.mean() 54 | elif isinstance(loss_value, list): 55 | log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) 56 | else: 57 | raise TypeError( 58 | '{} is not a tensor or list of tensors'.format(loss_name)) 59 | 60 | loss = sum(_value for _key, _value in log_vars.items() if 'cls' in _key) 61 | 62 | log_vars['loss'] = loss 63 | for name in log_vars: 64 | log_vars[name] = log_vars[name].item() 65 | 66 | return loss, log_vars -------------------------------------------------------------------------------- 
/ltr/admin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__init__.py -------------------------------------------------------------------------------- /ltr/admin/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/admin/__pycache__/environment.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/environment.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/admin/__pycache__/loading.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/loading.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/admin/__pycache__/local.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/local.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/admin/__pycache__/model_constructor.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/model_constructor.cpython-37.pyc 
-------------------------------------------------------------------------------- /ltr/admin/__pycache__/settings.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/settings.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/admin/__pycache__/stats.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/stats.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/admin/__pycache__/tensorboard.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/admin/__pycache__/tensorboard.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/admin/environment.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | from collections import OrderedDict 4 | 5 | 6 | def create_default_local_file(): 7 | path = os.path.join(os.path.dirname(__file__), 'local.py') 8 | 9 | empty_str = '\'\'' 10 | default_settings = OrderedDict({ 11 | 'workspace_dir': empty_str, 12 | 'tensorboard_dir': 'self.workspace_dir + \'/tensorboard/\'', 13 | 'lasot_dir': empty_str, 14 | 'got10k_dir': empty_str, 15 | 'trackingnet_dir': empty_str, 16 | 'coco_dir': empty_str, 17 | 'imagenet_dir': empty_str, 18 | 'imagenetdet_dir': empty_str}) 19 | 20 | comment = {'workspace_dir': 'Base directory for saving network checkpoints.', 21 | 'tensorboard_dir': 'Directory for tensorboard files.'} 22 | 23 | with open(path, 'w') as f: 24 | f.write('class 
EnvironmentSettings:\n') 25 | f.write(' def __init__(self):\n') 26 | 27 | for attr, attr_val in default_settings.items(): 28 | comment_str = None 29 | if attr in comment: 30 | comment_str = comment[attr] 31 | if comment_str is None: 32 | f.write(' self.{} = {}\n'.format(attr, attr_val)) 33 | else: 34 | f.write(' self.{} = {} # {}\n'.format(attr, attr_val, comment_str)) 35 | 36 | 37 | def env_settings(): 38 | env_module_name = 'ltr.admin.local' 39 | try: 40 | env_module = importlib.import_module(env_module_name) 41 | return env_module.EnvironmentSettings() 42 | except: 43 | env_file = os.path.join(os.path.dirname(__file__), 'local.py') 44 | 45 | create_default_local_file() 46 | raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. Then try to run again.'.format(env_file)) 47 | -------------------------------------------------------------------------------- /ltr/admin/local.py: -------------------------------------------------------------------------------- 1 | class EnvironmentSettings: 2 | def __init__(self): 3 | self.workspace_dir = './checkpoint' # Base directory for saving network checkpoints. 4 | self.tensorboard_dir = self.workspace_dir + '/tensorboard/' # Directory for tensorboard files. 5 | self.lasot_dir = '' 6 | self.got10k_dir = '' 7 | self.trackingnet_dir = '' 8 | self.coco_dir = '' 9 | self.imagenet_dir = '' 10 | self.imagenetdet_dir = '' 11 | self.lasot_dir = '/media/tangjiuqi097/ext/LaSOTBenchmark' 12 | self.trackingnet_dir = '/media/tangjiuqi097/ext/TrackingNet' 13 | self.coco_dir = '/home/tangjiuqi097/data/coco' 14 | 15 | -------------------------------------------------------------------------------- /ltr/admin/model_constructor.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | import importlib 3 | 4 | 5 | def model_constructor(f): 6 | """ Wraps the function 'f' which returns the network. 
An extra field 'constructor' is added to the network returned 7 | by 'f'. This field contains an instance of the 'NetConstructor' class, which contains the information needed to 8 | re-construct the network, such as the name of the function 'f', the function arguments etc. Thus, the network can 9 | be easily constructed from a saved checkpoint by calling NetConstructor.get() function. 10 | """ 11 | @wraps(f) 12 | def f_wrapper(*args, **kwds): 13 | net_constr = NetConstructor(f.__name__, f.__module__, args, kwds) 14 | output = f(*args, **kwds) 15 | if isinstance(output, (tuple, list)): 16 | # Assume first argument is the network 17 | output[0].constructor = net_constr 18 | else: 19 | output.constructor = net_constr 20 | return output 21 | return f_wrapper 22 | 23 | 24 | class NetConstructor: 25 | """ Class to construct networks. Takes as input the function name (e.g. atom_resnet18), the name of the module 26 | which contains the network function (e.g. ltr.models.bbreg.atom) and the arguments for the network 27 | function. The class object can then be stored along with the network weights to re-construct the network.""" 28 | def __init__(self, fun_name, fun_module, args, kwds): 29 | """ 30 | args: 31 | fun_name - The function which returns the network 32 | fun_module - the module which contains the network function 33 | args - arguments which are passed to the network function 34 | kwds - arguments which are passed to the network function 35 | """ 36 | self.fun_name = fun_name 37 | self.fun_module = fun_module 38 | self.args = args 39 | self.kwds = kwds 40 | 41 | def get(self): 42 | """ Rebuild the network by calling the network function with the correct arguments. 
""" 43 | net_module = importlib.import_module(self.fun_module) 44 | net_fun = getattr(net_module, self.fun_name) 45 | return net_fun(*self.args, **self.kwds) 46 | -------------------------------------------------------------------------------- /ltr/admin/settings.py: -------------------------------------------------------------------------------- 1 | from ltr.admin.environment import env_settings 2 | 3 | 4 | class Settings: 5 | """ Training settings, e.g. the paths to datasets and networks.""" 6 | def __init__(self): 7 | self.set_default() 8 | 9 | def set_default(self): 10 | self.env = env_settings() 11 | self.use_gpu = True 12 | 13 | 14 | -------------------------------------------------------------------------------- /ltr/admin/stats.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class StatValue: 4 | def __init__(self): 5 | self.clear() 6 | 7 | def reset(self): 8 | self.val = 0 9 | 10 | def clear(self): 11 | self.reset() 12 | self.history = [] 13 | 14 | def update(self, val): 15 | self.val = val 16 | self.history.append(self.val) 17 | 18 | 19 | class AverageMeter(object): 20 | """Computes and stores the average and current value""" 21 | def __init__(self): 22 | self.clear() 23 | self.has_new_data = False 24 | 25 | def reset(self): 26 | self.avg = 0 27 | self.val = 0 28 | self.sum = 0 29 | self.count = 0 30 | 31 | def clear(self): 32 | self.reset() 33 | self.history = [] 34 | 35 | def update(self, val, n=1): 36 | self.val = val 37 | self.sum += val * n 38 | self.count += n 39 | self.avg = self.sum / self.count 40 | 41 | def new_epoch(self): 42 | if self.count > 0: 43 | self.history.append(self.avg) 44 | self.reset() 45 | self.has_new_data = True 46 | else: 47 | self.has_new_data = False 48 | 49 | 50 | def topk_accuracy(output, target, topk=(1,)): 51 | """Computes the precision@k for the specified values of k""" 52 | single_input = not isinstance(topk, (tuple, list)) 53 | if single_input: 54 | topk = (topk,) 55 | 56 
| maxk = max(topk) 57 | batch_size = target.size(0) 58 | 59 | _, pred = output.topk(maxk, 1, True, True) 60 | pred = pred.t() 61 | correct = pred.eq(target.view(1, -1).expand_as(pred)) 62 | 63 | res = [] 64 | for k in topk: 65 | correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0] 66 | res.append(correct_k * 100.0 / batch_size) 67 | 68 | if single_input: 69 | return res[0] 70 | 71 | return res 72 | -------------------------------------------------------------------------------- /ltr/admin/tensorboard.py: -------------------------------------------------------------------------------- 1 | import os 2 | from collections import OrderedDict 3 | from tensorboardX import SummaryWriter 4 | 5 | 6 | class TensorboardWriter: 7 | def __init__(self, directory, loader_names): 8 | self.directory = directory 9 | self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names}) 10 | 11 | def write_info(self, module_name, script_name, description): 12 | tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info')) 13 | tb_info_writer.add_text('Modulet_name', module_name) 14 | tb_info_writer.add_text('Script_name', script_name) 15 | tb_info_writer.add_text('Description', description) 16 | tb_info_writer.close() 17 | 18 | def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1): 19 | for loader_name, loader_stats in stats.items(): 20 | if loader_stats is None: 21 | continue 22 | for var_name, val in loader_stats.items(): 23 | if hasattr(val, 'history') and getattr(val, 'has_new_data', True): 24 | self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch) -------------------------------------------------------------------------------- /ltr/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .loader import LTRLoader -------------------------------------------------------------------------------- /ltr/data/__pycache__/__init__.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/data/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/data/__pycache__/image_loader.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/data/__pycache__/image_loader.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/data/__pycache__/loader.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/data/__pycache__/loader.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/data/__pycache__/processing.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/data/__pycache__/processing.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/data/__pycache__/processing_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/data/__pycache__/processing_utils.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/data/__pycache__/sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/data/__pycache__/sampler.cpython-37.pyc 
-------------------------------------------------------------------------------- /ltr/data/__pycache__/transforms.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/data/__pycache__/transforms.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/data/image_loader.py: -------------------------------------------------------------------------------- 1 | import jpeg4py 2 | import cv2 as cv 3 | 4 | 5 | def default_image_loader(path): 6 | """The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader, 7 | but reverts to the opencv_loader if the former is not available.""" 8 | if default_image_loader.use_jpeg4py is None: 9 | # Try using jpeg4py 10 | im = jpeg4py_loader(path) 11 | if im is None: 12 | default_image_loader.use_jpeg4py = False 13 | print('Using opencv_loader instead.') 14 | else: 15 | default_image_loader.use_jpeg4py = True 16 | return im 17 | if default_image_loader.use_jpeg4py: 18 | return jpeg4py_loader(path) 19 | return opencv_loader(path) 20 | 21 | default_image_loader.use_jpeg4py = None 22 | 23 | 24 | def jpeg4py_loader(path): 25 | """ Image reading using jpeg4py (https://github.com/ajkxyz/jpeg4py)""" 26 | try: 27 | return jpeg4py.JPEG(path).decode() 28 | except Exception as e: 29 | print('ERROR: Could not read image "{}"'.format(path)) 30 | print(e) 31 | return None 32 | 33 | 34 | def opencv_loader(path): 35 | """ Read image using opencv's imread function and returns it in rgb format""" 36 | try: 37 | im = cv.imread(path, cv.IMREAD_COLOR) 38 | # convert to rgb and return 39 | return cv.cvtColor(im, cv.COLOR_BGR2RGB) 40 | except Exception as e: 41 | print('ERROR: Could not read image "{}"'.format(path)) 42 | print(e) 43 | return None 44 | -------------------------------------------------------------------------------- 
/ltr/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | from .lasot import Lasot 2 | from .got10k import Got10k 3 | from .tracking_net import TrackingNet 4 | from .imagenetvid import ImagenetVID 5 | from .coco_seq import MSCOCOSeq 6 | -------------------------------------------------------------------------------- /ltr/dataset/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/dataset/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/dataset/__pycache__/base_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/dataset/__pycache__/base_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/dataset/__pycache__/coco_seq.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/dataset/__pycache__/coco_seq.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/dataset/__pycache__/got10k.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/dataset/__pycache__/got10k.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/dataset/__pycache__/imagenetvid.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/dataset/__pycache__/imagenetvid.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/dataset/__pycache__/lasot.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/dataset/__pycache__/lasot.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/dataset/__pycache__/tracking_net.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/dataset/__pycache__/tracking_net.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | .vim-template* 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | .hypothesis/ 50 | .pytest_cache/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | local_settings.py 59 | db.sqlite3 60 | 61 | # Flask stuff: 62 | instance/ 63 | .webassets-cache 64 | 65 | # Scrapy stuff: 66 | .scrapy 67 | 68 | # Sphinx documentation 69 | docs/_build/ 70 | 71 | # PyBuilder 72 | target/ 73 | 74 | # Jupyter Notebook 75 | .ipynb_checkpoints 76 | 77 | # pyenv 78 | .python-version 79 | 80 | # celery beat schedule file 81 | celerybeat-schedule 82 | 83 | # SageMath parsed files 84 | *.sage.py 85 | 86 | # Environments 87 | .env 88 | .venv 89 | env/ 90 | venv/ 91 | ENV/ 92 | env.bak/ 93 | venv.bak/ 94 | 95 | # Spyder project settings 96 | .spyderproject 97 | .spyproject 98 | 99 | # Rope project settings 100 | .ropeproject 101 | 102 | # mkdocs documentation 103 | /site 104 | 105 | # mypy 106 | .mypy_cache/ 107 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Jiayuan Mao 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the 
Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/_assets/prroi_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/external/PreciseRoIPooling/_assets/prroi_visualization.png -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | /_prroi_pooling 3 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : __init__.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 
11 | 12 | from .prroi_pool import * 13 | 14 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : functional.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 11 | 12 | import torch 13 | import torch.autograd as ag 14 | 15 | try: 16 | from os.path import join as pjoin, dirname 17 | from torch.utils.cpp_extension import load as load_extension 18 | root_dir = pjoin(dirname(__file__), 'src') 19 | _prroi_pooling = load_extension( 20 | '_prroi_pooling', 21 | [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')], 22 | verbose=True 23 | ) 24 | except ImportError: 25 | raise ImportError('Can not compile Precise RoI Pooling library.') 26 | 27 | __all__ = ['prroi_pool2d'] 28 | 29 | 30 | class PrRoIPool2DFunction(ag.Function): 31 | @staticmethod 32 | def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale): 33 | assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \ 34 | 'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type()) 35 | 36 | pooled_height = int(pooled_height) 37 | pooled_width = int(pooled_width) 38 | spatial_scale = float(spatial_scale) 39 | 40 | features = features.contiguous() 41 | rois = rois.contiguous() 42 | params = (pooled_height, pooled_width, spatial_scale) 43 | 44 | if features.is_cuda: 45 | output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params) 46 | ctx.params = params 47 | # everything here is contiguous. 
48 | ctx.save_for_backward(features, rois, output) 49 | else: 50 | raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') 51 | 52 | return output 53 | 54 | @staticmethod 55 | def backward(ctx, grad_output): 56 | features, rois, output = ctx.saved_tensors 57 | grad_input = grad_coor = None 58 | 59 | if features.requires_grad: 60 | grad_output = grad_output.contiguous() 61 | grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params) 62 | if rois.requires_grad: 63 | grad_output = grad_output.contiguous() 64 | grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params) 65 | 66 | return grad_input, grad_coor, None, None, None 67 | 68 | 69 | prroi_pool2d = PrRoIPool2DFunction.apply 70 | 71 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : prroi_pool.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 
11 | 12 | import torch.nn as nn 13 | 14 | from .functional import prroi_pool2d 15 | 16 | __all__ = ['PrRoIPool2D'] 17 | 18 | 19 | class PrRoIPool2D(nn.Module): 20 | def __init__(self, pooled_height, pooled_width, spatial_scale): 21 | super().__init__() 22 | 23 | self.pooled_height = int(pooled_height) 24 | self.pooled_width = int(pooled_width) 25 | self.spatial_scale = float(spatial_scale) 26 | 27 | def forward(self, features, rois): 28 | return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale) 29 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h: -------------------------------------------------------------------------------- 1 | /* 2 | * File : prroi_pooling_gpu.h 3 | * Author : Jiayuan Mao, Tete Xiao 4 | * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 5 | * Date : 07/13/2018 6 | * 7 | * Distributed under terms of the MIT license. 8 | * Copyright (c) 2017 Megvii Technology Limited. 
9 | */ 10 | 11 | int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale); 12 | 13 | int prroi_pooling_backward_cuda( 14 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 15 | int pooled_height, int pooled_width, float spatial_scale 16 | ); 17 | 18 | int prroi_pooling_coor_backward_cuda( 19 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 20 | int pooled_height, int pooled_width, float spatial_scal 21 | ); 22 | 23 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu: -------------------------------------------------------------------------------- 1 | ../../../src/prroi_pooling_gpu_impl.cu -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh: -------------------------------------------------------------------------------- 1 | ../../../src/prroi_pooling_gpu_impl.cuh -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : test_prroi_pooling2d.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 18/02/2018 6 | # 7 | # This file is part of Jacinle. 
8 | 9 | import unittest 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.functional as F 14 | 15 | from jactorch.utils.unittest import TorchTestCase 16 | 17 | from prroi_pool import PrRoIPool2D 18 | 19 | 20 | class TestPrRoIPool2D(TorchTestCase): 21 | def test_forward(self): 22 | pool = PrRoIPool2D(7, 7, spatial_scale=0.5) 23 | features = torch.rand((4, 16, 24, 32)).cuda() 24 | rois = torch.tensor([ 25 | [0, 0, 0, 14, 14], 26 | [1, 14, 14, 28, 28], 27 | ]).float().cuda() 28 | 29 | out = pool(features, rois) 30 | out_gold = F.avg_pool2d(features, kernel_size=2, stride=1) 31 | 32 | self.assertTensorClose(out, torch.stack(( 33 | out_gold[0, :, :7, :7], 34 | out_gold[1, :, 7:14, 7:14], 35 | ), dim=0)) 36 | 37 | def test_backward_shapeonly(self): 38 | pool = PrRoIPool2D(2, 2, spatial_scale=0.5) 39 | 40 | features = torch.rand((4, 2, 24, 32)).cuda() 41 | rois = torch.tensor([ 42 | [0, 0, 0, 4, 4], 43 | [1, 14, 14, 18, 18], 44 | ]).float().cuda() 45 | features.requires_grad = rois.requires_grad = True 46 | out = pool(features, rois) 47 | 48 | loss = out.sum() 49 | loss.backward() 50 | 51 | self.assertTupleEqual(features.size(), features.grad.size()) 52 | self.assertTupleEqual(rois.size(), rois.grad.size()) 53 | 54 | 55 | if __name__ == '__main__': 56 | unittest.main() 57 | -------------------------------------------------------------------------------- /ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh: -------------------------------------------------------------------------------- 1 | /* 2 | * File : prroi_pooling_gpu_impl.cuh 3 | * Author : Tete Xiao, Jiayuan Mao 4 | * Email : jasonhsiao97@gmail.com 5 | * 6 | * Distributed under terms of the MIT license. 7 | * Copyright (c) 2017 Megvii Technology Limited. 
8 | */ 9 | 10 | #ifndef PRROI_POOLING_GPU_IMPL_CUH 11 | #define PRROI_POOLING_GPU_IMPL_CUH 12 | 13 | #ifdef __cplusplus 14 | extern "C" { 15 | #endif 16 | 17 | #define F_DEVPTR_IN const float * 18 | #define F_DEVPTR_OUT float * 19 | 20 | void PrRoIPoolingForwardGpu( 21 | cudaStream_t stream, 22 | F_DEVPTR_IN bottom_data, 23 | F_DEVPTR_IN bottom_rois, 24 | F_DEVPTR_OUT top_data, 25 | const int channels_, const int height_, const int width_, 26 | const int pooled_height_, const int pooled_width_, 27 | const float spatial_scale_, 28 | const int top_count); 29 | 30 | void PrRoIPoolingBackwardGpu( 31 | cudaStream_t stream, 32 | F_DEVPTR_IN bottom_data, 33 | F_DEVPTR_IN bottom_rois, 34 | F_DEVPTR_IN top_data, 35 | F_DEVPTR_IN top_diff, 36 | F_DEVPTR_OUT bottom_diff, 37 | const int channels_, const int height_, const int width_, 38 | const int pooled_height_, const int pooled_width_, 39 | const float spatial_scale_, 40 | const int top_count, const int bottom_count); 41 | 42 | void PrRoIPoolingCoorBackwardGpu( 43 | cudaStream_t stream, 44 | F_DEVPTR_IN bottom_data, 45 | F_DEVPTR_IN bottom_rois, 46 | F_DEVPTR_IN top_data, 47 | F_DEVPTR_IN top_diff, 48 | F_DEVPTR_OUT bottom_diff, 49 | const int channels_, const int height_, const int width_, 50 | const int pooled_height_, const int pooled_width_, 51 | const float spatial_scale_, 52 | const int top_count, const int bottom_count); 53 | 54 | #ifdef __cplusplus 55 | } /* !extern "C" */ 56 | #endif 57 | 58 | #endif /* !PRROI_POOLING_GPU_IMPL_CUH */ 59 | 60 | -------------------------------------------------------------------------------- /ltr/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/__init__.py -------------------------------------------------------------------------------- /ltr/models/__pycache__/__init__.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnet import * 2 | from .resnet18_vggm import * 3 | -------------------------------------------------------------------------------- /ltr/models/backbone/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/backbone/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/backbone/__pycache__/resnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/backbone/__pycache__/resnet.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/backbone/__pycache__/resnet18_vggm.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/backbone/__pycache__/resnet18_vggm.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/bbreg/__init__.py: -------------------------------------------------------------------------------- 1 | from .atom_iou_net import AtomIoUNet 2 | -------------------------------------------------------------------------------- /ltr/models/bbreg/__pycache__/__init__.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/bbreg/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/bbreg/__pycache__/atom.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/bbreg/__pycache__/atom.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/bbreg/__pycache__/atom_iou_net.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/bbreg/__pycache__/atom_iou_net.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/layers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/layers/__init__.py -------------------------------------------------------------------------------- /ltr/models/layers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/layers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/layers/__pycache__/blocks.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/models/layers/__pycache__/blocks.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/models/layers/batch_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | import torch 3 | from torch import nn 4 | 5 | 6 | class FrozenBatchNorm2d(nn.Module): 7 | """ 8 | BatchNorm2d where the batch statistics and the affine parameters 9 | are fixed 10 | """ 11 | 12 | def __init__(self, n): 13 | super(FrozenBatchNorm2d, self).__init__() 14 | self.register_buffer("weight", torch.ones(n)) 15 | self.register_buffer("bias", torch.zeros(n)) 16 | self.register_buffer("running_mean", torch.zeros(n)) 17 | self.register_buffer("running_var", torch.ones(n)) 18 | 19 | def forward(self, x): 20 | # Cast all fixed parameters to half() if necessary 21 | if x.dtype == torch.float16: 22 | self.weight = self.weight.half() 23 | self.bias = self.bias.half() 24 | self.running_mean = self.running_mean.half() 25 | self.running_var = self.running_var.half() 26 | 27 | scale = self.weight * self.running_var.rsqrt() 28 | bias = self.bias - self.running_mean * scale 29 | scale = scale.reshape(1, -1, 1, 1) 30 | bias = bias.reshape(1, -1, 1, 1) 31 | return x * scale + bias 32 | -------------------------------------------------------------------------------- /ltr/models/layers/blocks.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | def conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True, 5 | batch_norm=True, relu=True): 6 | layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, 7 | padding=padding, dilation=dilation, bias=bias)] 8 | if batch_norm: 9 | layers.append(nn.BatchNorm2d(out_planes)) 10 | if relu: 11 | 
class LinearBlock(nn.Module):
    """Fully-connected layer optionally followed by BatchNorm and ReLU.

    The input is flattened, passed through a linear layer, and (when batch
    norm is enabled) temporarily reshaped to 4-D so nn.BatchNorm2d can be
    applied to the feature vector.

    args:
        in_planes: Number of input channels.
        out_planes: Number of output features.
        input_sz: Spatial size of the (square) input map.
        bias: Whether the linear layer has a bias term.
        batch_norm: Whether to append a BatchNorm stage.
        relu: Whether to append a ReLU stage.
    """

    def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True):
        super().__init__()
        self.linear = nn.Linear(in_planes * input_sz * input_sz, out_planes, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        batch = x.shape[0]
        # Flatten everything but the batch dimension before the linear layer.
        out = self.linear(x.reshape(batch, -1))
        if self.bn is not None:
            # BatchNorm2d expects a 4-D tensor, so view the features as 1x1 maps.
            out = self.bn(out.reshape(batch, -1, 1, 1))
        if self.relu is not None:
            out = self.relu(out)
        return out.reshape(batch, -1)
def main():
    """Parse command-line arguments and launch a training run.

    Command-line args:
        --train_module: Name of module in the "train_settings/" folder.
        --train_name: Name of the train settings file.
        --cudnn_benchmark: Set cudnn benchmark on (1) or off (0).
    """

    def parse_bool(value):
        # argparse's `type=bool` is a trap: bool('0') and bool('False') are
        # both True, so the flag could never actually be turned off from the
        # command line. Parse the common truthy/falsy spellings explicitly.
        if value.lower() in ('1', 'true', 'yes', 'on'):
            return True
        if value.lower() in ('0', 'false', 'no', 'off'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got {!r}'.format(value))

    parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.')
    parser.add_argument('--train_module', default='siam_sel', type=str,
                        help='Name of module in the "train_settings/" folder.')
    parser.add_argument('--train_name', default='default3_1', type=str,
                        help='Name of the train settings file.')
    parser.add_argument('--cudnn_benchmark', type=parse_bool, default=True,
                        help='Set cudnn benchmark on (1) or off (0) (default is on).')

    args = parser.parse_args()

    run_training(args.train_module, args.train_name, args.cudnn_benchmark)
def run_training(train_module, train_name, cudnn_benchmark=True):
    """Run a train scripts in train_settings.
    args:
        train_module: Name of module in the "train_settings/" folder.
        train_name: Name of the train settings file.
        cudnn_benchmark: Use cudnn benchmark or not (default is True).
    """
    # Disable OpenCV's internal threading; it is known to cause strange
    # crashes when combined with multiprocessing data loaders.
    cv.setNumThreads(0)

    torch.backends.cudnn.benchmark = cudnn_benchmark

    print('Training: {} {}'.format(train_module, train_name))

    settings = ws_settings.Settings()
    if settings.env.workspace_dir == '':
        raise Exception('Setup your workspace_dir in "ltr/admin/local.py".')

    # Record where this run came from so checkpoints/logs can be grouped.
    settings.module_name = train_module
    settings.script_name = train_name
    settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)

    # Import the chosen training script and hand control to its run() entry point.
    script = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))
    getattr(script, 'run')(settings)
parser.add_argument('--train_name', default='default3_2', type=str, help='Name of the train settings file.') 51 | parser.add_argument('--cudnn_benchmark', type=bool, default=True, help='Set cudnn benchmark on (1) or off (0) (default is on).') 52 | 53 | args = parser.parse_args() 54 | 55 | run_training(args.train_module, args.train_name, args.cudnn_benchmark) 56 | 57 | 58 | if __name__ == '__main__': 59 | multiprocessing.set_start_method('spawn', force=True) 60 | main() 61 | -------------------------------------------------------------------------------- /ltr/train_settings/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/train_settings/__init__.py -------------------------------------------------------------------------------- /ltr/train_settings/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/train_settings/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/train_settings/bbreg/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/train_settings/bbreg/__init__.py -------------------------------------------------------------------------------- /ltr/train_settings/siam_sel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/train_settings/siam_sel/__init__.py -------------------------------------------------------------------------------- /ltr/train_settings/siam_sel/__pycache__/__init__.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/train_settings/siam_sel/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/train_settings/siam_sel/__pycache__/bounding_box.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/train_settings/siam_sel/__pycache__/bounding_box.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/train_settings/siam_sel/__pycache__/default3_1.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/train_settings/siam_sel/__pycache__/default3_1.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/trainers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_trainer import BaseTrainer 2 | from .ltr_trainer import LTRTrainer -------------------------------------------------------------------------------- /ltr/trainers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/trainers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/trainers/__pycache__/base_trainer.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/trainers/__pycache__/base_trainer.cpython-37.pyc -------------------------------------------------------------------------------- /ltr/trainers/__pycache__/ltr_trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/ltr/trainers/__pycache__/ltr_trainer.cpython-37.pyc -------------------------------------------------------------------------------- /py37.yaml: -------------------------------------------------------------------------------- 1 | name: py37 2 | channels: 3 | - pytorch 4 | - defaults 5 | dependencies: 6 | - backcall=0.1.0=py37_0 7 | - blas=1.0=mkl 8 | - ca-certificates=2019.1.23=0 9 | - certifi=2019.3.9=py37_0 10 | - cffi=1.12.3=py37h2e261b9_0 11 | - cudatoolkit=9.0=h13b8566_0 12 | - dbus=1.13.6=h746ee38_0 13 | - decorator=4.4.0=py37_1 14 | - expat=2.2.6=he6710b0_0 15 | - fontconfig=2.13.0=h9420a91_0 16 | - freetype=2.9.1=h8a8886c_1 17 | - glib=2.56.2=hd408876_0 18 | - gst-plugins-base=1.14.0=hbbd80ab_1 19 | - gstreamer=1.14.0=hb453b48_1 20 | - icu=58.2=h9c2bf20_1 21 | - intel-openmp=2019.3=199 22 | - ipython=7.5.0=py37h39e3cac_0 23 | - ipython_genutils=0.2.0=py37_0 24 | - jedi=0.13.3=py37_0 25 | - jpeg=9b=h024ee3a_2 26 | - kiwisolver=1.1.0=py37he6710b0_0 27 | - libedit=3.1.20181209=hc058e9b_0 28 | - libffi=3.2.1=hd88cf55_4 29 | - libgcc-ng=8.2.0=hdf63c60_1 30 | - libgfortran-ng=7.3.0=hdf63c60_0 31 | - libpng=1.6.37=hbc83047_0 32 | - libstdcxx-ng=8.2.0=hdf63c60_1 33 | - libtiff=4.0.10=h2733197_2 34 | - libuuid=1.0.3=h1bed415_2 35 | - libxcb=1.13=h1bed415_1 36 | - libxml2=2.9.9=he19cac6_0 37 | - mkl=2019.3=199 38 | - mkl_fft=1.0.12=py37ha843d7b_0 39 | - mkl_random=1.0.2=py37hd81dba3_0 40 | - ncurses=6.1=he6710b0_1 41 | - numpy=1.16.3=py37h7e9f1db_0 42 | - numpy-base=1.16.3=py37hde5b4d6_0 43 | - olefile=0.46=py37_0 44 
| - openssl=1.1.1b=h7b6447c_1 45 | - pandas=0.24.2=py37he6710b0_0 46 | - parso=0.4.0=py_0 47 | - pcre=8.43=he6710b0_0 48 | - pexpect=4.7.0=py37_0 49 | - pickleshare=0.7.5=py37_0 50 | - pillow=6.0.0=py37h34e0f95_0 51 | - pip=19.1.1=py37_0 52 | - prompt_toolkit=2.0.9=py37_0 53 | - ptyprocess=0.6.0=py37_0 54 | - pycparser=2.19=py37_0 55 | - pygments=2.4.0=py_0 56 | - pyparsing=2.4.0=py_0 57 | - pyqt=5.9.2=py37h05f1152_2 58 | - python=3.7.3=h0371630_0 59 | - python-dateutil=2.8.0=py37_0 60 | - pytorch=1.1.0=py3.7_cuda9.0.176_cudnn7.5.1_0 61 | - pytz=2019.1=py_0 62 | - qt=5.9.7=h5867ecd_1 63 | - readline=7.0=h7b6447c_5 64 | - setuptools=41.0.1=py37_0 65 | - sip=4.19.8=py37hf484d3e_0 66 | - six=1.12.0=py37_0 67 | - sqlite=3.28.0=h7b6447c_0 68 | - tk=8.6.8=hbc83047_0 69 | - torchvision=0.3.0=py37_cu9.0.176_1 70 | - tornado=6.0.2=py37h7b6447c_0 71 | - traitlets=4.3.2=py37_0 72 | - wcwidth=0.1.7=py37_0 73 | - wheel=0.33.4=py37_0 74 | - xz=5.2.4=h14c3975_4 75 | - zlib=1.2.11=h7b6447c_3 76 | - zstd=1.3.7=h0b5b093_0 77 | - pip: 78 | - addict==2.2.1 79 | - chardet==3.0.4 80 | - cycler==0.10.0 81 | - cython==0.29.7 82 | - idna==2.8 83 | - jpeg4py==0.1.4 84 | - matplotlib==3.1.0 85 | - mmcv==0.2.8 86 | - ninja==1.9.0 87 | - opencv-python==4.1.0.25 88 | - protobuf==3.7.1 89 | - pycocotools==2.0.0 90 | - pyyaml==5.1 91 | - requests==2.22.0 92 | - tensorboardx==1.7 93 | - tqdm==4.32.1 94 | - urllib3==1.25.3 95 | - yacs==0.1.6 96 | prefix: /home/tangjiuqi097/anaconda3/envs/py37 97 | -------------------------------------------------------------------------------- /pytracking/__init__.py: -------------------------------------------------------------------------------- 1 | from pytracking.libs import TensorList, TensorDict 2 | import pytracking.libs.complex as complex 3 | import pytracking.libs.operation as operation 4 | import pytracking.libs.fourier as fourier 5 | import pytracking.libs.dcf as dcf 6 | import pytracking.libs.optimization as optimization 7 | from pytracking.run_tracker 
class Sequence:
    """A single evaluation sequence: frame paths plus ground-truth annotations."""

    def __init__(self, name, frames, ground_truth_rect, object_class=None):
        self.name = name
        self.frames = frames
        self.object_class = object_class
        # NOTE(review): ground_truth_rect is indexed as a 2-D array with one
        # row per frame; presumably each row is [x, y, w, h] — confirm against
        # the dataset loaders.
        self.ground_truth_rect = ground_truth_rect
        # The first annotated box doubles as the tracker's initial state.
        self.init_state = list(self.ground_truth_rect[0, :])
class EnvSettings:
    """Default environment paths for evaluation.

    Result and network paths default to folders inside the pytracking package;
    all dataset paths default to the empty string and are expected to be filled
    in by the user's local.py.
    """

    def __init__(self):
        base = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

        self.results_path = '{}/tracking_results/'.format(base)
        self.network_path = '{}/networks/'.format(base)

        # Dataset roots — empty until configured locally.
        for dataset_attr in ('otb_path', 'nfs_path', 'uav_path', 'tpl_path',
                             'vot_path', 'got10k_path', 'lasot_path',
                             'trackingnet_path'):
            setattr(self, dataset_attr, '')
def env_settings():
    """Load the user's local evaluation environment settings.

    Returns:
        The EnvSettings instance produced by pytracking.evaluation.local.

    Raises:
        RuntimeError: If local.py cannot be imported. A default template is
            written so the user can fill in the paths and retry.
    """
    env_module_name = 'pytracking.evaluation.local'
    try:
        env_module = importlib.import_module(env_module_name)
    except ImportError:
        # Only a missing/unimportable local.py should trigger template
        # creation. The previous bare `except:` also caught errors raised
        # *inside* local_env_settings() (because the return sat inside the
        # try) and then silently overwrote the user's existing local.py.
        env_file = os.path.join(os.path.dirname(__file__), 'local.py')

        # Create a default file
        create_default_local_file()
        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. '
                           'Then try to run again.'.format(env_file))
    return env_module.local_env_settings()
def run_sequence(seq: Sequence, tracker: Tracker, debug=False):
    """Runs a tracker on a sequence and saves the results.

    args:
        seq: The Sequence to track.
        tracker: The Tracker to run.
        debug: Debug level; when truthy, results are not written to disk.

    Writes <results_dir>/<seq>.txt (boxes) and <seq>_time.txt (per-frame times)
    unless debug is set.
    """

    base_results_path = '{}/{}'.format(tracker.results_dir, seq.name)
    results_path = '{}.txt'.format(base_results_path)
    times_path = '{}_time.txt'.format(base_results_path)

    print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))

    # The previous version wrapped this call in a try/except whose handler
    # immediately re-raised, leaving a `print(e); return` unreachable — that
    # is equivalent to no handler at all, so the redundant branching is gone.
    # Any tracker failure simply propagates to the caller.
    tracked_bb, exec_times = tracker.run(seq, debug=debug)

    tracked_bb = np.array(tracked_bb).astype(int)
    exec_times = np.array(exec_times).astype(float)

    print('FPS: {}'.format(len(exec_times) / exec_times.sum()))
    if not debug:
        np.savetxt(results_path, tracked_bb, delimiter='\t', fmt='%d')
        np.savetxt(times_path, exec_times, delimiter='\t', fmt='%f')
class RGB(FeatureBase):
    """Raw RGB channels, rescaled from [0, 255] to [-0.5, 0.5]."""

    def dim(self):
        # Three color channels.
        return 3

    def stride(self):
        # No spatial reduction beyond the configured pooling.
        return self.pool_stride

    def extract(self, im: torch.Tensor):
        # Map pixel values [0, 255] -> [-0.5, 0.5].
        return (im / 255) - 0.5
class Concatenate(FeatureBase):
    """A feature that concatenates the outputs of several other features.

    args:
        features: List of features to concatenate. All must share the same stride.
        pool_stride, normalize_power, use_for_color, use_for_gray:
            Forwarded to the FeatureBase constructor.

    Raises:
        ValueError: If the constituent features have different strides.
    """
    def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True):
        super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray)
        self.features = features

        self.input_stride = self.features[0].stride()

        # All constituent features must produce maps at the same resolution,
        # otherwise channel-wise concatenation is ill-defined.
        # (Fixed typo in the message: "bultiresolution" -> "multiresolution".)
        for feat in self.features:
            if self.input_stride != feat.stride():
                raise ValueError('Strides for the features must be the same for a multiresolution feature.')

    def dim(self):
        # Total dimensionality is the sum over constituent features.
        return sum(f.dim() for f in self.features)

    def stride(self):
        return self.pool_stride * self.input_stride

    def extract(self, im: torch.Tensor):
        # Concatenate along the channel dimension (dim 1).
        return torch.cat([f.get_feature(im) for f in self.features], 1)
Returns the input if weight=None.""" 9 | 10 | if weight is None: 11 | return input 12 | 13 | ind = None 14 | if mode is not None: 15 | if padding != 0: 16 | raise ValueError('Cannot input both padding and mode.') 17 | if mode == 'same': 18 | padding = (weight.shape[2]//2, weight.shape[3]//2) 19 | if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0: 20 | ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None), 21 | slice(-1) if weight.shape[3] % 2 == 0 else slice(None)) 22 | elif mode == 'valid': 23 | padding = (0, 0) 24 | elif mode == 'full': 25 | padding = (weight.shape[2]-1, weight.shape[3]-1) 26 | else: 27 | raise ValueError('Unknown mode for padding.') 28 | 29 | out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) 30 | if ind is None: 31 | return out 32 | return out[:,:,ind[0],ind[1]] 33 | 34 | 35 | @tensor_operation 36 | def conv1x1(input: torch.Tensor, weight: torch.Tensor): 37 | """Do a convolution with a 1x1 kernel weights. Implemented with matmul, which can be faster than using conv.""" 38 | 39 | if weight is None: 40 | return input 41 | 42 | return torch.matmul(weight.view(weight.shape[0], weight.shape[1]), 43 | input.view(input.shape[0], input.shape[1], -1)).view(input.shape[0], weight.shape[0], input.shape[2], input.shape[3]) 44 | -------------------------------------------------------------------------------- /pytracking/libs/tensordict.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import torch 3 | 4 | 5 | class TensorDict(OrderedDict): 6 | """Container mainly used for dicts of torch tensors. 
Extends OrderedDict with pytorch functionality.""" 7 | 8 | def concat(self, other): 9 | """Concatenates two dicts without copying internal data.""" 10 | return TensorDict(self, **other) 11 | 12 | def copy(self): 13 | return TensorDict(super(TensorDict, self).copy()) 14 | 15 | def __getattr__(self, name): 16 | if not hasattr(torch.Tensor, name): 17 | raise AttributeError('\'TensorDict\' object has not attribute \'{}\''.format(name)) 18 | 19 | def apply_attr(*args, **kwargs): 20 | return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()}) 21 | return apply_attr 22 | 23 | def attribute(self, attr: str, *args): 24 | return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()}) 25 | 26 | def apply(self, fn, *args, **kwargs): 27 | return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()}) 28 | 29 | @staticmethod 30 | def _iterable(a): 31 | return isinstance(a, (TensorDict, list)) 32 | 33 | -------------------------------------------------------------------------------- /pytracking/mmdetection/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | # cython generated cpp 107 | mmdet/ops/nms/src/soft_nms_cpu.cpp 108 | mmdet/version.py 109 | data 110 | .vscode 111 | .idea 112 | -------------------------------------------------------------------------------- /pytracking/mmdetection/.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | BASED_ON_STYLE = pep8 3 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 4 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 5 | -------------------------------------------------------------------------------- /pytracking/mmdetection/.travis.yml: -------------------------------------------------------------------------------- 1 | dist: trusty 2 | language: python 3 | 4 | install: 5 | - pip install flake8 6 | 7 | python: 8 | - "3.5" 9 | - "3.6" 10 | 11 | script: 12 | - flake8 
-------------------------------------------------------------------------------- /pytracking/mmdetection/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to mmdetection 2 | 3 | All kinds of contributions are welcome, including but not limited to the following. 4 | 5 | - Fixes (typo, bugs) 6 | - New features and components 7 | 8 | ## Workflow 9 | 10 | 1. fork and pull the latest mmdetection 11 | 2. checkout a new branch (do not use master branch for PRs) 12 | 3. commit your changes 13 | 4. create a PR 14 | 15 | Note 16 | - If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first. 17 | - If you are the author of some papers and would like to include your method to mmdetection, 18 | please contact Kai Chen (chenkaidev[at]gmail[dot]com). We will much appreciate your contribution. 19 | 20 | ## Code style 21 | 22 | ### Python 23 | We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style. 24 | We use [flake8](http://flake8.pycqa.org/en/latest/) as the linter and [yapf](https://github.com/google/yapf) as the formatter. 25 | Please upgrade to the latest yapf (>=0.27.0) and refer to the [configuration](.style.yapf). 26 | 27 | >Before you create a PR, make sure that your code lints and is formatted by yapf. 28 | 29 | ### C++ and CUDA 30 | We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). 
-------------------------------------------------------------------------------- /pytracking/mmdetection/INSTALL.md: -------------------------------------------------------------------------------- 1 | ## Installation 2 | 3 | ### Requirements 4 | 5 | - Linux 6 | - Python 3.5+ ([Say goodbye to Python2](https://python3statement.org/)) 7 | - PyTorch 1.0+ or PyTorch-nightly 8 | - CUDA 9.0+ 9 | - NCCL 2+ 10 | - GCC 4.9+ 11 | - [mmcv](https://github.com/open-mmlab/mmcv) 12 | 13 | We have tested the following versions of OS and softwares: 14 | 15 | - OS: Ubuntu 16.04/18.04 and CentOS 7.2 16 | - CUDA: 9.0/9.2/10.0 17 | - NCCL: 2.1.15/2.2.13/2.3.7/2.4.2 18 | - GCC: 4.9/5.3/5.4/7.3 19 | 20 | ### Install mmdetection 21 | 22 | a. Create a conda virtual environment and activate it. Then install Cython. 23 | 24 | ```shell 25 | conda create -n open-mmlab python=3.7 -y 26 | source activate open-mmlab 27 | 28 | conda install cython 29 | ``` 30 | 31 | b. Install PyTorch stable or nightly and torchvision following the [official instructions](https://pytorch.org/). 32 | 33 | c. Clone the mmdetection repository. 34 | 35 | ```shell 36 | git clone https://github.com/open-mmlab/mmdetection.git 37 | cd mmdetection 38 | ``` 39 | 40 | d. Compile cuda extensions. 41 | 42 | ```shell 43 | ./compile.sh 44 | ``` 45 | 46 | e. Install mmdetection (other dependencies will be installed automatically). 47 | 48 | ```shell 49 | python setup.py develop 50 | # or "pip install -e ." 51 | ``` 52 | 53 | Note: 54 | 55 | 1. It is recommended that you run the step e each time you pull some updates from github. If there are some updates of the C/CUDA codes, you also need to run step d. 56 | The git commit id will be written to the version number with step e, e.g. 0.6.0+2e7045c. The version will also be saved in trained models. 57 | 58 | 2. Following the above instructions, mmdetection is installed on `dev` mode, any modifications to the code will take effect without installing it again. 
59 | 60 | ### Prepare COCO dataset. 61 | 62 | It is recommended to symlink the dataset root to `$MMDETECTION/data`. 63 | 64 | ``` 65 | mmdetection 66 | ├── mmdet 67 | ├── tools 68 | ├── configs 69 | ├── data 70 | │ ├── coco 71 | │ │ ├── annotations 72 | │ │ ├── train2017 73 | │ │ ├── val2017 74 | │ │ ├── test2017 75 | │ ├── VOCdevkit 76 | │ │ ├── VOC2007 77 | │ │ ├── VOC2012 78 | 79 | ``` 80 | 81 | ### Scripts 82 | [Here](https://gist.github.com/hellock/bf23cd7348c727d69d48682cb6909047) is 83 | a script for setting up mmdetection with conda. 84 | 85 | ### Notice 86 | You can run `python(3) setup.py develop` or `pip install -e .` to install mmdetection if you want to make modifications to it frequently. 87 | 88 | If there are more than one mmdetection on your machine, and you want to use them alternatively. 89 | Please insert the following code to the main file 90 | ```python 91 | import os.path as osp 92 | import sys 93 | sys.path.insert(0, osp.join(osp.dirname(osp.abspath(__file__)), '../')) 94 | ``` 95 | or run the following command in the terminal of corresponding folder. 96 | ```shell 97 | export PYTHONPATH=`pwd`:$PYTHONPATH 98 | ``` 99 | -------------------------------------------------------------------------------- /pytracking/mmdetection/compile.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | echo "Building roi align op..." 6 | cd mmdet/ops/roi_align 7 | if [ -d "build" ]; then 8 | rm -r build 9 | fi 10 | $PYTHON setup.py build_ext --inplace 11 | 12 | echo "Building roi pool op..." 13 | cd ../roi_pool 14 | if [ -d "build" ]; then 15 | rm -r build 16 | fi 17 | $PYTHON setup.py build_ext --inplace 18 | 19 | echo "Building nms op..." 20 | cd ../nms 21 | if [ -d "build" ]; then 22 | rm -r build 23 | fi 24 | $PYTHON setup.py build_ext --inplace 25 | 26 | echo "Building dcn..." 
27 | cd ../dcn 28 | if [ -d "build" ]; then 29 | rm -r build 30 | fi 31 | $PYTHON setup.py build_ext --inplace 32 | 33 | echo "Building sigmoid focal loss op..." 34 | cd ../sigmoid_focal_loss 35 | if [ -d "build" ]; then 36 | rm -r build 37 | fi 38 | $PYTHON setup.py build_ext --inplace 39 | 40 | echo "Building masked conv op..." 41 | cd ../masked_conv 42 | if [ -d "build" ]; then 43 | rm -r build 44 | fi 45 | $PYTHON setup.py build_ext --inplace 46 | -------------------------------------------------------------------------------- /pytracking/mmdetection/configs/gn/README.md: -------------------------------------------------------------------------------- 1 | # Group Normalization 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @inproceedings{wu2018group, 7 | title={Group Normalization}, 8 | author={Wu, Yuxin and He, Kaiming}, 9 | booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, 10 | year={2018} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | model | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | 17 | |:-------------:|:----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| 18 | | R-50-FPN (d) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.8 | 36.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_2x_20180113-86832cf2.pth) | 19 | | R-50-FPN (d) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.1 | 36.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_3x_20180113-8e82f48d.pth) | 20 | | R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 0.970 | 4.8 | 41.5 | 37.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_2x_20180113-9598649c.pth) | 21 | | R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | 0.970 | 4.8 | 41.6 | 37.3 | 
[model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_3x_20180113-a14ffb96.pth) | 22 | | R-50-FPN (c) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.7 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_2x_20180113-ec93305c.pth) | 23 | | R-50-FPN (c) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.0 | 36.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_3x_20180113-9d230cab.pth) | 24 | 25 | **Notes:** 26 | - (d) means pretrained model converted from Detectron, and (c) means the contributed model pretrained by [@thangvubk](https://github.com/thangvubk). 27 | - The `3x` schedule is epoch [28, 34, 36]. 28 | - **Memory, Train/Inf time is outdated.** -------------------------------------------------------------------------------- /pytracking/mmdetection/configs/scratch/README.md: -------------------------------------------------------------------------------- 1 | # Rethinking ImageNet Pre-training 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @article{he2018rethinking, 7 | title={Rethinking imagenet pre-training}, 8 | author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr}, 9 | journal={arXiv preprint arXiv:1811.08883}, 10 | year={2018} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Model | Backbone | Style | Lr schd | box AP | mask AP | Download | 17 | |:------------:|:---------:|:-------:|:-------:|:------:|:-------:|:--------:| 18 | | Faster R-CNN | R-50-FPN | pytorch | 6x | 40.1 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/scratch/scratch_faster_rcnn_r50_fpn_gn_6x-20190515-ff554978.pth) | 19 | | Mask R-CNN | R-50-FPN | pytorch | 6x | 41.0 | 37.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/scratch/scratch_mask_rcnn_r50_fpn_gn_6x_20190515-96743f5e.pth) | 20 | 21 | Note: 22 | - The above models are trained with 16 
GPUs. -------------------------------------------------------------------------------- /pytracking/mmdetection/demo/coco_test_12510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/demo/coco_test_12510.jpg -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__, short_version 2 | 3 | __all__ = ['__version__', 'short_version'] 4 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .env import init_dist, get_root_logger, set_random_seed 2 | from .train import train_detector 3 | from .inference import init_detector, inference_detector, show_result 4 | 5 | __all__ = [ 6 | 'init_dist', 'get_root_logger', 'set_random_seed', 'train_detector', 7 | 'init_detector', 'inference_detector', 'show_result' 8 | ] 9 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/apis/env.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import random 4 | import subprocess 5 | 6 | import numpy as np 7 | import torch 8 | import torch.distributed as dist 9 | import torch.multiprocessing as mp 10 | from mmcv.runner import get_dist_info 11 | 12 | 13 | def init_dist(launcher, backend='nccl', **kwargs): 14 | if mp.get_start_method(allow_none=True) is None: 15 | mp.set_start_method('spawn') 16 | if launcher == 'pytorch': 17 | _init_dist_pytorch(backend, **kwargs) 18 | elif launcher == 'mpi': 19 | _init_dist_mpi(backend, **kwargs) 20 | elif launcher == 'slurm': 21 | 
_init_dist_slurm(backend, **kwargs) 22 | else: 23 | raise ValueError('Invalid launcher type: {}'.format(launcher)) 24 | 25 | 26 | def _init_dist_pytorch(backend, **kwargs): 27 | # TODO: use local_rank instead of rank % num_gpus 28 | rank = int(os.environ['RANK']) 29 | num_gpus = torch.cuda.device_count() 30 | torch.cuda.set_device(rank % num_gpus) 31 | dist.init_process_group(backend=backend, **kwargs) 32 | 33 | 34 | def _init_dist_mpi(backend, **kwargs): 35 | raise NotImplementedError 36 | 37 | 38 | def _init_dist_slurm(backend, port=29500, **kwargs): 39 | proc_id = int(os.environ['SLURM_PROCID']) 40 | ntasks = int(os.environ['SLURM_NTASKS']) 41 | node_list = os.environ['SLURM_NODELIST'] 42 | num_gpus = torch.cuda.device_count() 43 | torch.cuda.set_device(proc_id % num_gpus) 44 | addr = subprocess.getoutput( 45 | 'scontrol show hostname {} | head -n1'.format(node_list)) 46 | os.environ['MASTER_PORT'] = str(port) 47 | os.environ['MASTER_ADDR'] = addr 48 | os.environ['WORLD_SIZE'] = str(ntasks) 49 | os.environ['RANK'] = str(proc_id) 50 | dist.init_process_group(backend=backend) 51 | 52 | 53 | def set_random_seed(seed): 54 | random.seed(seed) 55 | np.random.seed(seed) 56 | torch.manual_seed(seed) 57 | torch.cuda.manual_seed_all(seed) 58 | 59 | 60 | def get_root_logger(log_level=logging.INFO): 61 | logger = logging.getLogger() 62 | if not logger.hasHandlers(): 63 | logging.basicConfig( 64 | format='%(asctime)s - %(levelname)s - %(message)s', 65 | level=log_level) 66 | rank, _ = get_dist_info() 67 | if rank != 0: 68 | logger.setLevel('ERROR') 69 | return logger 70 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .mask import * # noqa: F401, F403 4 | from .loss import * # noqa: F401, F403 5 | from .evaluation 
import * # noqa: F401, F403 6 | from .post_processing import * # noqa: F401, F403 7 | from .utils import * # noqa: F401, F403 8 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import AnchorGenerator 2 | from .anchor_target import anchor_target, anchor_inside_flags 3 | from .guided_anchor_target import ga_loc_target, ga_shape_target 4 | 5 | __all__ = [ 6 | 'AnchorGenerator', 'anchor_target', 'anchor_inside_flags', 'ga_loc_target', 7 | 'ga_shape_target' 8 | ] 9 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/__init__.py: -------------------------------------------------------------------------------- 1 | from .geometry import bbox_overlaps 2 | from .assigners import BaseAssigner, MaxIoUAssigner, AssignResult 3 | from .samplers import (BaseSampler, PseudoSampler, RandomSampler, 4 | InstanceBalancedPosSampler, IoUBalancedNegSampler, 5 | CombinedSampler, SamplingResult) 6 | from .assign_sampling import build_assigner, build_sampler, assign_and_sample 7 | from .transforms import (bbox2delta, delta2bbox, bbox_flip, bbox_mapping, 8 | bbox_mapping_back, bbox2roi, roi2bbox, bbox2result, 9 | distance2bbox) 10 | from .bbox_target import bbox_target 11 | 12 | __all__ = [ 13 | 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult', 14 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 15 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 16 | 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample', 17 | 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping', 18 | 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result', 19 | 'distance2bbox', 'bbox_target' 20 | ] 21 | -------------------------------------------------------------------------------- 
/pytracking/mmdetection/mmdet/core/bbox/assign_sampling.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from . import assigners, samplers 4 | 5 | 6 | def build_assigner(cfg, **kwargs): 7 | if isinstance(cfg, assigners.BaseAssigner): 8 | return cfg 9 | elif isinstance(cfg, dict): 10 | return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs) 11 | else: 12 | raise TypeError('Invalid type {} for building a sampler'.format( 13 | type(cfg))) 14 | 15 | 16 | def build_sampler(cfg, **kwargs): 17 | if isinstance(cfg, samplers.BaseSampler): 18 | return cfg 19 | elif isinstance(cfg, dict): 20 | return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs) 21 | else: 22 | raise TypeError('Invalid type {} for building a sampler'.format( 23 | type(cfg))) 24 | 25 | 26 | def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg): 27 | bbox_assigner = build_assigner(cfg.assigner) 28 | bbox_sampler = build_sampler(cfg.sampler) 29 | assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore, 30 | gt_labels) 31 | sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes, 32 | gt_labels) 33 | return assign_result, sampling_result 34 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_assigner import BaseAssigner 2 | from .max_iou_assigner import MaxIoUAssigner 3 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner 4 | from .assign_result import AssignResult 5 | 6 | __all__ = [ 7 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult' 8 | ] 9 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/assigners/assign_result.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class AssignResult(object): 5 | 6 | def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): 7 | self.num_gts = num_gts 8 | self.gt_inds = gt_inds 9 | self.max_overlaps = max_overlaps 10 | self.labels = labels 11 | 12 | def add_gt_(self, gt_labels): 13 | self_inds = torch.arange( 14 | 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) 15 | self.gt_inds = torch.cat([self_inds, self.gt_inds]) 16 | self.max_overlaps = torch.cat( 17 | [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps]) 18 | if self.labels is not None: 19 | self.labels = torch.cat([gt_labels, self.labels]) 20 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | 6 | @abstractmethod 7 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 8 | pass 9 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/geometry.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False): 5 | """Calculate overlap between two set of bboxes. 6 | 7 | If ``is_aligned`` is ``False``, then calculate the ious between each bbox 8 | of bboxes1 and bboxes2, otherwise the ious between each aligned pair of 9 | bboxes1 and bboxes2. 10 | 11 | Args: 12 | bboxes1 (Tensor): shape (m, 4) 13 | bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n 14 | must be equal. 15 | mode (str): "iou" (intersection over union) or iof (intersection over 16 | foreground). 
17 | 18 | Returns: 19 | ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1) 20 | """ 21 | 22 | assert mode in ['iou', 'iof'] 23 | 24 | rows = bboxes1.size(0) 25 | cols = bboxes2.size(0) 26 | if is_aligned: 27 | assert rows == cols 28 | 29 | if rows * cols == 0: 30 | return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols) 31 | 32 | if is_aligned: 33 | lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2] 34 | rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2] 35 | 36 | wh = (rb - lt + 1).clamp(min=0) # [rows, 2] 37 | overlap = wh[:, 0] * wh[:, 1] 38 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 39 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 40 | 41 | if mode == 'iou': 42 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 43 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 44 | ious = overlap / (area1 + area2 - overlap) 45 | else: 46 | ious = overlap / area1 47 | else: 48 | lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2] 49 | rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2] 50 | 51 | wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2] 52 | overlap = wh[:, :, 0] * wh[:, :, 1] 53 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 54 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 55 | 56 | if mode == 'iou': 57 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 58 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 59 | ious = overlap / (area1[:, None] + area2 - overlap) 60 | else: 61 | ious = overlap / (area1[:, None]) 62 | 63 | return ious 64 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .pseudo_sampler import PseudoSampler 3 | from .random_sampler import RandomSampler 4 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 5 | from .iou_balanced_neg_sampler import 
IoUBalancedNegSampler 6 | from .combined_sampler import CombinedSampler 7 | from .ohem_sampler import OHEMSampler 8 | from .sampling_result import SamplingResult 9 | 10 | __all__ = [ 11 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 12 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 13 | 'OHEMSampler', 'SamplingResult' 14 | ] 15 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from ..assign_sampling import build_sampler 3 | 4 | 5 | class CombinedSampler(BaseSampler): 6 | 7 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 8 | super(CombinedSampler, self).__init__(**kwargs) 9 | self.pos_sampler = build_sampler(pos_sampler, **kwargs) 10 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from .random_sampler import RandomSampler 5 | 6 | 7 | class InstanceBalancedPosSampler(RandomSampler): 8 | 9 | def _sample_pos(self, assign_result, num_expected, **kwargs): 10 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 11 | if pos_inds.numel() != 0: 12 | pos_inds = pos_inds.squeeze(1) 13 | if pos_inds.numel() <= num_expected: 14 | return pos_inds 15 | else: 16 | unique_gt_inds = assign_result.gt_inds[pos_inds].unique() 17 | num_gts = len(unique_gt_inds) 18 | num_per_gt = int(round(num_expected / float(num_gts)) + 1) 19 | sampled_inds = [] 20 | for i in 
unique_gt_inds: 21 | inds = torch.nonzero(assign_result.gt_inds == i.item()) 22 | if inds.numel() != 0: 23 | inds = inds.squeeze(1) 24 | else: 25 | continue 26 | if len(inds) > num_per_gt: 27 | inds = self.random_choice(inds, num_per_gt) 28 | sampled_inds.append(inds) 29 | sampled_inds = torch.cat(sampled_inds) 30 | if len(sampled_inds) < num_expected: 31 | num_extra = num_expected - len(sampled_inds) 32 | extra_inds = np.array( 33 | list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) 34 | if len(extra_inds) > num_extra: 35 | extra_inds = self.random_choice(extra_inds, num_extra) 36 | extra_inds = torch.from_numpy(extra_inds).to( 37 | assign_result.gt_inds.device).long() 38 | sampled_inds = torch.cat([sampled_inds, extra_inds]) 39 | elif len(sampled_inds) > num_expected: 40 | sampled_inds = self.random_choice(sampled_inds, num_expected) 41 | return sampled_inds 42 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/samplers/pseudo_sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .base_sampler import BaseSampler 4 | from .sampling_result import SamplingResult 5 | 6 | 7 | class PseudoSampler(BaseSampler): 8 | 9 | def __init__(self, **kwargs): 10 | pass 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | 18 | def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): 19 | pos_inds = torch.nonzero( 20 | assign_result.gt_inds > 0).squeeze(-1).unique() 21 | neg_inds = torch.nonzero( 22 | assign_result.gt_inds == 0).squeeze(-1).unique() 23 | gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) 24 | sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, 25 | assign_result, gt_flags) 26 | return sampling_result 27 | -------------------------------------------------------------------------------- 
/pytracking/mmdetection/mmdet/core/bbox/samplers/random_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from .base_sampler import BaseSampler 5 | 6 | 7 | class RandomSampler(BaseSampler): 8 | 9 | def __init__(self, 10 | num, 11 | pos_fraction, 12 | neg_pos_ub=-1, 13 | add_gt_as_proposals=True, 14 | **kwargs): 15 | super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, 16 | add_gt_as_proposals) 17 | 18 | @staticmethod 19 | def random_choice(gallery, num): 20 | """Random select some elements from the gallery. 21 | 22 | It seems that Pytorch's implementation is slower than numpy so we use 23 | numpy to randperm the indices. 24 | """ 25 | assert len(gallery) >= num 26 | if isinstance(gallery, list): 27 | gallery = np.array(gallery) 28 | cands = np.arange(len(gallery)) 29 | np.random.shuffle(cands) 30 | rand_inds = cands[:num] 31 | if not isinstance(gallery, np.ndarray): 32 | rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device) 33 | return gallery[rand_inds] 34 | 35 | def _sample_pos(self, assign_result, num_expected, **kwargs): 36 | """Randomly sample some positive samples.""" 37 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 38 | if pos_inds.numel() != 0: 39 | pos_inds = pos_inds.squeeze(1) 40 | if pos_inds.numel() <= num_expected: 41 | return pos_inds 42 | else: 43 | return self.random_choice(pos_inds, num_expected) 44 | 45 | def _sample_neg(self, assign_result, num_expected, **kwargs): 46 | """Randomly sample some negative samples.""" 47 | neg_inds = torch.nonzero(assign_result.gt_inds == 0) 48 | if neg_inds.numel() != 0: 49 | neg_inds = neg_inds.squeeze(1) 50 | if len(neg_inds) <= num_expected: 51 | return neg_inds 52 | else: 53 | return self.random_choice(neg_inds, num_expected) 54 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/bbox/samplers/sampling_result.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class SamplingResult(object): 5 | 6 | def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, 7 | gt_flags): 8 | self.pos_inds = pos_inds 9 | self.neg_inds = neg_inds 10 | self.pos_bboxes = bboxes[pos_inds] 11 | self.neg_bboxes = bboxes[neg_inds] 12 | self.pos_is_gt = gt_flags[pos_inds] 13 | 14 | self.num_gts = gt_bboxes.shape[0] 15 | self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 16 | self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] 17 | if assign_result.labels is not None: 18 | self.pos_gt_labels = assign_result.labels[pos_inds] 19 | else: 20 | self.pos_gt_labels = None 21 | 22 | @property 23 | def bboxes(self): 24 | return torch.cat([self.pos_bboxes, self.neg_bboxes]) 25 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import (voc_classes, imagenet_det_classes, 2 | imagenet_vid_classes, coco_classes, dataset_aliases, 3 | get_classes) 4 | from .coco_utils import coco_eval, fast_eval_recall, results2json 5 | from .eval_hooks import (DistEvalHook, DistEvalmAPHook, CocoDistEvalRecallHook, 6 | CocoDistEvalmAPHook) 7 | from .mean_ap import average_precision, eval_map, print_map_summary 8 | from .recall import (eval_recalls, print_recall_summary, plot_num_recall, 9 | plot_iou_recall) 10 | 11 | __all__ = [ 12 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 13 | 'coco_classes', 'dataset_aliases', 'get_classes', 'coco_eval', 14 | 'fast_eval_recall', 'results2json', 'DistEvalHook', 'DistEvalmAPHook', 15 | 'CocoDistEvalRecallHook', 'CocoDistEvalmAPHook', 'average_precision', 16 | 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 17 | 'plot_num_recall', 'plot_iou_recall' 18 | ] 19 | 
-------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/evaluation/bbox_overlaps.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def bbox_overlaps(bboxes1, bboxes2, mode='iou'): 5 | """Calculate the ious between each bbox of bboxes1 and bboxes2. 6 | 7 | Args: 8 | bboxes1(ndarray): shape (n, 4) 9 | bboxes2(ndarray): shape (k, 4) 10 | mode(str): iou (intersection over union) or iof (intersection 11 | over foreground) 12 | 13 | Returns: 14 | ious(ndarray): shape (n, k) 15 | """ 16 | 17 | assert mode in ['iou', 'iof'] 18 | 19 | bboxes1 = bboxes1.astype(np.float32) 20 | bboxes2 = bboxes2.astype(np.float32) 21 | rows = bboxes1.shape[0] 22 | cols = bboxes2.shape[0] 23 | ious = np.zeros((rows, cols), dtype=np.float32) 24 | if rows * cols == 0: 25 | return ious 26 | exchange = False 27 | if bboxes1.shape[0] > bboxes2.shape[0]: 28 | bboxes1, bboxes2 = bboxes2, bboxes1 29 | ious = np.zeros((cols, rows), dtype=np.float32) 30 | exchange = True 31 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 32 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 33 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 34 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 35 | for i in range(bboxes1.shape[0]): 36 | x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) 37 | y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) 38 | x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) 39 | y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) 40 | overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum( 41 | y_end - y_start + 1, 0) 42 | if mode == 'iou': 43 | union = area1[i] + area2 - overlap 44 | else: 45 | union = area1[i] if not exchange else area2 46 | ious[i, :] = overlap / union 47 | if exchange: 48 | ious = ious.T 49 | return ious 50 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/core/loss/__init__.py: 
def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
                cfg):
    """Compute mask training targets for the positive proposals of a batch.

    Runs :func:`mask_target_single` once per image and concatenates the
    per-image results along dim 0.

    Args:
        pos_proposals_list (list[Tensor]): per-image positive proposals,
            each of shape (num_pos_i, 4).
        pos_assigned_gt_inds_list (list[Tensor]): per-image index of the GT
            object each positive proposal was assigned to.
        gt_masks_list (list): per-image ground-truth masks, indexable by GT
            index (presumably uint8 H x W arrays — confirm against caller).
        cfg: mask head config; only ``cfg.mask_size`` is read.

    Returns:
        Tensor: float targets of shape (sum_i num_pos_i, mask_size, mask_size).
    """
    # The same cfg is reused for every image in the batch.
    cfg_list = [cfg for _ in range(len(pos_proposals_list))]
    mask_targets = map(mask_target_single, pos_proposals_list,
                       pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
    mask_targets = torch.cat(list(mask_targets))
    return mask_targets


def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
    """Crop and resize each proposal's assigned GT mask to cfg.mask_size.

    Returns a float tensor of shape (num_pos, mask_size, mask_size) on the
    same device as ``pos_proposals`` (empty tensor when there are no
    positives).
    """
    mask_size = cfg.mask_size
    num_pos = pos_proposals.size(0)
    mask_targets = []
    if num_pos > 0:
        # Work on CPU/numpy: masks are numpy arrays and mmcv.imresize
        # operates on ndarrays.
        proposals_np = pos_proposals.cpu().numpy()
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        for i in range(num_pos):
            gt_mask = gt_masks[pos_assigned_gt_inds[i]]
            bbox = proposals_np[i, :].astype(np.int32)
            x1, y1, x2, y2 = bbox
            # Clamp to at least 1 pixel so degenerate boxes still yield a
            # non-empty crop ("+1" inclusive-coordinate convention).
            w = np.maximum(x2 - x1 + 1, 1)
            h = np.maximum(y2 - y1 + 1, 1)
            # mask is uint8 both before and after resizing
            # NOTE(review): numpy slicing silently clips y1+h / x1+w at the
            # mask border, so out-of-image boxes shrink rather than error.
            target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w],
                                   (mask_size, mask_size))
            mask_targets.append(target)
        mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to(
            pos_proposals.device)
    else:
        # No positives: return an empty, correctly-shaped tensor.
        mask_targets = pos_proposals.new_zeros((0, mask_size, mask_size))
    return mask_targets
def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys into masks.

    A mask is represented as a list of polys, and a poly is represented as
    a 1-D array. In dataset, all masks are concatenated into a single 1-D
    tensor. Here we need to split the tensor into original representations.

    Args:
        polys (list): a list (length = image num) of 1-D tensors
        poly_lens (list): a list (length = image num) of poly length
        polys_per_mask (list): a list (length = image num) of poly number
            of each mask

    Returns:
        list: a list (length = image num) of list (length = mask num) of
            list (length = poly num) of numpy array
    """

    def _split_image(img_id):
        # First cut the flat tensor into individual polys, then group the
        # polys into masks.
        per_poly = mmcv.slice_list(polys[img_id], poly_lens[img_id].tolist())
        return mmcv.slice_list(per_poly, polys_per_mask[img_id].tolist())

    return [_split_image(img_id) for img_id in range(len(polys))]
def multiclass_nms(multi_bboxes,
                   multi_scores,
                   score_thr,
                   nms_cfg,
                   max_num=-1,
                   score_factors=None):
    """NMS for multi-class bboxes.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class)
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): NMS config; ``type`` selects the op from
            ``nms_wrapper`` (default ``'nms'``) and the remaining keys are
            forwarded to it (e.g. the IoU threshold).
        max_num (int): if positive and there are more than max_num bboxes
            after NMS, only the top max_num are kept; non-positive keeps all.
        score_factors (Tensor): The factors multiplied to scores before
            applying NMS

    Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
            are 0-based.
    """
    num_classes = multi_scores.shape[1]
    bboxes, labels = [], []
    # Copy so popping 'type' does not mutate the caller's config dict.
    nms_cfg_ = nms_cfg.copy()
    nms_type = nms_cfg_.pop('type', 'nms')
    nms_op = getattr(nms_wrapper, nms_type)
    # Class 0 is background, hence the range starts at 1.
    for i in range(1, num_classes):
        cls_inds = multi_scores[:, i] > score_thr
        if not cls_inds.any():
            continue
        # get bboxes and scores of this class
        if multi_bboxes.shape[1] == 4:
            # Class-agnostic regression: one box shared by all classes.
            _bboxes = multi_bboxes[cls_inds, :]
        else:
            _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]
        _scores = multi_scores[cls_inds, i]
        if score_factors is not None:
            _scores *= score_factors[cls_inds]
        cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
        cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
        cls_labels = multi_bboxes.new_full(
            (cls_dets.shape[0], ), i - 1, dtype=torch.long)
        bboxes.append(cls_dets)
        labels.append(cls_labels)
    if bboxes:
        bboxes = torch.cat(bboxes)
        labels = torch.cat(labels)
        # BUG FIX: only cap the detections when a positive max_num was
        # requested. Previously the bare `bboxes.shape[0] > max_num` check
        # was always true with the default max_num=-1, and inds[:-1] then
        # silently dropped the lowest-scoring detection.
        if max_num > 0 and bboxes.shape[0] > max_num:
            _, inds = bboxes[:, -1].sort(descending=True)
            inds = inds[:max_num]
            bboxes = bboxes[inds]
            labels = labels[inds]
    else:
        bboxes = multi_bboxes.new_zeros((0, 5))
        labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)

    return bboxes, labels
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """All-reduce `tensors` in flattened buckets and average by world size.

    Flattening many small gradients into one contiguous buffer per bucket
    reduces the number of collective calls. With a positive
    ``bucket_size_mb`` the buckets are size-limited; otherwise tensors are
    bucketed by dtype only.
    """
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        # Group by tensor type: _flatten_dense_tensors requires tensors of
        # the same dtype within a bucket.
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        # Average in place, then copy each reduced slice back into its
        # original (unflattened) gradient tensor.
        flat_tensors.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced)


def allreduce_grads(model, coalesce=True, bucket_size_mb=-1):
    """Average the gradients of `model` across all distributed workers.

    Args:
        model (nn.Module): model whose parameter gradients are reduced
            in place.
        coalesce (bool): if True, flatten gradients into buckets before
            reducing (fewer, larger collectives).
        bucket_size_mb (int): bucket size limit in MB when coalescing;
            non-positive means bucket by dtype only.
    """
    # Parameters that never received a gradient this step are skipped.
    grads = [
        param.grad.data for param in model.parameters()
        if param.requires_grad and param.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        for tensor in grads:
            dist.all_reduce(tensor.div_(world_size))
def unmap(data, count, inds, fill=0):
    """ Unmap a subset of item (data) back to the original set of items (of
    size count) """
    if data.dim() == 1:
        # Flat case: scatter the values into a fill-initialized vector.
        out = data.new_full((count, ), fill)
        out[inds] = data
        return out
    # N-D case: keep all trailing dimensions, expand only the first.
    full_shape = (count, ) + data.size()[1:]
    out = data.new_full(full_shape, fill)
    out[inds, :] = data
    return out
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        # Class names are assumed identical across datasets; take the first.
        self.CLASSES = datasets[0].CLASSES
        # Per-image aspect-ratio group flags, concatenated in dataset order
        # (only when the first dataset defines them).
        if hasattr(datasets[0], 'flag'):
            self.flag = np.concatenate([ds.flag for ds in datasets])
class RepeatDataset(object):
    """Virtually repeat a dataset ``times`` times.

    Avoids epoch-restart overhead for small datasets: indexing wraps around
    the underlying dataset via modulo, nothing is copied.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        # Aspect-ratio group flags must be repeated to match the virtual
        # length (only when the wrapped dataset defines them).
        if hasattr(self.dataset, 'flag'):
            self.flag = np.tile(self.dataset.flag, times)

        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        # Wrap the virtual index back into the real dataset.
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        return self.times * self._ori_len
-------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .necks import * # noqa: F401,F403 3 | from .roi_extractors import * # noqa: F401,F403 4 | from .anchor_heads import * # noqa: F401,F403 5 | from .shared_heads import * # noqa: F401,F403 6 | from .bbox_heads import * # noqa: F401,F403 7 | from .mask_heads import * # noqa: F401,F403 8 | from .losses import * # noqa: F401,F403 9 | from .detectors import * # noqa: F401,F403 10 | from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS, 11 | LOSSES, DETECTORS) 12 | from .builder import (build_backbone, build_neck, build_roi_extractor, 13 | build_shared_head, build_head, build_loss, 14 | build_detector) 15 | 16 | __all__ = [ 17 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 18 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', 19 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector' 20 | ] 21 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/anchor_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_head import AnchorHead 2 | from .guided_anchor_head import GuidedAnchorHead, FeatureAdaption 3 | from .fcos_head import FCOSHead 4 | from .rpn_head import RPNHead 5 | from .ga_rpn_head import GARPNHead 6 | from .retina_head import RetinaHead 7 | from .ga_retina_head import GARetinaHead 8 | from .ssd_head import SSDHead 9 | 10 | __all__ = [ 11 | 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead', 12 | 'GARPNHead', 'RetinaHead', 'GARetinaHead', 'SSDHead', 'FCOSHead' 13 | ] 14 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnet 
def _build_module(cfg, registry, default_args):
    """Instantiate the single module described by ``cfg``.

    ``cfg['type']`` is either a registry key (string) or a class object;
    the remaining keys become constructor kwargs, with ``default_args``
    filling in anything the config omits.
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    assert default_args is None or isinstance(default_args, dict)
    kwargs = cfg.copy()
    obj_type = kwargs.pop('type')
    if mmcv.is_str(obj_type):
        resolved = registry.module_dict.get(obj_type)
        if resolved is None:
            raise KeyError('{} is not in the {} registry'.format(
                obj_type, registry.name))
        obj_type = resolved
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(
            type(obj_type)))
    if default_args is not None:
        # Config values win; defaults only fill gaps.
        for key, value in default_args.items():
            kwargs.setdefault(key, value)
    return obj_type(**kwargs)


def build(cfg, registry, default_args=None):
    """Build one module, or an nn.Sequential when ``cfg`` is a list."""
    if isinstance(cfg, list):
        return nn.Sequential(
            *[_build_module(sub_cfg, registry, default_args)
              for sub_cfg in cfg])
    return _build_module(cfg, registry, default_args)
build_backbone(cfg): 36 | return build(cfg, BACKBONES) 37 | 38 | 39 | def build_neck(cfg): 40 | return build(cfg, NECKS) 41 | 42 | 43 | def build_roi_extractor(cfg): 44 | return build(cfg, ROI_EXTRACTORS) 45 | 46 | 47 | def build_shared_head(cfg): 48 | return build(cfg, SHARED_HEADS) 49 | 50 | 51 | def build_head(cfg): 52 | return build(cfg, HEADS) 53 | 54 | 55 | def build_loss(cfg): 56 | return build(cfg, LOSSES) 57 | 58 | 59 | def build_detector(cfg, train_cfg=None, test_cfg=None): 60 | return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg)) 61 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/detectors/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseDetector 2 | from .single_stage import SingleStageDetector 3 | from .two_stage import TwoStageDetector 4 | from .rpn import RPN 5 | from .fast_rcnn import FastRCNN 6 | from .faster_rcnn import FasterRCNN 7 | from .mask_rcnn import MaskRCNN 8 | from .cascade_rcnn import CascadeRCNN 9 | from .htc import HybridTaskCascade 10 | from .retinanet import RetinaNet 11 | from .fcos import FCOS 12 | 13 | __all__ = [ 14 | 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', 15 | 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 16 | 'RetinaNet', 'FCOS' 17 | ] 18 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/detectors/fast_rcnn.py: -------------------------------------------------------------------------------- 1 | from .two_stage import TwoStageDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class FastRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | bbox_roi_extractor, 11 | bbox_head, 12 | train_cfg, 13 | test_cfg, 14 | neck=None, 15 | shared_head=None, 16 | mask_roi_extractor=None, 17 | 
mask_head=None, 18 | pretrained=None): 19 | super(FastRCNN, self).__init__( 20 | backbone=backbone, 21 | neck=neck, 22 | shared_head=shared_head, 23 | bbox_roi_extractor=bbox_roi_extractor, 24 | bbox_head=bbox_head, 25 | train_cfg=train_cfg, 26 | test_cfg=test_cfg, 27 | mask_roi_extractor=mask_roi_extractor, 28 | mask_head=mask_head, 29 | pretrained=pretrained) 30 | 31 | def forward_test(self, imgs, img_metas, proposals, **kwargs): 32 | for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: 33 | if not isinstance(var, list): 34 | raise TypeError('{} must be a list, but got {}'.format( 35 | name, type(var))) 36 | 37 | num_augs = len(imgs) 38 | if num_augs != len(img_metas): 39 | raise ValueError( 40 | 'num of augmentations ({}) != num of image meta ({})'.format( 41 | len(imgs), len(img_metas))) 42 | # TODO: remove the restriction of imgs_per_gpu == 1 when prepared 43 | imgs_per_gpu = imgs[0].size(0) 44 | assert imgs_per_gpu == 1 45 | 46 | if num_augs == 1: 47 | return self.simple_test(imgs[0], img_metas[0], proposals[0], 48 | **kwargs) 49 | else: 50 | return self.aug_test(imgs, img_metas, proposals, **kwargs) 51 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/detectors/faster_rcnn.py: -------------------------------------------------------------------------------- 1 | from .two_stage import TwoStageDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class FasterRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | rpn_head, 11 | bbox_roi_extractor, 12 | bbox_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | shared_head=None, 17 | pretrained=None): 18 | super(FasterRCNN, self).__init__( 19 | backbone=backbone, 20 | neck=neck, 21 | shared_head=shared_head, 22 | rpn_head=rpn_head, 23 | bbox_roi_extractor=bbox_roi_extractor, 24 | bbox_head=bbox_head, 25 | train_cfg=train_cfg, 26 | test_cfg=test_cfg, 27 | pretrained=pretrained) 28 | 
@DETECTORS.register_module
class MaskRCNN(TwoStageDetector):
    """Mask R-CNN: a two-stage detector with an additional mask branch.

    Pure configuration wrapper — all behavior lives in
    :class:`TwoStageDetector`; this subclass only makes the RPN, bbox and
    mask components required arguments and forwards everything to the base
    constructor.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 bbox_roi_extractor,
                 bbox_head,
                 mask_roi_extractor,
                 mask_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 shared_head=None,
                 pretrained=None):
        super(MaskRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            shared_head=shared_head,
            rpn_head=rpn_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            mask_roi_extractor=mask_roi_extractor,
            mask_head=mask_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)
@DETECTORS.register_module
class SingleStageDetector(BaseDetector):
    """Single-stage detector: backbone -> optional neck -> dense bbox head.

    Predicts boxes directly from dense feature maps — no RPN/RoI stage.
    All submodules are built from config dicts via the registry-based
    builder.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(SingleStageDetector, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        if neck is not None:
            self.neck = builder.build_neck(neck)
        self.bbox_head = builder.build_head(bbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize backbone (optionally from ``pretrained``), neck, head."""
        super(SingleStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            # The neck may be a single module or an nn.Sequential of modules.
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        self.bbox_head.init_weights()

    def extract_feat(self, img):
        """Extract backbone (+ neck) features; shared by train and test."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None):
        """Run one training forward pass and return the head's loss dict."""
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
        losses = self.bbox_head.loss(
            *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
        return losses

    def simple_test(self, img, img_meta, rescale=False):
        """Test without augmentation.

        Returns per-class bbox results for the first (and presumably only)
        image in the batch — note only ``bbox_results[0]`` is returned.
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
        bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
        bbox_results = [
            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for det_bboxes, det_labels in bbox_list
        ]
        return bbox_results[0]

    def aug_test(self, imgs, img_metas, rescale=False):
        # Multi-augmentation testing is not implemented for this detector.
        raise NotImplementedError
@LOSSES.register_module
class FocalLoss(nn.Module):
    """Sigmoid focal loss wrapper around ``weighted_sigmoid_focal_loss``.

    Only the sigmoid variant is supported; the constructor enforces this.
    """

    def __init__(self,
                 use_sigmoid=False,
                 loss_weight=1.0,
                 gamma=2.0,
                 alpha=0.25):
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focaloss supported now.'
        self.use_sigmoid = use_sigmoid
        self.loss_weight = loss_weight
        self.gamma = gamma
        self.alpha = alpha
        self.cls_criterion = weighted_sigmoid_focal_loss

    def forward(self, cls_score, label, label_weight, *args, **kwargs):
        # Guard clause: the softmax variant is not implemented (unreachable
        # in practice since __init__ asserts use_sigmoid is True).
        if not self.use_sigmoid:
            raise NotImplementedError
        return self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            label_weight,
            gamma=self.gamma,
            alpha=self.alpha,
            *args,
            **kwargs)
@LOSSES.register_module
class SmoothL1Loss(nn.Module):
    """Weighted smooth-L1 (Huber) loss wrapper around ``weighted_smoothl1``.

    Args:
        beta (float): transition point between L2 and L1 behavior.
        loss_weight (float): scalar multiplier applied to the raw loss.
    """

    def __init__(self, beta=1.0, loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight, *args, **kwargs):
        raw_loss = weighted_smoothl1(
            pred, target, weight, beta=self.beta, *args, **kwargs)
        return self.loss_weight * raw_loss
| self.conv_out_channels, 13 | self.conv_out_channels, 14 | 1, 15 | conv_cfg=self.conv_cfg, 16 | norm_cfg=self.norm_cfg) 17 | 18 | def init_weights(self): 19 | super(HTCMaskHead, self).init_weights() 20 | self.conv_res.init_weights() 21 | 22 | def forward(self, x, res_feat=None, return_logits=True, return_feat=True): 23 | if res_feat is not None: 24 | res_feat = self.conv_res(res_feat) 25 | x = x + res_feat 26 | for conv in self.convs: 27 | x = conv(x) 28 | res_feat = x 29 | outs = [] 30 | if return_logits: 31 | x = self.upsample(x) 32 | if self.upsample_method == 'deconv': 33 | x = self.relu(x) 34 | mask_pred = self.conv_logits(x) 35 | outs.append(mask_pred) 36 | if return_feat: 37 | outs.append(res_feat) 38 | return outs if len(outs) > 1 else outs[0] 39 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .fpn import FPN 2 | from .hrfpn import HRFPN 3 | 4 | __all__ = ['FPN', 'HRFPN'] 5 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/registry.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class Registry(object): 5 | 6 | def __init__(self, name): 7 | self._name = name 8 | self._module_dict = dict() 9 | 10 | @property 11 | def name(self): 12 | return self._name 13 | 14 | @property 15 | def module_dict(self): 16 | return self._module_dict 17 | 18 | def _register_module(self, module_class): 19 | """Register a module. 20 | 21 | Args: 22 | module (:obj:`nn.Module`): Module to be registered. 
23 | """ 24 | if not issubclass(module_class, nn.Module): 25 | raise TypeError( 26 | 'module must be a child of nn.Module, but got {}'.format( 27 | module_class)) 28 | module_name = module_class.__name__ 29 | if module_name in self._module_dict: 30 | raise KeyError('{} is already registered in {}'.format( 31 | module_name, self.name)) 32 | self._module_dict[module_name] = module_class 33 | 34 | def register_module(self, cls): 35 | self._register_module(cls) 36 | return cls 37 | 38 | 39 | BACKBONES = Registry('backbone') 40 | NECKS = Registry('neck') 41 | ROI_EXTRACTORS = Registry('roi_extractor') 42 | SHARED_HEADS = Registry('shared_head') 43 | HEADS = Registry('head') 44 | LOSSES = Registry('loss') 45 | DETECTORS = Registry('detector') 46 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .single_level import SingleRoIExtractor 2 | 3 | __all__ = ['SingleRoIExtractor'] 4 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/shared_heads/res_layer.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import torch.nn as nn 4 | from mmcv.cnn import constant_init, kaiming_init 5 | from mmcv.runner import load_checkpoint 6 | 7 | from ..backbones import ResNet, make_res_layer 8 | from ..registry import SHARED_HEADS 9 | 10 | 11 | @SHARED_HEADS.register_module 12 | class ResLayer(nn.Module): 13 | 14 | def __init__(self, 15 | depth, 16 | stage=3, 17 | stride=2, 18 | dilation=1, 
19 | style='pytorch', 20 | norm_cfg=dict(type='BN', requires_grad=True), 21 | norm_eval=True, 22 | with_cp=False, 23 | dcn=None): 24 | super(ResLayer, self).__init__() 25 | self.norm_eval = norm_eval 26 | self.norm_cfg = norm_cfg 27 | self.stage = stage 28 | block, stage_blocks = ResNet.arch_settings[depth] 29 | stage_block = stage_blocks[stage] 30 | planes = 64 * 2**stage 31 | inplanes = 64 * 2**(stage - 1) * block.expansion 32 | 33 | res_layer = make_res_layer( 34 | block, 35 | inplanes, 36 | planes, 37 | stage_block, 38 | stride=stride, 39 | dilation=dilation, 40 | style=style, 41 | with_cp=with_cp, 42 | norm_cfg=self.norm_cfg, 43 | dcn=dcn) 44 | self.add_module('layer{}'.format(stage + 1), res_layer) 45 | 46 | def init_weights(self, pretrained=None): 47 | if isinstance(pretrained, str): 48 | logger = logging.getLogger() 49 | load_checkpoint(self, pretrained, strict=False, logger=logger) 50 | elif pretrained is None: 51 | for m in self.modules(): 52 | if isinstance(m, nn.Conv2d): 53 | kaiming_init(m) 54 | elif isinstance(m, nn.BatchNorm2d): 55 | constant_init(m, 1) 56 | else: 57 | raise TypeError('pretrained must be a str or None') 58 | 59 | def forward(self, x): 60 | res_layer = getattr(self, 'layer{}'.format(self.stage + 1)) 61 | out = res_layer(x) 62 | return out 63 | 64 | def train(self, mode=True): 65 | super(ResLayer, self).train(mode) 66 | if self.norm_eval: 67 | for m in self.modules(): 68 | if isinstance(m, nn.BatchNorm2d): 69 | m.eval() 70 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .conv_ws import conv_ws_2d, ConvWS2d 2 | from .conv_module import build_conv_layer, ConvModule 3 | from .norm import build_norm_layer 4 | from .scale import Scale 5 | from .weight_init import (xavier_init, normal_init, uniform_init, kaiming_init, 6 | bias_init_with_prob) 7 | 8 | __all__ = [ 9 
| 'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule', 10 | 'build_norm_layer', 'xavier_init', 'normal_init', 'uniform_init', 11 | 'kaiming_init', 'bias_init_with_prob', 'Scale' 12 | ] 13 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/utils/conv_ws.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | def conv_ws_2d(input, 6 | weight, 7 | bias=None, 8 | stride=1, 9 | padding=0, 10 | dilation=1, 11 | groups=1, 12 | eps=1e-5): 13 | c_in = weight.size(0) 14 | weight_flat = weight.view(c_in, -1) 15 | mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) 16 | std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) 17 | weight = (weight - mean) / (std + eps) 18 | return F.conv2d(input, weight, bias, stride, padding, dilation, groups) 19 | 20 | 21 | class ConvWS2d(nn.Conv2d): 22 | 23 | def __init__(self, 24 | in_channels, 25 | out_channels, 26 | kernel_size, 27 | stride=1, 28 | padding=0, 29 | dilation=1, 30 | groups=1, 31 | bias=True, 32 | eps=1e-5): 33 | super(ConvWS2d, self).__init__( 34 | in_channels, 35 | out_channels, 36 | kernel_size, 37 | stride=stride, 38 | padding=padding, 39 | dilation=dilation, 40 | groups=groups, 41 | bias=bias) 42 | self.eps = eps 43 | 44 | def forward(self, x): 45 | return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, 46 | self.dilation, self.groups, self.eps) 47 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/utils/norm.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | norm_cfg = { 4 | # format: layer_type: (abbreviation, module) 5 | 'BN': ('bn', nn.BatchNorm2d), 6 | 'SyncBN': ('bn', nn.SyncBatchNorm), 7 | 'GN': ('gn', nn.GroupNorm), 8 | # and potentially 'SN' 9 | } 10 | 11 | 12 | 
def build_norm_layer(cfg, num_features, postfix=''): 13 | """ Build normalization layer 14 | 15 | Args: 16 | cfg (dict): cfg should contain: 17 | type (str): identify norm layer type. 18 | layer args: args needed to instantiate a norm layer. 19 | requires_grad (bool): [optional] whether stop gradient updates 20 | num_features (int): number of channels from input. 21 | postfix (int, str): appended into norm abbreviation to 22 | create named layer. 23 | 24 | Returns: 25 | name (str): abbreviation + postfix 26 | layer (nn.Module): created norm layer 27 | """ 28 | assert isinstance(cfg, dict) and 'type' in cfg 29 | cfg_ = cfg.copy() 30 | 31 | layer_type = cfg_.pop('type') 32 | if layer_type not in norm_cfg: 33 | raise KeyError('Unrecognized norm type {}'.format(layer_type)) 34 | else: 35 | abbr, norm_layer = norm_cfg[layer_type] 36 | if norm_layer is None: 37 | raise NotImplementedError 38 | 39 | assert isinstance(postfix, (int, str)) 40 | name = abbr + str(postfix) 41 | 42 | requires_grad = cfg_.pop('requires_grad', True) 43 | cfg_.setdefault('eps', 1e-5) 44 | if layer_type != 'GN': 45 | layer = norm_layer(num_features, **cfg_) 46 | if layer_type == 'SyncBN': 47 | layer._specify_ddp_gpu_num(1) 48 | else: 49 | assert 'num_groups' in cfg_ 50 | layer = norm_layer(num_channels=num_features, **cfg_) 51 | 52 | for param in layer.parameters(): 53 | param.requires_grad = requires_grad 54 | 55 | return name, layer 56 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/utils/scale.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class Scale(nn.Module): 6 | 7 | def __init__(self, scale=1.0): 8 | super(Scale, self).__init__() 9 | self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) 10 | 11 | def forward(self, x): 12 | return x * self.scale 13 | 
-------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/models/utils/weight_init.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | 4 | 5 | def xavier_init(module, gain=1, bias=0, distribution='normal'): 6 | assert distribution in ['uniform', 'normal'] 7 | if distribution == 'uniform': 8 | nn.init.xavier_uniform_(module.weight, gain=gain) 9 | else: 10 | nn.init.xavier_normal_(module.weight, gain=gain) 11 | if hasattr(module, 'bias'): 12 | nn.init.constant_(module.bias, bias) 13 | 14 | 15 | def normal_init(module, mean=0, std=1, bias=0): 16 | nn.init.normal_(module.weight, mean, std) 17 | if hasattr(module, 'bias'): 18 | nn.init.constant_(module.bias, bias) 19 | 20 | 21 | def uniform_init(module, a=0, b=1, bias=0): 22 | nn.init.uniform_(module.weight, a, b) 23 | if hasattr(module, 'bias'): 24 | nn.init.constant_(module.bias, bias) 25 | 26 | 27 | def kaiming_init(module, 28 | mode='fan_out', 29 | nonlinearity='relu', 30 | bias=0, 31 | distribution='normal'): 32 | assert distribution in ['uniform', 'normal'] 33 | if distribution == 'uniform': 34 | nn.init.kaiming_uniform_( 35 | module.weight, mode=mode, nonlinearity=nonlinearity) 36 | else: 37 | nn.init.kaiming_normal_( 38 | module.weight, mode=mode, nonlinearity=nonlinearity) 39 | if hasattr(module, 'bias'): 40 | nn.init.constant_(module.bias, bias) 41 | 42 | 43 | def bias_init_with_prob(prior_prob): 44 | """ initialize conv/fc bias value according to giving probablity""" 45 | bias_init = float(-np.log((1 - prior_prob) / prior_prob)) 46 | return bias_init 47 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .dcn import (DeformConv, DeformConvPack, ModulatedDeformConv, 2 | ModulatedDeformConvPack, 
DeformRoIPooling, 3 | DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack, 4 | deform_conv, modulated_deform_conv, deform_roi_pooling) 5 | from .nms import nms, soft_nms 6 | from .roi_align import RoIAlign, roi_align 7 | from .roi_pool import RoIPool, roi_pool 8 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 9 | from .masked_conv import MaskedConv2d 10 | 11 | __all__ = [ 12 | 'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 13 | 'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 14 | 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv', 15 | 'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv', 16 | 'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss', 17 | 'MaskedConv2d' 18 | ] 19 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/dcn/__init__.py: -------------------------------------------------------------------------------- 1 | from .functions.deform_conv import deform_conv, modulated_deform_conv 2 | from .functions.deform_pool import deform_roi_pooling 3 | from .modules.deform_conv import (DeformConv, ModulatedDeformConv, 4 | DeformConvPack, ModulatedDeformConvPack) 5 | from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, 6 | ModulatedDeformRoIPoolingPack) 7 | 8 | __all__ = [ 9 | 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 10 | 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 11 | 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv', 12 | 'deform_roi_pooling' 13 | ] 14 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/dcn/functions/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/dcn/functions/__init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/dcn/functions/deform_pool.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Function 3 | 4 | from .. import deform_pool_cuda 5 | 6 | 7 | class DeformRoIPoolingFunction(Function): 8 | 9 | @staticmethod 10 | def forward(ctx, 11 | data, 12 | rois, 13 | offset, 14 | spatial_scale, 15 | out_size, 16 | out_channels, 17 | no_trans, 18 | group_size=1, 19 | part_size=None, 20 | sample_per_part=4, 21 | trans_std=.0): 22 | ctx.spatial_scale = spatial_scale 23 | ctx.out_size = out_size 24 | ctx.out_channels = out_channels 25 | ctx.no_trans = no_trans 26 | ctx.group_size = group_size 27 | ctx.part_size = out_size if part_size is None else part_size 28 | ctx.sample_per_part = sample_per_part 29 | ctx.trans_std = trans_std 30 | 31 | assert 0.0 <= ctx.trans_std <= 1.0 32 | if not data.is_cuda: 33 | raise NotImplementedError 34 | 35 | n = rois.shape[0] 36 | output = data.new_empty(n, out_channels, out_size, out_size) 37 | output_count = data.new_empty(n, out_channels, out_size, out_size) 38 | deform_pool_cuda.deform_psroi_pooling_cuda_forward( 39 | data, rois, offset, output, output_count, ctx.no_trans, 40 | ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size, 41 | ctx.part_size, ctx.sample_per_part, ctx.trans_std) 42 | 43 | if data.requires_grad or rois.requires_grad or offset.requires_grad: 44 | ctx.save_for_backward(data, rois, offset) 45 | ctx.output_count = output_count 46 | 47 | return output 48 | 49 | @staticmethod 50 | def backward(ctx, grad_output): 51 | if not grad_output.is_cuda: 52 | raise NotImplementedError 53 | 54 | data, rois, offset = ctx.saved_tensors 55 | output_count = ctx.output_count 56 | grad_input = 
torch.zeros_like(data) 57 | grad_rois = None 58 | grad_offset = torch.zeros_like(offset) 59 | 60 | deform_pool_cuda.deform_psroi_pooling_cuda_backward( 61 | grad_output, data, rois, offset, output_count, grad_input, 62 | grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels, 63 | ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part, 64 | ctx.trans_std) 65 | return (grad_input, grad_rois, grad_offset, None, None, None, None, 66 | None, None, None, None) 67 | 68 | 69 | deform_roi_pooling = DeformRoIPoolingFunction.apply 70 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/dcn/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/dcn/modules/__init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/dcn/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 3 | 4 | setup( 5 | name='deform_conv', 6 | ext_modules=[ 7 | CUDAExtension('deform_conv_cuda', [ 8 | 'src/deform_conv_cuda.cpp', 9 | 'src/deform_conv_cuda_kernel.cu', 10 | ]), 11 | CUDAExtension( 12 | 'deform_pool_cuda', 13 | ['src/deform_pool_cuda.cpp', 'src/deform_pool_cuda_kernel.cu']), 14 | ], 15 | cmdclass={'build_ext': BuildExtension}) 16 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/masked_conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .functions.masked_conv import masked_conv2d 2 | from .modules.masked_conv import MaskedConv2d 3 | 4 | __all__ = ['masked_conv2d', 'MaskedConv2d'] 5 | 
-------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/masked_conv/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/masked_conv/functions/__init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/masked_conv/functions/masked_conv.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | from torch.autograd import Function 4 | from torch.nn.modules.utils import _pair 5 | from .. import masked_conv2d_cuda 6 | 7 | 8 | class MaskedConv2dFunction(Function): 9 | 10 | @staticmethod 11 | def forward(ctx, features, mask, weight, bias, padding=0, stride=1): 12 | assert mask.dim() == 3 and mask.size(0) == 1 13 | assert features.dim() == 4 and features.size(0) == 1 14 | assert features.size()[2:] == mask.size()[1:] 15 | pad_h, pad_w = _pair(padding) 16 | stride_h, stride_w = _pair(stride) 17 | if stride_h != 1 or stride_w != 1: 18 | raise ValueError( 19 | 'Stride could not only be 1 in masked_conv2d currently.') 20 | if not features.is_cuda: 21 | raise NotImplementedError 22 | 23 | out_channel, in_channel, kernel_h, kernel_w = weight.size() 24 | 25 | batch_size = features.size(0) 26 | out_h = int( 27 | math.floor((features.size(2) + 2 * pad_h - 28 | (kernel_h - 1) - 1) / stride_h + 1)) 29 | out_w = int( 30 | math.floor((features.size(3) + 2 * pad_w - 31 | (kernel_h - 1) - 1) / stride_w + 1)) 32 | mask_inds = torch.nonzero(mask[0] > 0) 33 | mask_h_idx = mask_inds[:, 0].contiguous() 34 | mask_w_idx = mask_inds[:, 1].contiguous() 35 | data_col = features.new_zeros(in_channel * kernel_h * kernel_w, 36 | mask_inds.size(0)) 37 | masked_conv2d_cuda.masked_im2col_forward(features, mask_h_idx, 38 | mask_w_idx, 
kernel_h, 39 | kernel_w, pad_h, pad_w, 40 | data_col) 41 | 42 | masked_output = torch.addmm(1, bias[:, None], 1, 43 | weight.view(out_channel, -1), data_col) 44 | output = features.new_zeros(batch_size, out_channel, out_h, out_w) 45 | masked_conv2d_cuda.masked_col2im_forward(masked_output, mask_h_idx, 46 | mask_w_idx, out_h, out_w, 47 | out_channel, output) 48 | return output 49 | 50 | @staticmethod 51 | def backward(ctx, grad_output): 52 | return (None, ) * 5 53 | 54 | 55 | masked_conv2d = MaskedConv2dFunction.apply 56 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/masked_conv/modules/__Init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/masked_conv/modules/__Init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/masked_conv/modules/masked_conv.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from ..functions.masked_conv import masked_conv2d 3 | 4 | 5 | class MaskedConv2d(nn.Conv2d): 6 | """A MaskedConv2d which inherits the official Conv2d. 7 | 8 | The masked forward doesn't implement the backward function and only 9 | supports the stride parameter to be 1 currently. 
10 | """ 11 | 12 | def __init__(self, 13 | in_channels, 14 | out_channels, 15 | kernel_size, 16 | stride=1, 17 | padding=0, 18 | dilation=1, 19 | groups=1, 20 | bias=True): 21 | super(MaskedConv2d, 22 | self).__init__(in_channels, out_channels, kernel_size, stride, 23 | padding, dilation, groups, bias) 24 | 25 | def forward(self, input, mask=None): 26 | if mask is None: # fallback to the normal Conv2d 27 | return super(MaskedConv2d, self).forward(input) 28 | else: 29 | return masked_conv2d(input, mask, self.weight, self.bias, 30 | self.padding) 31 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/masked_conv/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 3 | 4 | setup( 5 | name='masked_conv2d_cuda', 6 | ext_modules=[ 7 | CUDAExtension('masked_conv2d_cuda', [ 8 | 'src/masked_conv2d_cuda.cpp', 9 | 'src/masked_conv2d_kernel.cu', 10 | ]), 11 | ], 12 | cmdclass={'build_ext': BuildExtension}) 13 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/nms/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_wrapper import nms, soft_nms 2 | 3 | __all__ = ['nms', 'soft_nms'] 4 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/nms/nms_wrapper.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from . import nms_cuda, nms_cpu 5 | from .soft_nms_cpu import soft_nms_cpu 6 | 7 | 8 | def nms(dets, iou_thr, device_id=None): 9 | """Dispatch to either CPU or GPU NMS implementations. 10 | 11 | The input can be either a torch tensor or numpy array. 
GPU NMS will be used 12 | if the input is a gpu tensor or device_id is specified, otherwise CPU NMS 13 | will be used. The returned type will always be the same as inputs. 14 | 15 | Arguments: 16 | dets (torch.Tensor or np.ndarray): bboxes with scores. 17 | iou_thr (float): IoU threshold for NMS. 18 | device_id (int, optional): when `dets` is a numpy array, if `device_id` 19 | is None, then cpu nms is used, otherwise gpu_nms will be used. 20 | 21 | Returns: 22 | tuple: kept bboxes and indice, which is always the same data type as 23 | the input. 24 | """ 25 | # convert dets (tensor or numpy array) to tensor 26 | if isinstance(dets, torch.Tensor): 27 | is_numpy = False 28 | dets_th = dets 29 | elif isinstance(dets, np.ndarray): 30 | is_numpy = True 31 | device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id) 32 | dets_th = torch.from_numpy(dets).to(device) 33 | else: 34 | raise TypeError( 35 | 'dets must be either a Tensor or numpy array, but got {}'.format( 36 | type(dets))) 37 | 38 | # execute cpu or cuda nms 39 | if dets_th.shape[0] == 0: 40 | inds = dets_th.new_zeros(0, dtype=torch.long) 41 | else: 42 | if dets_th.is_cuda: 43 | inds = nms_cuda.nms(dets_th, iou_thr) 44 | else: 45 | inds = nms_cpu.nms(dets_th, iou_thr) 46 | 47 | if is_numpy: 48 | inds = inds.cpu().numpy() 49 | return dets[inds, :], inds 50 | 51 | 52 | def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3): 53 | if isinstance(dets, torch.Tensor): 54 | is_tensor = True 55 | dets_np = dets.detach().cpu().numpy() 56 | elif isinstance(dets, np.ndarray): 57 | is_tensor = False 58 | dets_np = dets 59 | else: 60 | raise TypeError( 61 | 'dets must be either a Tensor or numpy array, but got {}'.format( 62 | type(dets))) 63 | 64 | method_codes = {'linear': 1, 'gaussian': 2} 65 | if method not in method_codes: 66 | raise ValueError('Invalid method for SoftNMS: {}'.format(method)) 67 | new_dets, inds = soft_nms_cpu( 68 | dets_np, 69 | iou_thr, 70 | method=method_codes[method], 71 
| sigma=sigma, 72 | min_score=min_score) 73 | 74 | if is_tensor: 75 | return dets.new_tensor(new_dets), dets.new_tensor( 76 | inds, dtype=torch.long) 77 | else: 78 | return new_dets.astype(np.float32), inds.astype(np.int64) 79 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/nms/src/nms_cpu.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | #include 3 | 4 | template 5 | at::Tensor nms_cpu_kernel(const at::Tensor& dets, const float threshold) { 6 | AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor"); 7 | 8 | if (dets.numel() == 0) { 9 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); 10 | } 11 | 12 | auto x1_t = dets.select(1, 0).contiguous(); 13 | auto y1_t = dets.select(1, 1).contiguous(); 14 | auto x2_t = dets.select(1, 2).contiguous(); 15 | auto y2_t = dets.select(1, 3).contiguous(); 16 | auto scores = dets.select(1, 4).contiguous(); 17 | 18 | at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1); 19 | 20 | auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); 21 | 22 | auto ndets = dets.size(0); 23 | at::Tensor suppressed_t = 24 | at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU)); 25 | 26 | auto suppressed = suppressed_t.data(); 27 | auto order = order_t.data(); 28 | auto x1 = x1_t.data(); 29 | auto y1 = y1_t.data(); 30 | auto x2 = x2_t.data(); 31 | auto y2 = y2_t.data(); 32 | auto areas = areas_t.data(); 33 | 34 | for (int64_t _i = 0; _i < ndets; _i++) { 35 | auto i = order[_i]; 36 | if (suppressed[i] == 1) continue; 37 | auto ix1 = x1[i]; 38 | auto iy1 = y1[i]; 39 | auto ix2 = x2[i]; 40 | auto iy2 = y2[i]; 41 | auto iarea = areas[i]; 42 | 43 | for (int64_t _j = _i + 1; _j < ndets; _j++) { 44 | auto j = order[_j]; 45 | if (suppressed[j] == 1) continue; 46 | auto xx1 = std::max(ix1, x1[j]); 47 | auto yy1 = 
std::max(iy1, y1[j]); 48 | auto xx2 = std::min(ix2, x2[j]); 49 | auto yy2 = std::min(iy2, y2[j]); 50 | 51 | auto w = std::max(static_cast(0), xx2 - xx1 + 1); 52 | auto h = std::max(static_cast(0), yy2 - yy1 + 1); 53 | auto inter = w * h; 54 | auto ovr = inter / (iarea + areas[j] - inter); 55 | if (ovr >= threshold) suppressed[j] = 1; 56 | } 57 | } 58 | return at::nonzero(suppressed_t == 0).squeeze(1); 59 | } 60 | 61 | at::Tensor nms(const at::Tensor& dets, const float threshold) { 62 | at::Tensor result; 63 | AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] { 64 | result = nms_cpu_kernel(dets, threshold); 65 | }); 66 | return result; 67 | } 68 | 69 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 70 | m.def("nms", &nms, "non-maximum suppression"); 71 | } -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/nms/src/nms_cuda.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
2 | #include 3 | 4 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 5 | 6 | at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); 7 | 8 | at::Tensor nms(const at::Tensor& dets, const float threshold) { 9 | CHECK_CUDA(dets); 10 | if (dets.numel() == 0) 11 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); 12 | return nms_cuda(dets, threshold); 13 | } 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("nms", &nms, "non-maximum suppression"); 17 | } -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_align/__init__.py: -------------------------------------------------------------------------------- 1 | from .functions.roi_align import roi_align 2 | from .modules.roi_align import RoIAlign 3 | 4 | __all__ = ['roi_align', 'RoIAlign'] 5 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_align/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/roi_align/functions/__init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_align/functions/roi_align.py: -------------------------------------------------------------------------------- 1 | from torch.autograd import Function 2 | 3 | from .. 
import roi_align_cuda 4 | 5 | 6 | class RoIAlignFunction(Function): 7 | 8 | @staticmethod 9 | def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0): 10 | if isinstance(out_size, int): 11 | out_h = out_size 12 | out_w = out_size 13 | elif isinstance(out_size, tuple): 14 | assert len(out_size) == 2 15 | assert isinstance(out_size[0], int) 16 | assert isinstance(out_size[1], int) 17 | out_h, out_w = out_size 18 | else: 19 | raise TypeError( 20 | '"out_size" must be an integer or tuple of integers') 21 | ctx.spatial_scale = spatial_scale 22 | ctx.sample_num = sample_num 23 | ctx.save_for_backward(rois) 24 | ctx.feature_size = features.size() 25 | 26 | batch_size, num_channels, data_height, data_width = features.size() 27 | num_rois = rois.size(0) 28 | 29 | output = features.new_zeros(num_rois, num_channels, out_h, out_w) 30 | if features.is_cuda: 31 | roi_align_cuda.forward(features, rois, out_h, out_w, spatial_scale, 32 | sample_num, output) 33 | else: 34 | raise NotImplementedError 35 | 36 | return output 37 | 38 | @staticmethod 39 | def backward(ctx, grad_output): 40 | feature_size = ctx.feature_size 41 | spatial_scale = ctx.spatial_scale 42 | sample_num = ctx.sample_num 43 | rois = ctx.saved_tensors[0] 44 | assert (feature_size is not None and grad_output.is_cuda) 45 | 46 | batch_size, num_channels, data_height, data_width = feature_size 47 | out_w = grad_output.size(3) 48 | out_h = grad_output.size(2) 49 | 50 | grad_input = grad_rois = None 51 | if ctx.needs_input_grad[0]: 52 | grad_input = rois.new_zeros(batch_size, num_channels, data_height, 53 | data_width) 54 | roi_align_cuda.backward(grad_output.contiguous(), rois, out_h, 55 | out_w, spatial_scale, sample_num, 56 | grad_input) 57 | 58 | return grad_input, grad_rois, None, None, None 59 | 60 | 61 | roi_align = RoIAlignFunction.apply 62 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_align/gradcheck.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch.autograd import gradcheck 4 | 5 | import os.path as osp 6 | import sys 7 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 8 | from roi_align import RoIAlign # noqa: E402 9 | 10 | feat_size = 15 11 | spatial_scale = 1.0 / 8 12 | img_size = feat_size / spatial_scale 13 | num_imgs = 2 14 | num_rois = 20 15 | 16 | batch_ind = np.random.randint(num_imgs, size=(num_rois, 1)) 17 | rois = np.random.rand(num_rois, 4) * img_size * 0.5 18 | rois[:, 2:] += img_size * 0.5 19 | rois = np.hstack((batch_ind, rois)) 20 | 21 | feat = torch.randn( 22 | num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0') 23 | rois = torch.from_numpy(rois).float().cuda() 24 | inputs = (feat, rois) 25 | print('Gradcheck for roi align...') 26 | test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3) 27 | print(test) 28 | test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3) 29 | print(test) 30 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_align/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/roi_align/modules/__init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_align/modules/roi_align.py: -------------------------------------------------------------------------------- 1 | from torch.nn.modules.module import Module 2 | from ..functions.roi_align import RoIAlignFunction 3 | 4 | 5 | class RoIAlign(Module): 6 | 7 | def __init__(self, out_size, spatial_scale, sample_num=0): 8 | super(RoIAlign, self).__init__() 9 | 10 | self.out_size = out_size 11 | self.spatial_scale = 
float(spatial_scale) 12 | self.sample_num = int(sample_num) 13 | 14 | def forward(self, features, rois): 15 | return RoIAlignFunction.apply(features, rois, self.out_size, 16 | self.spatial_scale, self.sample_num) 17 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_align/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 3 | 4 | setup( 5 | name='roi_align_cuda', 6 | ext_modules=[ 7 | CUDAExtension('roi_align_cuda', [ 8 | 'src/roi_align_cuda.cpp', 9 | 'src/roi_align_kernel.cu', 10 | ]), 11 | ], 12 | cmdclass={'build_ext': BuildExtension}) 13 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .functions.roi_pool import roi_pool 2 | from .modules.roi_pool import RoIPool 3 | 4 | __all__ = ['roi_pool', 'RoIPool'] 5 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_pool/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/roi_pool/functions/__init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/roi_pool/functions/roi_pool.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Function 3 | 4 | from .. 
class RoIPoolFunction(Function):
    """Autograd function for max RoI pooling, backed by the roi_pool_cuda
    C++/CUDA extension.

    forward() pools every RoI to a fixed (out_h, out_w) grid and records the
    argmax index of each output cell; backward() scatters the incoming
    gradient back onto those argmax positions. CUDA-only: both passes assert
    that their inputs live on the GPU.
    """

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        # out_size is either one int (square output) or an (h, w) tuple.
        if isinstance(out_size, int):
            out_h = out_size
            out_w = out_size
        elif isinstance(out_size, tuple):
            assert len(out_size) == 2
            assert isinstance(out_size[0], int)
            assert isinstance(out_size[1], int)
            out_h, out_w = out_size
        else:
            raise TypeError(
                '"out_size" must be an integer or tuple of integers')
        assert features.is_cuda
        ctx.save_for_backward(rois)
        num_channels = features.size(1)
        num_rois = rois.size(0)
        # Reuses the name out_size for the output tensor shape from here on.
        out_size = (num_rois, num_channels, out_h, out_w)
        output = features.new_zeros(out_size)
        # argmax holds, per output cell, the flat index of the winning input
        # element; the CUDA kernel fills both tensors in place.
        argmax = features.new_zeros(out_size, dtype=torch.int)
        roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale,
                              output, argmax)
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        # Kept as a plain ctx attribute (not save_for_backward) — it is an
        # int tensor used only for routing gradients, never differentiated.
        ctx.argmax = argmax

        return output

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        spatial_scale = ctx.spatial_scale
        feature_size = ctx.feature_size
        argmax = ctx.argmax
        rois = ctx.saved_tensors[0]
        assert feature_size is not None

        # No gradient flows to rois or the hyperparameters; only the
        # features input (position 0) can require grad.
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.new_zeros(feature_size)
            roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax,
                                   spatial_scale, grad_input)

        # One return slot per forward argument (features, rois, out_size,
        # spatial_scale).
        return grad_input, grad_rois, None, None


roi_pool = RoIPoolFunction.apply
class RoIPool(Module):
    """nn.Module wrapper around the `roi_pool` autograd function.

    args:
        out_size: Pooled output size — an int (square) or an (h, w) tuple.
        spatial_scale: Factor mapping image coordinates to feature-map
            coordinates (e.g. 1/8 for stride-8 features).
    """

    def __init__(self, out_size, spatial_scale):
        super(RoIPool, self).__init__()
        self.out_size = out_size
        # Coerce to float so the CUDA op always receives a scalar float.
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        # rois: presumably N x 5 rows of (batch_idx, x1, y1, x2, y2), as in
        # the accompanying gradcheck script — verify against the kernel.
        return roi_pool(features, rois, self.out_size, self.spatial_scale)
-------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/sigmoid_focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .modules.sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 2 | 3 | __all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss'] 4 | -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/sigmoid_focal_loss/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/mmdetection/mmdet/ops/sigmoid_focal_loss/functions/__init__.py -------------------------------------------------------------------------------- /pytracking/mmdetection/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn.functional as F 2 | from torch.autograd import Function 3 | from torch.autograd.function import once_differentiable 4 | 5 | from .. 
class SigmoidFocalLossFunction(Function):
    """Autograd function for sigmoid focal loss, backed by the
    sigmoid_focal_loss_cuda extension.

    Computes FL(p) = -alpha * (1 - p)^gamma * log(p) per anchor/class on the
    GPU, then applies the requested reduction.
    """

    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25, reduction='mean'):
        # input: raw logits, shape (N, num_classes); target: class indices.
        ctx.save_for_backward(input, target)
        num_classes = input.shape[1]
        ctx.num_classes = num_classes
        ctx.gamma = gamma
        ctx.alpha = alpha

        loss = sigmoid_focal_loss_cuda.forward(input, target, num_classes,
                                               gamma, alpha)
        reduction_enum = F._Reduction.get_enum(reduction)
        # none: 0, mean:1, sum: 2
        if reduction_enum == 0:
            return loss
        elif reduction_enum == 1:
            return loss.mean()
        elif reduction_enum == 2:
            return loss.sum()
        # NOTE(review): any other reduction enum falls through and returns
        # None implicitly — confirm F._Reduction can never yield one here.

    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        input, target = ctx.saved_tensors
        num_classes = ctx.num_classes
        gamma = ctx.gamma
        alpha = ctx.alpha
        # The CUDA kernel requires a contiguous gradient buffer.
        d_loss = d_loss.contiguous()
        d_input = sigmoid_focal_loss_cuda.backward(input, target, d_loss,
                                                   num_classes, gamma, alpha)
        # Gradients only for `input`; target and the scalars get None.
        return d_input, None, None, None, None


sigmoid_focal_loss = SigmoidFocalLossFunction.apply
"""Build script for the sigmoid_focal_loss_cuda extension."""
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

# C++ dispatcher plus the CUDA kernel implementation.
extension = CUDAExtension(
    'sigmoid_focal_loss_cuda',
    [
        'src/sigmoid_focal_loss.cpp',
        'src/sigmoid_focal_loss_cuda.cu',
    ],
)

setup(
    name='SigmoidFocalLoss',
    ext_modules=[extension],
    cmdclass={'build_ext': BuildExtension},
)
from argparse import ArgumentParser

from mmdet.core import coco_eval

# Result types mmdet's coco_eval understands.
RESULT_TYPES = ['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint']


def main():
    """CLI wrapper around mmdet's COCO-style evaluation."""
    parser = ArgumentParser(description='COCO Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('--ann', help='annotation file path')
    parser.add_argument(
        '--types',
        type=str,
        nargs='+',
        choices=RESULT_TYPES,
        default=['bbox'],
        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')
    args = parser.parse_args()
    coco_eval(args.result, args.types, args.ann, args.max_dets)


if __name__ == '__main__':
    main()
import argparse
import hashlib
import os

import torch


def parse_args():
    """Parse CLI arguments: input and output checkpoint filenames."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    args = parser.parse_args()
    return args


def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Loads `in_file`, drops the optimizer state (smaller file), saves to
    `out_file`, then renames the result to '<stem>-<sha8>.pth', where sha8
    is the first 8 hex characters of the published file's SHA-256 digest.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    torch.save(checkpoint, out_file)
    # Hash in-process instead of shelling out to `sha256sum`; same hex digest,
    # no dependency on an external binary being installed.
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    # BUG FIX: the original used out_file.rstrip('.pth'), which strips any
    # trailing run of the characters {'.', 'p', 't', 'h'} rather than the
    # '.pth' suffix (e.g. 'out.pth' -> 'ou', 'depth.pth' -> 'de'). Remove the
    # suffix explicitly instead.
    stem = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = stem + '-{}.pth'.format(sha[:8])
    # os.replace is synchronous and atomic, unlike the original
    # fire-and-forget `subprocess.Popen(['mv', ...])`, which could race with
    # interpreter exit.
    os.replace(out_file, final_file)


def main():
    args = parse_args()
    process_checkpoint(args.in_file, args.out_file)


if __name__ == '__main__':
    main()
import argparse
import re
from collections import OrderedDict

import torch

# RetinaNet head parameters that need the ConvModule rename, e.g.
# 'cls_convs.0.weight' (group 2 = 'weight'). BUG FIX: the original pattern
# used bare '.' around the digit, which matches ANY character and could
# rename unrelated keys; dots are now escaped.
CONV_PARAM_RE = re.compile(r'(cls_convs|reg_convs)\.\d\.(weight|bias)')


def convert(in_file, out_file):
    """Convert keys in checkpoints.

    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.

    args:
        in_file: path of the checkpoint to upgrade.
        out_file: path the upgraded checkpoint is written to.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    for key, val in in_state_dict.items():
        # Use ConvModule instead of nn.Conv2d in RetinaNet
        # cls_convs.0.weight -> cls_convs.0.conv.weight
        m = CONV_PARAM_RE.search(key)
        if m is not None:
            # Splice 'conv.' in right before the matched parameter name.
            # (The original key.replace(param, ...) rewrote the FIRST
            # occurrence of 'weight'/'bias' anywhere in the key, which is
            # wrong if the substring appears earlier in the name.)
            new_key = key[:m.start(2)] + 'conv.' + key[m.start(2):]
            out_state_dict[new_key] = val
            continue

        out_state_dict[key] = val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)


def main():
    parser = argparse.ArgumentParser(description='Upgrade model version')
    parser.add_argument('in_file', help='input checkpoint file')
    parser.add_argument('out_file', help='output checkpoint file')
    args = parser.parse_args()
    convert(args.in_file, args.out_file)


if __name__ == '__main__':
    main()
def voc_eval(result_file, dataset, iou_thr=0.5):
    """Evaluate PASCAL-VOC-style detection results with mean AP.

    args:
        result_file: detection results loadable via mmcv.load.
        dataset: dataset object exposing get_ann_info(i) per image.
        iou_thr: IoU threshold for a detection to count as a true positive.
    """
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            # Ignore boxes are appended after the regular ones; the mask
            # marks them so eval_map can exclude them from scoring.
            # (dtype=bool: the np.bool alias was removed in numpy >= 1.24.)
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        # BUG FIX: the original line was `gt_ignore = gt_ignore`, a no-op.
        # When no image has ignore boxes, eval_map expects None here
        # (matching upstream mmdet), not an empty list.
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True)


def main():
    parser = ArgumentParser(description='VOC Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for evaluation')
    args = parser.parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
    voc_eval(args.result, test_dataset, args.iou_thr)


if __name__ == '__main__':
    main()
"memory": 3472, "loss_rpn_cls": 0.01717, "loss_rpn_bbox": 0.02216, "loss_semantic_seg": 0.16378, "s0.loss_cls": 0.33863, "s0.acc": 84.55469, "s0.loss_bbox": 0.08503, "s0.loss_mask": 0.54235, "s1.loss_cls": 0.2039, "s1.acc": 80.36973, "s1.loss_bbox": 0.0949, "s1.loss_mask": 0.69489, "s2.loss_cls": 0.12088, "s2.acc": 72.56468, "s2.loss_bbox": 0.06268, "s2.loss_mask": 0.22321, "loss": 2.56959} 2 | -------------------------------------------------------------------------------- /pytracking/parameter/ATCAIS/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/ATCAIS/__init__.py -------------------------------------------------------------------------------- /pytracking/parameter/ATCAIS/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/ATCAIS/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /pytracking/parameter/ATCAIS/__pycache__/default.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/ATCAIS/__pycache__/default.cpython-37.pyc -------------------------------------------------------------------------------- /pytracking/parameter/ATCAIS_cpu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/ATCAIS_cpu/__init__.py -------------------------------------------------------------------------------- 
/pytracking/parameter/ATCAIS_cpu/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/ATCAIS_cpu/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /pytracking/parameter/ATCAIS_cpu/__pycache__/default.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/ATCAIS_cpu/__pycache__/default.cpython-37.pyc -------------------------------------------------------------------------------- /pytracking/parameter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/__init__.py -------------------------------------------------------------------------------- /pytracking/parameter/atom/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/atom/__init__.py -------------------------------------------------------------------------------- /pytracking/parameter/atom/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/parameter/atom/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /pytracking/parameter/atom/__pycache__/default.cpython-37.pyc: -------------------------------------------------------------------------------- 
import os
import sys
import argparse
import importlib

# Make the repository root importable when this script is run directly.
env_path = os.path.join(os.path.dirname(__file__), '..')
if env_path not in sys.path:
    sys.path.append(env_path)

from pytracking.evaluation.running import run_dataset


def run_experiment(experiment_module: str, experiment_name: str, debug=0, threads=0):
    """Run experiment.
    args:
        experiment_module: Name of experiment module in the experiments/ folder.
        experiment_name: Name of the experiment function.
        debug: Debug level.
        threads: Number of threads.
    """
    module_path = 'pytracking.experiments.{}'.format(experiment_module)
    experiment_fn = getattr(importlib.import_module(module_path), experiment_name)
    # The experiment function yields the tracker list and dataset to run on.
    trackers, dataset = experiment_fn()
    print('Running: {} {}'.format(experiment_module, experiment_name))
    run_dataset(dataset, trackers, debug, threads)


def main():
    """Parse CLI arguments and dispatch to run_experiment."""
    parser = argparse.ArgumentParser(description='Run tracker.')
    parser.add_argument('experiment_module', type=str, help='Name of experiment module in the experiments/ folder.')
    parser.add_argument('experiment_name', type=str, help='Name of the experiment function.')
    parser.add_argument('--debug', type=int, default=0, help='Debug level.')
    parser.add_argument('--threads', type=int, default=0, help='Number of threads.')

    args = parser.parse_args()

    run_experiment(args.experiment_module, args.experiment_name, args.debug, args.threads)


if __name__ == '__main__':
    main()
18 | """ 19 | tracker = Tracker(tracker_name, tracker_param) 20 | tracker.run_video(videofilepath=videofile, optional_box=optional_box, debug=debug) 21 | 22 | def main(): 23 | parser = argparse.ArgumentParser(description='Run the tracker on your webcam.') 24 | parser.add_argument('tracker_name', type=str, help='Name of tracking method.') 25 | parser.add_argument('tracker_param', type=str, help='Name of parameter file.') 26 | parser.add_argument('videofile', type=str, help='path to a video file.') 27 | parser.add_argument('--optional_box', default=None, help='optional_box with format x,y,w,h.') 28 | parser.add_argument('--debug', type=int, default=0, help='Debug level.') 29 | 30 | args = parser.parse_args() 31 | 32 | run_video(args.tracker_name, args.tracker_param,args.videofile, args.optional_box, args.debug) 33 | 34 | 35 | if __name__ == '__main__': 36 | main() 37 | -------------------------------------------------------------------------------- /pytracking/run_webcam.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import argparse 4 | 5 | env_path = os.path.join(os.path.dirname(__file__), '..') 6 | if env_path not in sys.path: 7 | sys.path.append(env_path) 8 | 9 | from pytracking.evaluation import Tracker 10 | 11 | 12 | def run_webcam(tracker_name, tracker_param, debug=None): 13 | """Run the tracker on your webcam. 14 | args: 15 | tracker_name: Name of tracking method. 16 | tracker_param: Name of parameter file. 17 | debug: Debug level. 
18 | """ 19 | tracker = Tracker(tracker_name, tracker_param) 20 | tracker.run_webcam(debug) 21 | 22 | 23 | def main(): 24 | parser = argparse.ArgumentParser(description='Run the tracker on your webcam.') 25 | parser.add_argument('tracker_name', type=str, help='Name of tracking method.') 26 | parser.add_argument('tracker_param', type=str, help='Name of parameter file.') 27 | parser.add_argument('--debug', type=int, default=0, help='Debug level.') 28 | 29 | args = parser.parse_args() 30 | 31 | run_webcam(args.tracker_name, args.tracker_param, args.debug) 32 | 33 | 34 | if __name__ == '__main__': 35 | main() -------------------------------------------------------------------------------- /pytracking/tracker/ATCAIS/__init__.py: -------------------------------------------------------------------------------- 1 | from .atom import ATOM 2 | 3 | def get_tracker_class(): 4 | return ATOM -------------------------------------------------------------------------------- /pytracking/tracker/ATCAIS_cpu/__init__.py: -------------------------------------------------------------------------------- 1 | from .atom import ATOM 2 | 3 | def get_tracker_class(): 4 | return ATOM -------------------------------------------------------------------------------- /pytracking/tracker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/tracker/__init__.py -------------------------------------------------------------------------------- /pytracking/tracker/atom/__init__.py: -------------------------------------------------------------------------------- 1 | from .atom import ATOM 2 | 3 | def get_tracker_class(): 4 | return ATOM -------------------------------------------------------------------------------- /pytracking/tracker/base/__init__.py: -------------------------------------------------------------------------------- 1 | from .basetracker import 
BaseTracker -------------------------------------------------------------------------------- /pytracking/tracker/base/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/tracker/base/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /pytracking/tracker/base/__pycache__/basetracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/tracker/base/__pycache__/basetracker.cpython-37.pyc -------------------------------------------------------------------------------- /pytracking/tracker/eco/__init__.py: -------------------------------------------------------------------------------- 1 | from .eco import ECO 2 | 3 | def get_tracker_class(): 4 | return ECO -------------------------------------------------------------------------------- /pytracking/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # from .evaluation import * 2 | from .params import * -------------------------------------------------------------------------------- /pytracking/utils/atom_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/pytracking/utils/atom_overview.png -------------------------------------------------------------------------------- /pytracking/utils/gdrive_download: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The script taken from https://www.matthuisman.nz/2019/01/download-google-drive-files-wget-curl.html 4 | 5 | url=$1 6 | filename=$2 7 | 8 | [ -z 
"$url" ] && echo A URL or ID is required first argument && exit 1 9 | 10 | fileid="" 11 | declare -a patterns=("s/.*\/file\/d\/\(.*\)\/.*/\1/p" "s/.*id\=\(.*\)/\1/p" "s/\(.*\)/\1/p") 12 | for i in "${patterns[@]}" 13 | do 14 | fileid=$(echo $url | sed -n $i) 15 | [ ! -z "$fileid" ] && break 16 | done 17 | 18 | [ -z "$fileid" ] && echo Could not find Google ID && exit 1 19 | 20 | echo File ID: $fileid 21 | 22 | tmp_file="$filename.$$.file" 23 | tmp_cookies="$filename.$$.cookies" 24 | tmp_headers="$filename.$$.headers" 25 | 26 | url='https://docs.google.com/uc?export=download&id='$fileid 27 | echo Downloading: "$url > $tmp_file" 28 | wget --save-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 29 | 30 | if [[ ! $(find "$tmp_file" -type f -size +10000c 2>/dev/null) ]]; then 31 | confirm=$(cat "$tmp_file" | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1/p') 32 | fi 33 | 34 | if [ ! -z "$confirm" ]; then 35 | url='https://docs.google.com/uc?export=download&id='$fileid'&confirm='$confirm 36 | echo Downloading: "$url > $tmp_file" 37 | wget --load-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 38 | fi 39 | 40 | [ -z "$filename" ] && filename=$(cat "$tmp_headers" | sed -rn 's/.*filename=\"(.*)\".*/\1/p') 41 | [ -z "$filename" ] && filename="google_drive.file" 42 | 43 | echo Moving: "$tmp_file > $filename" 44 | 45 | mv "$tmp_file" "$filename" 46 | 47 | rm -f "$tmp_cookies" "$tmp_headers" 48 | 49 | echo Saved: "$filename" 50 | echo DONE! 
51 | 52 | exit 0 53 | -------------------------------------------------------------------------------- /pytracking/utils/params.py: -------------------------------------------------------------------------------- 1 | from pytracking import TensorList 2 | import random 3 | 4 | 5 | class TrackerParams: 6 | """Class for tracker parameters.""" 7 | def free_memory(self):  # ask every non-dunder attribute that itself exposes free_memory() to release its buffers 8 | for a in dir(self): 9 | if not a.startswith('__') and hasattr(getattr(self, a), 'free_memory'): 10 | getattr(self, a).free_memory() 11 | 12 | 13 | class FeatureParams: 14 | """Class for feature specific parameters""" 15 | def __init__(self, *args, **kwargs): 16 | if len(args) > 0: 17 | raise ValueError  # positional arguments are not accepted; parameters must be passed by keyword 18 | 19 | for name, val in kwargs.items(): 20 | if isinstance(val, list): 21 | setattr(self, name, TensorList(val))  # wrap plain lists in pytracking's TensorList container 22 | else: 23 | setattr(self, name, val) 24 | 25 | 26 | def Choice(*args): 27 | """Can be used to sample random parameter values.""" 28 | return random.choice(args) 29 | -------------------------------------------------------------------------------- /pytracking/utils/plotting.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | matplotlib.use('TkAgg')  # NOTE(review): hard-codes the Tk GUI backend before pyplot is imported; this fails on headless machines -- confirm intended 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | import torch 6 | import cv2 7 | import torch.nn.functional as F 8 | def show_tensor(a: torch.Tensor, fig_num = None, title = None): 9 | """Display a 2D tensor. 10 | args: 11 | fig_num: Figure number. 12 | title: Title of figure. 
13 | """ 14 | # min_sz=min(a.shape[-2:]) 15 | # scale=400/min_sz 16 | # a=F.interpolate(a, scale_factor=scale, mode='bilinear') 17 | 18 | 19 | a_np = a.squeeze().cpu().clone().detach().numpy()  # detach from the autograd graph and copy to a CPU NumPy array; squeeze drops singleton dims 20 | if a_np.ndim == 3: 21 | a_np = np.transpose(a_np, (1, 2, 0))  # CHW -> HWC so imshow treats the last axis as color 22 | 23 | plt.figure(fig_num) 24 | plt.tight_layout() 25 | plt.cla() 26 | plt.imshow(a_np) 27 | plt.axis('off') 28 | plt.axis('equal') 29 | if title is not None: 30 | plt.title(title) 31 | plt.draw() 32 | plt.pause(0.001)  # yield briefly to the GUI event loop so the figure actually renders without blocking 33 | 34 | 35 | # img=a_np 36 | # img=cv2.putText(img, title, (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255), 2) 37 | # img=cv2.imshow("show tensor {}".format(fig_num),img) 38 | # cv2.waitKey(1) 39 | 40 | 41 | 42 | def plot_graph(a: torch.Tensor, fig_num = None, title = None): 43 | """Plot graph. Data is a 1D tensor. 44 | args: 45 | fig_num: Figure number. 46 | title: Title of figure. 47 | """ 48 | a_np = a.squeeze().cpu().clone().detach().numpy() 49 | if a_np.ndim > 1: 50 | raise ValueError  # only 1-D data (after squeezing) can be line-plotted 51 | plt.figure(fig_num) 52 | # plt.tight_layout() 53 | plt.cla() 54 | plt.plot(a_np) 55 | if title is not None: 56 | plt.title(title) 57 | plt.draw() 58 | plt.pause(0.001)  # brief pause so the figure refreshes 59 | -------------------------------------------------------------------------------- /tracker_ATCAIS.m: -------------------------------------------------------------------------------- 1 | 2 | tracker_label = 'ATCAIS'; % label this tracker reports to the VOT toolkit 3 | 4 | 5 | tracker_command = generate_python_command('vot_ATCAIS', ... % build the python launch command for the vot_ATCAIS entry module 6 | {'/home/tangjiuqi097/vot/ATCAIS/pytracking/vot', ... % tracker source and vot.py are here 7 | '/home/tangjiuqi097/vot/ATCAIS/pytracking', ... 8 | '/home/tangjiuqi097/vot/ATCAIS/', ... 
9 | '/home/tangjiuqi097/data/vot2019/vot-toolkit_7_0_2/native/trax/support/python' 10 | }); 11 | 12 | 13 | tracker_interpreter = 'python'; 14 | 15 | tracker_linkpath = {'/home/tangjiuqi097/data/vot2019/vot-toolkit_7_0_2/native/trax/build'}; % NOTE(review): presumably the native TRAX build dir the toolkit needs on the library path -- confirm 16 | -------------------------------------------------------------------------------- /tracker_ATCAIS_cpu.m: -------------------------------------------------------------------------------- 1 | 2 | tracker_label = 'ATCAIS'; % CPU variant: same label, but launches the vot_ATCAIS_cpu entry module instead 3 | 4 | 5 | tracker_command = generate_python_command('vot_ATCAIS_cpu', ... 6 | {'/home/tangjiuqi097/vot/ATCAIS/pytracking/vot', ... % tracker source and vot.py are here 7 | '/home/tangjiuqi097/vot/ATCAIS/pytracking', ... 8 | '/home/tangjiuqi097/vot/ATCAIS/', ... 9 | '/home/tangjiuqi097/data/vot2019/vot-toolkit_7_0_2/native/trax/support/python' 10 | }); 11 | 12 | 13 | tracker_interpreter = 'python'; 14 | 15 | tracker_linkpath = {'/home/tangjiuqi097/data/vot2019/vot-toolkit_7_0_2/native/trax/build'}; % NOTE(review): presumably the native TRAX build dir the toolkit needs on the library path -- confirm 16 | -------------------------------------------------------------------------------- /vot_rgbd2019_result/ATCAIS.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tangjiuqi097/ATCAIS/4e69b7f185842a767ac443acad3a1cf46737eaad/vot_rgbd2019_result/ATCAIS.zip --------------------------------------------------------------------------------