├── README.md ├── SiamDW_D ├── LICENSE ├── install.sh ├── libs │ ├── FPNlib │ │ ├── configs │ │ │ ├── rpn_r101_fpn_1x.py │ │ │ ├── rpn_r50_fpn_1x.py │ │ │ ├── rpn_x101_32x4d_fpn_1x.py │ │ │ └── rpn_x101_64x4d_fpn_1x.py │ │ ├── install.sh │ │ └── mmdet │ │ │ ├── __init__.py │ │ │ ├── apis │ │ │ ├── __init__.py │ │ │ ├── env.py │ │ │ └── inference.py │ │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── anchor │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_generator.py │ │ │ │ └── anchor_target.py │ │ │ ├── bbox │ │ │ │ ├── __init__.py │ │ │ │ ├── assign_sampling.py │ │ │ │ ├── assigners │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── assign_result.py │ │ │ │ │ ├── base_assigner.py │ │ │ │ │ └── max_iou_assigner.py │ │ │ │ ├── bbox_target.py │ │ │ │ ├── geometry.py │ │ │ │ ├── samplers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_sampler.py │ │ │ │ │ ├── combined_sampler.py │ │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ │ ├── ohem_sampler.py │ │ │ │ │ ├── pseudo_sampler.py │ │ │ │ │ ├── random_sampler.py │ │ │ │ │ └── sampling_result.py │ │ │ │ └── transforms.py │ │ │ ├── loss │ │ │ │ ├── __init__.py │ │ │ │ └── losses.py │ │ │ ├── mask │ │ │ │ ├── __init__.py │ │ │ │ ├── mask_target.py │ │ │ │ └── utils.py │ │ │ ├── post_processing │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_nms.py │ │ │ │ └── merge_augs.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── dist_utils.py │ │ │ │ └── misc.py │ │ │ ├── datasets │ │ │ └── transforms.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── anchor_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_head.py │ │ │ │ └── rpn_head.py │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── resnet.py │ │ │ │ ├── resnext.py │ │ │ │ └── ssd_vgg.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_head.py │ │ │ │ └── convfc_bbox_head.py │ │ │ ├── builder.py │ │ │ ├── detectors │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── rpn.py │ │ │ │ └── test_mixins.py │ │ │ ├── necks │ │ │ │ ├── 
__init__.py │ │ │ │ └── fpn.py │ │ │ ├── registry.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── conv_module.py │ │ │ │ ├── norm.py │ │ │ │ └── weight_init.py │ │ │ └── ops │ │ │ ├── __init__.py │ │ │ └── nms │ │ │ ├── .gitignore │ │ │ ├── __init__.py │ │ │ ├── compile.py │ │ │ ├── cpu_nms.pyx │ │ │ ├── cpu_soft_nms.pyx │ │ │ ├── gpu_nms.hpp │ │ │ ├── gpu_nms.pyx │ │ │ ├── nms_kernel.cu │ │ │ ├── nms_wrapper.py │ │ │ └── setup.py │ ├── PreciseRoIPooling │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── __init__.py │ │ ├── _assets │ │ │ └── prroi_visualization.png │ │ ├── pytorch │ │ │ ├── __init__.py │ │ │ ├── prroi_pool │ │ │ │ ├── .gitignore │ │ │ │ ├── __init__.py │ │ │ │ ├── build.py │ │ │ │ ├── functional.py │ │ │ │ ├── prroi_pool.py │ │ │ │ ├── src │ │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ │ └── travis.sh │ │ │ └── tests │ │ │ │ ├── __init__.py │ │ │ │ └── test_prroi_pooling2d.py │ │ └── src │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ └── prroi_pooling_gpu_impl.cuh │ ├── RPN │ │ ├── head.py │ │ ├── model_load.py │ │ ├── models.py │ │ ├── neck.py │ │ ├── resnet_atrous.py │ │ ├── rpnpp_utils.py │ │ └── xcorr.py │ ├── core │ │ ├── base │ │ │ ├── __init__.py │ │ │ └── basetracker.py │ │ ├── far_fusion │ │ │ ├── __init__.py │ │ │ ├── far_fusion.py │ │ │ └── optim.py │ │ ├── resnext_far │ │ │ ├── __init__.py │ │ │ ├── fpn_helper.py │ │ │ ├── optim.py │ │ │ ├── processing_utils.py │ │ │ └── resnext_far.py │ │ ├── senet_far │ │ │ ├── __init__.py │ │ │ ├── fpn_helper.py │ │ │ ├── optim.py │ │ │ ├── processing_utils.py │ │ │ └── senet_far.py │ │ ├── tracker_matlab.py │ │ └── tracker_test.py │ ├── features │ │ ├── augmentation.py │ │ ├── color.py │ │ ├── deep.py │ │ ├── extractor.py │ │ ├── featurebase.py │ │ ├── preprocessing.py │ │ └── util.py │ ├── mmcv │ │ ├── __init__.py │ │ ├── arraymisc │ │ │ ├── __init__.py │ │ │ └── quantization.py │ 
│ ├── cnn │ │ │ ├── __init__.py │ │ │ ├── alexnet.py │ │ │ ├── resnet.py │ │ │ ├── vgg.py │ │ │ └── weight_init.py │ │ ├── fileio │ │ │ ├── __init__.py │ │ │ ├── handlers │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── json_handler.py │ │ │ │ ├── pickle_handler.py │ │ │ │ └── yaml_handler.py │ │ │ ├── io.py │ │ │ └── parse.py │ │ ├── image │ │ │ ├── __init__.py │ │ │ ├── io.py │ │ │ └── transforms │ │ │ │ ├── __init__.py │ │ │ │ ├── colorspace.py │ │ │ │ ├── geometry.py │ │ │ │ ├── normalize.py │ │ │ │ └── resize.py │ │ ├── opencv_info.py │ │ ├── parallel │ │ │ ├── __init__.py │ │ │ ├── _functions.py │ │ │ ├── collate.py │ │ │ ├── data_container.py │ │ │ ├── data_parallel.py │ │ │ ├── distributed.py │ │ │ └── scatter_gather.py │ │ ├── runner │ │ │ ├── __init__.py │ │ │ ├── checkpoint.py │ │ │ ├── hooks │ │ │ │ ├── __init__.py │ │ │ │ ├── checkpoint.py │ │ │ │ ├── closure.py │ │ │ │ ├── hook.py │ │ │ │ ├── iter_timer.py │ │ │ │ ├── logger │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base.py │ │ │ │ │ ├── pavi.py │ │ │ │ │ ├── tensorboard.py │ │ │ │ │ └── text.py │ │ │ │ ├── lr_updater.py │ │ │ │ ├── memory.py │ │ │ │ ├── optimizer.py │ │ │ │ └── sampler_seed.py │ │ │ ├── log_buffer.py │ │ │ ├── parallel_test.py │ │ │ ├── priority.py │ │ │ ├── runner.py │ │ │ └── utils.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── misc.py │ │ │ ├── path.py │ │ │ ├── progressbar.py │ │ │ └── timer.py │ │ ├── version.py │ │ ├── video │ │ │ ├── __init__.py │ │ │ ├── io.py │ │ │ ├── optflow.py │ │ │ └── processing.py │ │ └── visualization │ │ │ ├── __init__.py │ │ │ ├── color.py │ │ │ ├── image.py │ │ │ └── optflow.py │ ├── models │ │ ├── backbone │ │ │ ├── __init__.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ ├── resnext101_64x4d_features.py │ │ │ └── senet.py │ │ └── modules │ │ │ ├── __init__.py │ │ │ ├── blocks.py │ │ │ ├── iounet.py │ │ │ ├── iounet_dropout.py │ │ │ └── net.py │ └── utils │ │ ├── __init__.py │ │ ├── complex.py │ │ ├── dcf.py │ │ ├── 
fourier.py │ │ ├── interpolate.py │ │ ├── loading.py │ │ ├── operation.py │ │ ├── optimization.py │ │ ├── params.py │ │ ├── plotting.py │ │ ├── settings.py │ │ ├── tensordict.py │ │ └── tensorlist.py └── test │ ├── data │ └── README │ ├── networks │ └── README.md │ ├── parallel_test.py │ ├── settings │ ├── datasets │ │ ├── __init__.py │ │ ├── data.py │ │ ├── got10kdataset.py │ │ ├── lasotdataset.py │ │ ├── nfsdataset.py │ │ ├── otbdataset.py │ │ ├── oxuvadataset.py │ │ ├── rgbd_ddataset.py │ │ ├── rgbd_rgbdataset.py │ │ ├── tpldataset.py │ │ ├── trackingnetdataset.py │ │ ├── uavdataset.py │ │ ├── vot18dataset.py │ │ ├── vot19dataset.py │ │ ├── votlt18dataset.py │ │ └── votlt19dataset.py │ ├── envs.py │ ├── exp.py │ ├── far_fusion │ │ ├── __init__.py │ │ └── far_fusion.py │ ├── resnext_far │ │ ├── __init__.py │ │ └── resnext.py │ └── senet_far │ │ ├── __init__.py │ │ └── senet.py │ └── utils │ ├── __init__.py │ ├── gdrive_download │ ├── params.py │ └── plotting.py ├── SiamDW_LT ├── README.md ├── libs │ ├── FPNlib │ │ ├── configs │ │ │ ├── rpn_r101_fpn_1x.py │ │ │ ├── rpn_r50_fpn_1x.py │ │ │ ├── rpn_x101_32x4d_fpn_1x.py │ │ │ └── rpn_x101_64x4d_fpn_1x.py │ │ ├── install.sh │ │ └── mmdet │ │ │ ├── __init__.py │ │ │ ├── apis │ │ │ ├── __init__.py │ │ │ ├── env.py │ │ │ └── inference.py │ │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── anchor │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_generator.py │ │ │ │ └── anchor_target.py │ │ │ ├── bbox │ │ │ │ ├── __init__.py │ │ │ │ ├── assign_sampling.py │ │ │ │ ├── assigners │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── assign_result.py │ │ │ │ │ ├── base_assigner.py │ │ │ │ │ └── max_iou_assigner.py │ │ │ │ ├── bbox_target.py │ │ │ │ ├── geometry.py │ │ │ │ ├── samplers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_sampler.py │ │ │ │ │ ├── combined_sampler.py │ │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ │ ├── ohem_sampler.py │ │ │ │ │ ├── pseudo_sampler.py │ │ │ │ │ ├── 
random_sampler.py │ │ │ │ │ └── sampling_result.py │ │ │ │ └── transforms.py │ │ │ ├── loss │ │ │ │ ├── __init__.py │ │ │ │ └── losses.py │ │ │ ├── mask │ │ │ │ ├── __init__.py │ │ │ │ ├── mask_target.py │ │ │ │ └── utils.py │ │ │ ├── post_processing │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_nms.py │ │ │ │ └── merge_augs.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── dist_utils.py │ │ │ │ └── misc.py │ │ │ ├── datasets │ │ │ └── transforms.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── anchor_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_head.py │ │ │ │ └── rpn_head.py │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── resnet.py │ │ │ │ ├── resnext.py │ │ │ │ └── ssd_vgg.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_head.py │ │ │ │ └── convfc_bbox_head.py │ │ │ ├── builder.py │ │ │ ├── detectors │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── rpn.py │ │ │ │ └── test_mixins.py │ │ │ ├── necks │ │ │ │ ├── __init__.py │ │ │ │ └── fpn.py │ │ │ ├── registry.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── conv_module.py │ │ │ │ ├── norm.py │ │ │ │ └── weight_init.py │ │ │ └── ops │ │ │ ├── __init__.py │ │ │ └── nms │ │ │ ├── .gitignore │ │ │ ├── __init__.py │ │ │ ├── compile.py │ │ │ ├── cpu_nms.pyx │ │ │ ├── cpu_soft_nms.pyx │ │ │ ├── gpu_nms.hpp │ │ │ ├── gpu_nms.pyx │ │ │ ├── nms_kernel.cu │ │ │ ├── nms_wrapper.py │ │ │ └── setup.py │ ├── PreciseRoIPooling │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── __init__.py │ │ ├── _assets │ │ │ └── prroi_visualization.png │ │ ├── pytorch │ │ │ ├── __init__.py │ │ │ ├── prroi_pool │ │ │ │ ├── .gitignore │ │ │ │ ├── __init__.py │ │ │ │ ├── build.py │ │ │ │ ├── functional.py │ │ │ │ ├── prroi_pool.py │ │ │ │ ├── src │ │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ │ └── travis.sh │ │ │ └── tests │ │ │ │ ├── __init__.py │ │ │ │ └── test_prroi_pooling2d.py │ │ 
└── src │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ └── prroi_pooling_gpu_impl.cuh │ ├── RPN │ │ ├── head.py │ │ ├── model_load.py │ │ ├── models.py │ │ ├── neck.py │ │ ├── resnet_atrous.py │ │ ├── rpnpp_utils.py │ │ └── xcorr.py │ ├── core │ │ ├── base │ │ │ ├── __init__.py │ │ │ └── basetracker.py │ │ ├── far_fusion │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── far_fusion.cpython-36.pyc │ │ │ │ └── flip_star.cpython-36.pyc │ │ │ ├── far_fusion.py │ │ │ └── optim.py │ │ ├── fstar │ │ │ ├── __init__.py │ │ │ ├── fstar.py │ │ │ └── optim.py │ │ ├── resnext_far │ │ │ ├── __init__.py │ │ │ ├── fpn_helper.py │ │ │ ├── optim.py │ │ │ ├── processing_utils.py │ │ │ └── resnext_far.py │ │ ├── senet_far │ │ │ ├── __init__.py │ │ │ ├── fpn_helper.py │ │ │ ├── optim.py │ │ │ ├── processing_utils.py │ │ │ └── senet_far.py │ │ ├── star │ │ │ ├── __init__.py │ │ │ ├── optim.py │ │ │ └── star.py │ │ ├── tracker_matlab.py │ │ └── tracker_test.py │ ├── features │ │ ├── augmentation.py │ │ ├── color.py │ │ ├── deep.py │ │ ├── extractor.py │ │ ├── featurebase.py │ │ ├── preprocessing.py │ │ └── util.py │ ├── mmcv │ │ ├── __init__.py │ │ ├── arraymisc │ │ │ ├── __init__.py │ │ │ └── quantization.py │ │ ├── cnn │ │ │ ├── __init__.py │ │ │ ├── alexnet.py │ │ │ ├── resnet.py │ │ │ ├── vgg.py │ │ │ └── weight_init.py │ │ ├── fileio │ │ │ ├── __init__.py │ │ │ ├── handlers │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ ├── base.cpython-36.pyc │ │ │ │ │ ├── json_handler.cpython-36.pyc │ │ │ │ │ ├── pickle_handler.cpython-36.pyc │ │ │ │ │ └── yaml_handler.cpython-36.pyc │ │ │ │ ├── base.py │ │ │ │ ├── json_handler.py │ │ │ │ ├── pickle_handler.py │ │ │ │ └── yaml_handler.py │ │ │ ├── io.py │ │ │ └── parse.py │ │ ├── image │ │ │ ├── __init__.py │ │ │ ├── io.py │ │ │ └── transforms │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── colorspace.cpython-36.pyc 
│ │ │ │ ├── geometry.cpython-36.pyc │ │ │ │ ├── normalize.cpython-36.pyc │ │ │ │ └── resize.cpython-36.pyc │ │ │ │ ├── colorspace.py │ │ │ │ ├── geometry.py │ │ │ │ ├── normalize.py │ │ │ │ └── resize.py │ │ ├── opencv_info.py │ │ ├── parallel │ │ │ ├── __init__.py │ │ │ ├── _functions.py │ │ │ ├── collate.py │ │ │ ├── data_container.py │ │ │ ├── data_parallel.py │ │ │ ├── distributed.py │ │ │ └── scatter_gather.py │ │ ├── runner │ │ │ ├── __init__.py │ │ │ ├── checkpoint.py │ │ │ ├── hooks │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ ├── checkpoint.cpython-36.pyc │ │ │ │ │ ├── closure.cpython-36.pyc │ │ │ │ │ ├── hook.cpython-36.pyc │ │ │ │ │ ├── iter_timer.cpython-36.pyc │ │ │ │ │ ├── lr_updater.cpython-36.pyc │ │ │ │ │ ├── memory.cpython-36.pyc │ │ │ │ │ ├── optimizer.cpython-36.pyc │ │ │ │ │ └── sampler_seed.cpython-36.pyc │ │ │ │ ├── checkpoint.py │ │ │ │ ├── closure.py │ │ │ │ ├── hook.py │ │ │ │ ├── iter_timer.py │ │ │ │ ├── logger │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── __pycache__ │ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ │ ├── base.cpython-36.pyc │ │ │ │ │ │ ├── pavi.cpython-36.pyc │ │ │ │ │ │ ├── tensorboard.cpython-36.pyc │ │ │ │ │ │ └── text.cpython-36.pyc │ │ │ │ │ ├── base.py │ │ │ │ │ ├── pavi.py │ │ │ │ │ ├── tensorboard.py │ │ │ │ │ └── text.py │ │ │ │ ├── lr_updater.py │ │ │ │ ├── memory.py │ │ │ │ ├── optimizer.py │ │ │ │ └── sampler_seed.py │ │ │ ├── log_buffer.py │ │ │ ├── parallel_test.py │ │ │ ├── priority.py │ │ │ ├── runner.py │ │ │ └── utils.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── misc.py │ │ │ ├── path.py │ │ │ ├── progressbar.py │ │ │ └── timer.py │ │ ├── version.py │ │ ├── video │ │ │ ├── __init__.py │ │ │ ├── io.py │ │ │ ├── optflow.py │ │ │ └── processing.py │ │ └── visualization │ │ │ ├── __init__.py │ │ │ ├── color.py │ │ │ ├── image.py │ │ │ └── optflow.py │ ├── models │ │ ├── backbone │ │ │ ├── __init__.py │ │ │ ├── resnet.py │ │ │ ├── 
resnext.py │ │ │ ├── resnext101_64x4d_features.py │ │ │ └── senet.py │ │ └── modules │ │ │ ├── __init__.py │ │ │ ├── blocks.py │ │ │ ├── iounet.py │ │ │ ├── iounet_dropout.py │ │ │ └── net.py │ └── utils │ │ ├── __init__.py │ │ ├── complex.py │ │ ├── dcf.py │ │ ├── fourier.py │ │ ├── interpolate.py │ │ ├── loading.py │ │ ├── operation.py │ │ ├── optimization.py │ │ ├── params.py │ │ ├── settings.py │ │ ├── tensordict.py │ │ └── tensorlist.py └── test │ ├── LICENSE │ ├── install.sh │ ├── matlab │ ├── _init_paths.py │ ├── python_star.py │ └── vot.py │ ├── parallel_test.py │ ├── requirements │ ├── settings │ ├── datasets │ │ ├── __init__.py │ │ ├── data.py │ │ ├── got10kdataset.py │ │ ├── lasotdataset.py │ │ ├── nfsdataset.py │ │ ├── otbdataset.py │ │ ├── oxuvadataset.py │ │ ├── rgbd_ddataset.py │ │ ├── rgbd_rgbdataset.py │ │ ├── tpldataset.py │ │ ├── trackingnetdataset.py │ │ ├── uavdataset.py │ │ ├── vot18dataset.py │ │ ├── vot19dataset.py │ │ ├── votlt18dataset.py │ │ └── votlt19dataset.py │ ├── envs.py │ ├── exp.py │ ├── far_fusion │ │ ├── __init__.py │ │ └── senet.py │ ├── fstar │ │ ├── __init__.py │ │ └── res50_512_TLVC.py │ ├── resnext_far │ │ ├── __init__.py │ │ └── resnext.py │ ├── senet_far │ │ ├── __init__.py │ │ └── senet.py │ └── star │ │ ├── __init__.py │ │ └── resnext64_dropout_TLVC.py │ └── utils │ ├── __init__.py │ ├── analyse_component.py │ ├── analyse_results.py │ ├── draw_plot.py │ ├── extract_backbone.py │ ├── gdrive_download │ ├── generate_epoch_test.py │ ├── get_eao.py │ ├── get_fscore.m │ ├── oxuva_generate.py │ ├── params.py │ └── plotting.py └── SiamDW_T ├── install.sh ├── libs ├── core │ ├── __init__.py │ ├── evaluation │ │ ├── __init__.py │ │ ├── data.py │ │ ├── environment.py │ │ ├── local.py │ │ ├── running.py │ │ ├── tracker.py │ │ └── utils.py │ ├── experiments │ │ ├── __init__.py │ │ └── exp.py │ ├── features │ │ ├── __init__.py │ │ ├── augmentation.py │ │ ├── augmentation.py.bk │ │ ├── color.py │ │ ├── deep.py │ │ ├── extractor.py │ │ 
├── featurebase.py │ │ ├── preprocessing.py │ │ └── util.py │ ├── libs │ │ ├── NMS │ │ │ ├── __init__.py │ │ │ ├── _ext │ │ │ │ ├── __init__.py │ │ │ │ └── nms │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── _nms.so │ │ │ ├── build.py │ │ │ ├── pth_nms.py │ │ │ └── src │ │ │ │ ├── cuda │ │ │ │ ├── nms_kernel.cu │ │ │ │ ├── nms_kernel.cu.o │ │ │ │ └── nms_kernel.h │ │ │ │ ├── nms.c │ │ │ │ ├── nms.h │ │ │ │ ├── nms_cuda.c │ │ │ │ └── nms_cuda.h │ │ ├── __init__.py │ │ ├── complex.py │ │ ├── dcf.py │ │ ├── dcf.py.bk │ │ ├── fourier.py │ │ ├── fourier.py.bk │ │ ├── operation.py │ │ ├── optimization.py │ │ ├── optimization.py.bk │ │ ├── tensordict.py │ │ ├── tensorlist.py │ │ └── tensorlist.py.bk │ ├── parameter │ │ ├── __init__.py │ │ └── improved │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── unrestore_res50_RGB.cpython-36.pyc │ │ │ └── unrestore_res50_T.cpython-36.pyc │ │ │ ├── unrestore_res50_RGB.py │ │ │ └── unrestore_res50_T.py │ ├── tracker │ │ ├── __init__.py │ │ ├── base │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── basetracker.cpython-36.pyc │ │ │ └── basetracker.py │ │ └── improved │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── atom.cpython-36.pyc │ │ │ └── optim.cpython-36.pyc │ │ │ ├── improved.py │ │ │ └── optim.py │ ├── utils │ │ ├── __init__.py │ │ ├── gdrive_download │ │ ├── params.py │ │ └── plotting.py │ └── vot.py ├── models │ ├── __init__.py │ ├── actors │ │ ├── __init__.py │ │ ├── base_actor.py │ │ ├── bbreg.py │ │ └── bbreg_para.py │ ├── admin │ │ ├── __init__.py │ │ ├── environment.py │ │ ├── loading.py │ │ ├── local.py │ │ ├── model_constructor.py │ │ ├── settings.py │ │ └── stats.py │ ├── data │ │ ├── __init__.py │ │ ├── image_loader.py │ │ ├── interpolate.py │ │ ├── loader.py │ │ ├── processing.py │ │ ├── processing_utils.py │ │ ├── sampler.py │ │ └── transforms.py │ ├── external │ │ └── PreciseRoIPooling │ │ │ ├── 
.gitignore │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── _assets │ │ │ └── prroi_visualization.png │ │ │ ├── pytorch │ │ │ ├── prroi_pool │ │ │ │ ├── .gitignore │ │ │ │ ├── __init__.py │ │ │ │ ├── build.py │ │ │ │ ├── functional.py │ │ │ │ ├── prroi_pool.py │ │ │ │ ├── src │ │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ │ └── travis.sh │ │ │ └── tests │ │ │ │ └── test_prroi_pooling2d.py │ │ │ └── src │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ └── prroi_pooling_gpu_impl.cuh │ └── models │ │ ├── __init__.py │ │ ├── backbone │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── resnet.cpython-36.pyc │ │ │ └── resnet18_vggm.cpython-36.pyc │ │ ├── resnet.py │ │ └── resnet18_vggm.py │ │ ├── bbreg │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── atom.cpython-36.pyc │ │ │ └── atom_iou_net.cpython-36.pyc │ │ ├── aas.py │ │ └── aas_iou_net.py │ │ └── layers │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── blocks.cpython-36.pyc │ │ └── blocks.py └── utils │ └── utils.py └── rgbt_tracking ├── _init_paths.py └── test_rgbt.py /SiamDW_D/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Microsoft Research Asia 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the 
Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SiamDW_D/install.sh: -------------------------------------------------------------------------------- 1 | conda install -y cython 2 | pip install multiprocess 3 | pip install torch==0.3.1 4 | pip install numpy==1.16.4 5 | pip install matplotlib 6 | pip install cffi 7 | pip install opencv-python 8 | pip install torchvision==0.2.0 9 | pip install pytorch_fft 10 | 11 | base_dir=$(pwd) 12 | cd libs/PreciseRoIPooling/pytorch/prroi_pool 13 | PATH=/usr/local/cuda/bin/:$PATH 14 | bash travis.sh 15 | cd $base_dir 16 | 17 | cd libs/FPNlib/ 18 | sh install.sh 19 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/install.sh: -------------------------------------------------------------------------------- 1 | cd mmdet/ops/nms 2 | python compile.py 3 | cd ../../../ 4 | pip install mmcv 5 | pip uninstall -y mmcv 6 | pip install sklearn 7 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_D/libs/FPNlib/mmdet/__init__.py -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/apis/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .env import init_dist, get_root_logger, set_random_seed 2 | from .inference import inference_detector, show_result 3 | 4 | __all__ = [ 5 | 'init_dist', 'get_root_logger', 'set_random_seed', 6 | 'inference_detector', 'show_result' 7 | ] 8 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .mask import * # noqa: F401, F403 4 | from .loss import * # noqa: F401, F403 5 | from .post_processing import * # noqa: F401, F403 6 | from .utils import * # noqa: F401, F403 7 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import AnchorGenerator 2 | from .anchor_target import anchor_target 3 | 4 | __all__ = ['AnchorGenerator', 'anchor_target'] 5 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/__init__.py: -------------------------------------------------------------------------------- 1 | from .geometry import bbox_overlaps 2 | from .assigners import BaseAssigner, MaxIoUAssigner, AssignResult 3 | from .samplers import (BaseSampler, PseudoSampler, RandomSampler, 4 | InstanceBalancedPosSampler, IoUBalancedNegSampler, 5 | CombinedSampler, SamplingResult) 6 | from .assign_sampling import build_assigner, build_sampler, assign_and_sample 7 | from .transforms import (bbox2delta, delta2bbox, bbox_flip, bbox_mapping, 8 | bbox_mapping_back, bbox2roi, roi2bbox, bbox2result) 9 | from .bbox_target import bbox_target 10 | 11 | __all__ = [ 12 | 'bbox_overlaps', 'BaseAssigner', 
'MaxIoUAssigner', 'AssignResult', 13 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 14 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 15 | 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample', 16 | 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping', 17 | 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result', 'bbox_target' 18 | ] 19 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/assign_sampling.py: -------------------------------------------------------------------------------- 1 | import libs.mmcv 2 | 3 | from . import assigners, samplers 4 | 5 | 6 | def build_assigner(cfg, **kwargs): 7 | if isinstance(cfg, assigners.BaseAssigner): 8 | return cfg 9 | elif isinstance(cfg, dict): 10 | return mmcv.runner.obj_from_dict( 11 | cfg, assigners, default_args=kwargs) 12 | else: 13 | raise TypeError('Invalid type {} for building a sampler'.format( 14 | type(cfg))) 15 | 16 | 17 | def build_sampler(cfg, **kwargs): 18 | if isinstance(cfg, samplers.BaseSampler): 19 | return cfg 20 | elif isinstance(cfg, dict): 21 | return mmcv.runner.obj_from_dict( 22 | cfg, samplers, default_args=kwargs) 23 | else: 24 | raise TypeError('Invalid type {} for building a sampler'.format( 25 | type(cfg))) 26 | 27 | 28 | def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg): 29 | bbox_assigner = build_assigner(cfg.assigner) 30 | bbox_sampler = build_sampler(cfg.sampler) 31 | assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore, 32 | gt_labels) 33 | sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes, 34 | gt_labels) 35 | return assign_result, sampling_result 36 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_assigner 
import BaseAssigner 2 | from .max_iou_assigner import MaxIoUAssigner 3 | from .assign_result import AssignResult 4 | 5 | __all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult'] 6 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/assigners/assign_result.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class AssignResult(object): 5 | 6 | def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): 7 | self.num_gts = num_gts 8 | self.gt_inds = gt_inds 9 | self.max_overlaps = max_overlaps 10 | self.labels = labels 11 | 12 | def add_gt_(self, gt_labels): 13 | self_inds = torch.arange( 14 | 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) 15 | self.gt_inds = torch.cat([self_inds, self.gt_inds]) 16 | self.max_overlaps = torch.cat( 17 | [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps]) 18 | if self.labels is not None: 19 | self.labels = torch.cat([gt_labels, self.labels]) 20 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | 6 | @abstractmethod 7 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 8 | pass 9 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .pseudo_sampler import PseudoSampler 3 | from .random_sampler import RandomSampler 4 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 5 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 6 | from 
.combined_sampler import CombinedSampler 7 | from .ohem_sampler import OHEMSampler 8 | from .sampling_result import SamplingResult 9 | 10 | __all__ = [ 11 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 12 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 13 | 'OHEMSampler', 'SamplingResult' 14 | ] 15 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from ..assign_sampling import build_sampler 3 | 4 | 5 | class CombinedSampler(BaseSampler): 6 | 7 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 8 | super(CombinedSampler, self).__init__(**kwargs) 9 | self.pos_sampler = build_sampler(pos_sampler, **kwargs) 10 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/samplers/pseudo_sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .base_sampler import BaseSampler 4 | from .sampling_result import SamplingResult 5 | 6 | 7 | class PseudoSampler(BaseSampler): 8 | 9 | def __init__(self, **kwargs): 10 | pass 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | 18 | def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): 19 | pos_inds = torch.nonzero( 20 | assign_result.gt_inds > 0).squeeze(-1).unique() 21 | neg_inds = torch.nonzero( 22 | assign_result.gt_inds == 0).squeeze(-1).unique() 23 | gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) 24 | 
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, 25 | assign_result, gt_flags) 26 | return sampling_result 27 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/bbox/samplers/sampling_result.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class SamplingResult(object): 5 | 6 | def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, 7 | gt_flags): 8 | self.pos_inds = pos_inds 9 | self.neg_inds = neg_inds 10 | self.pos_bboxes = bboxes[pos_inds] 11 | self.neg_bboxes = bboxes[neg_inds] 12 | self.pos_is_gt = gt_flags[pos_inds] 13 | 14 | self.num_gts = gt_bboxes.shape[0] 15 | self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 16 | self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] 17 | if assign_result.labels is not None: 18 | self.pos_gt_labels = assign_result.labels[pos_inds] 19 | else: 20 | self.pos_gt_labels = None 21 | 22 | @property 23 | def bboxes(self): 24 | return torch.cat([self.pos_bboxes, self.neg_bboxes]) 25 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .losses import (weighted_nll_loss, weighted_cross_entropy, 2 | weighted_binary_cross_entropy, sigmoid_focal_loss, 3 | weighted_sigmoid_focal_loss, mask_cross_entropy, 4 | smooth_l1_loss, weighted_smoothl1, accuracy) 5 | 6 | __all__ = [ 7 | 'weighted_nll_loss', 'weighted_cross_entropy', 8 | 'weighted_binary_cross_entropy', 'sigmoid_focal_loss', 9 | 'weighted_sigmoid_focal_loss', 'mask_cross_entropy', 'smooth_l1_loss', 10 | 'weighted_smoothl1', 'accuracy' 11 | ] 12 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/mask/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .utils import split_combined_polys 2 | from .mask_target import mask_target 3 | 4 | __all__ = ['split_combined_polys', 'mask_target'] 5 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/mask/mask_target.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import libs.mmcv as mmcv 4 | 5 | 6 | def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, 7 | cfg): 8 | cfg_list = [cfg for _ in range(len(pos_proposals_list))] 9 | mask_targets = map(mask_target_single, pos_proposals_list, 10 | pos_assigned_gt_inds_list, gt_masks_list, cfg_list) 11 | mask_targets = torch.cat(list(mask_targets)) 12 | return mask_targets 13 | 14 | 15 | def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): 16 | mask_size = cfg.mask_size 17 | num_pos = pos_proposals.size(0) 18 | mask_targets = [] 19 | if num_pos > 0: 20 | proposals_np = pos_proposals.cpu().numpy() 21 | pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() 22 | for i in range(num_pos): 23 | gt_mask = gt_masks[pos_assigned_gt_inds[i]] 24 | bbox = proposals_np[i, :].astype(np.int32) 25 | x1, y1, x2, y2 = bbox 26 | w = np.maximum(x2 - x1 + 1, 1) 27 | h = np.maximum(y2 - y1 + 1, 1) 28 | # mask is uint8 both before and after resizing 29 | target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w], 30 | (mask_size, mask_size)) 31 | mask_targets.append(target) 32 | mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to( 33 | pos_proposals.device) 34 | else: 35 | mask_targets = pos_proposals.new_zeros((0, mask_size, mask_size)) 36 | return mask_targets 37 | -------------------------------------------------------------------------------- /SiamDW_D/libs/FPNlib/mmdet/core/mask/utils.py: 
def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys back into per-mask representations.

    A mask is a list of polys and a poly is a 1-D array; the dataset
    stores all polys of an image concatenated into a single 1-D tensor.
    This function reverses that packing.

    Args:
        polys (list): one combined 1-D tensor per image.
        poly_lens (list): one tensor of per-poly lengths per image.
        polys_per_mask (list): one tensor of per-mask poly counts per image.

    Returns:
        list: per image, a list (per mask) of lists of numpy arrays.
    """
    mask_polys_list = []
    for flat_polys, lens, per_mask in zip(polys, poly_lens, polys_per_mask):
        # First cut the flat tensor into individual polys, then group
        # consecutive polys into masks.
        polys_of_image = mmcv.slice_list(flat_polys, lens.tolist())
        mask_polys_list.append(
            mmcv.slice_list(polys_of_image, per_mask.tolist()))
    return mask_polys_list
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert a batched, normalized image tensor back to uint8 arrays.

    Args:
        tensor (Tensor): batch of images, (N, C, H, W) layout (channels
            are moved last via transpose(1, 2, 0)).
        mean (tuple): per-channel mean that was subtracted.
        std (tuple): per-channel std that was divided by.
        to_rgb (bool): whether normalization converted BGR->RGB; passed
            through to ``imdenormalize`` as ``to_bgr`` to undo it.

    Returns:
        list[np.ndarray]: contiguous HxWxC uint8 arrays, one per image.
    """
    # BUGFIX: the module-level `import libs.mmcv` binds the name `libs`,
    # NOT `mmcv`, so the `mmcv.imdenormalize` call below used to raise
    # NameError. Bind the alias locally without touching the module API.
    import libs.mmcv as mmcv

    num_imgs = tensor.size(0)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for img_id in range(num_imgs):
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        img = mmcv.imdenormalize(
            img, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs
def _build_module(cfg, registry, default_args):
    """Instantiate a single module described by a config dict.

    ``cfg`` must carry a 'type' key holding either a registered class
    name or a class object; every remaining key becomes a constructor
    kwarg, with ``default_args`` filling in any that are missing.
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    assert isinstance(default_args, dict) or default_args is None
    kwargs = cfg.copy()
    obj_type = kwargs.pop('type')
    if mmcv.is_str(obj_type):
        # Resolve a registered name to its class object.
        if obj_type not in registry.module_dict:
            raise KeyError('{} is not in the {} registry'.format(
                obj_type, registry.name))
        obj_type = registry.module_dict[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(
            type(obj_type)))
    if default_args is not None:
        for key, val in default_args.items():
            kwargs.setdefault(key, val)
    return obj_type(**kwargs)
class Registry(object):
    """A simple name -> nn.Module-subclass registry used by the builders."""

    def __init__(self, name):
        self._name = name
        self._module_dict = dict()

    @property
    def name(self):
        """Registry name, used in error messages."""
        return self._name

    @property
    def module_dict(self):
        """Mapping from registered class names to class objects."""
        return self._module_dict

    def _register_module(self, module_class):
        """Validate ``module_class`` and store it under its ``__name__``.

        Raises:
            TypeError: if the class is not an nn.Module subclass.
            KeyError: if the name is already taken.
        """
        if not issubclass(module_class, nn.Module):
            raise TypeError(
                'module must be a child of nn.Module, but got {}'.format(
                    type(module_class)))
        key = module_class.__name__
        if key in self._module_dict:
            raise KeyError('{} is already registered in {}'.format(
                key, self.name))
        self._module_dict[key] = module_class

    def register_module(self, cls):
        """Class decorator: register ``cls`` and hand it back unchanged."""
        self._register_module(cls)
        return cls
# Per-device cache of the GPU workspace base pointer, so the CUDA
# scratch buffer is allocated only once per device and then reused.
memory_pool = {}

def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
            np.int32_t device_id=0):
    """Hard NMS on the GPU.

    Args:
        dets: (N, 5) float32 array of [x1, y1, x2, y2, score] rows.
        thresh: IoU threshold above which lower-scored boxes are dropped.
        device_id: CUDA device to run on.

    Returns:
        list: indices into ``dets`` of the kept boxes.
    """
    cdef int boxes_num = dets.shape[0]
    cdef int boxes_dim = 5
    cdef int num_out
    cdef size_t base
    cdef np.ndarray[np.int32_t, ndim=1] \
        keep = np.zeros(boxes_num, dtype=np.int32)
    cdef np.ndarray[np.float32_t, ndim=1] \
        scores = dets[:, 4]
    # Boxes are handed to the kernel in descending score order; a sorted
    # copy is built below and the kept indices are mapped back at the end.
    cdef np.ndarray[np.int_t, ndim=1] \
        order = scores.argsort()[::-1]
    cdef np.ndarray[np.float32_t, ndim=2] \
        sorted_dets = dets[order, :5]
    cdef float cthresh = thresh
    # Lazily allocate (and cache) the device workspace on first use of
    # this GPU; nms_Malloc runs without the GIL.
    if device_id not in memory_pool:
        with nogil:
            base = nms_Malloc()
        memory_pool[device_id] = base
        # print "malloc", base
    base = memory_pool[device_id]
    with nogil:
        _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, cthresh, device_id, base)
    # _nms writes indices into the sorted array; translate them back to
    # the caller's original ordering.
    keep = keep[:num_out]
    return list(order[keep])
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SiamDW_D/libs/PreciseRoIPooling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_D/libs/PreciseRoIPooling/__init__.py -------------------------------------------------------------------------------- /SiamDW_D/libs/PreciseRoIPooling/_assets/prroi_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_D/libs/PreciseRoIPooling/_assets/prroi_visualization.png -------------------------------------------------------------------------------- /SiamDW_D/libs/PreciseRoIPooling/pytorch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_D/libs/PreciseRoIPooling/pytorch/__init__.py -------------------------------------------------------------------------------- /SiamDW_D/libs/PreciseRoIPooling/pytorch/prroi_pool/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | /_prroi_pooling 3 | -------------------------------------------------------------------------------- /SiamDW_D/libs/PreciseRoIPooling/pytorch/prroi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : __init__.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 11 | 12 | from .prroi_pool import * 13 | 14 | -------------------------------------------------------------------------------- /SiamDW_D/libs/PreciseRoIPooling/pytorch/prroi_pool/build.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : build.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 11 | 12 | import os 13 | import torch 14 | 15 | from torch.utils.ffi import create_extension 16 | 17 | headers = [] 18 | sources = [] 19 | defines = [] 20 | extra_objects = [] 21 | with_cuda = False 22 | 23 | if torch.cuda.is_available(): 24 | with_cuda = True 25 | 26 | headers+= ['src/prroi_pooling_gpu.h'] 27 | sources += ['src/prroi_pooling_gpu.c'] 28 | defines += [('WITH_CUDA', None)] 29 | 30 | this_file = os.path.dirname(os.path.realpath(__file__)) 31 | extra_objects_cuda = ['src/prroi_pooling_gpu_impl.cu.o'] 32 | extra_objects_cuda = [os.path.join(this_file, fname) for fname in extra_objects_cuda] 33 | extra_objects.extend(extra_objects_cuda) 34 | else: 35 | # TODO(Jiayuan Mao @ 07/13): remove this restriction after we support the cpu implementation. 
class PrRoIPool2D(nn.Module):
    """Precise RoI Pooling layer.

    Thin nn.Module wrapper that stores the pooling geometry and
    delegates the actual work to the ``prroi_pool2d`` function.
    """

    def __init__(self, pooled_height, pooled_width, spatial_scale):
        super().__init__()
        # Normalize constructor arguments to plain numbers up front.
        self.pooled_height = int(pooled_height)
        self.pooled_width = int(pooled_width)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        return prroi_pool2d(
            features, rois,
            self.pooled_height, self.pooled_width, self.spatial_scale)
8 | * Copyright (c) 2017 Megvii Technology Limited. 9 | */ 10 | 11 | int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale); 12 | 13 | int prroi_pooling_backward_cuda( 14 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 15 | int pooled_height, int pooled_width, float spatial_scale 16 | ); 17 | 18 | int prroi_pooling_coor_backward_cuda( 19 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 20 | int pooled_height, int pooled_width, float spatial_scal 21 | ); 22 | 23 | -------------------------------------------------------------------------------- /SiamDW_D/libs/PreciseRoIPooling/pytorch/prroi_pool/travis.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash -e 2 | # File : travis.sh 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # 6 | # Distributed under terms of the MIT license. 7 | # Copyright (c) 2017 Megvii Technology Limited. 8 | 9 | cd src 10 | echo "Working directory: " `pwd` 11 | echo "Compiling prroi_pooling kernels by nvcc..." 12 | nvcc -c -o prroi_pooling_gpu_impl.cu.o prroi_pooling_gpu_impl.cu -x cu -Xcompiler -fPIC -arch=sm_35 13 | 14 | cd ../ 15 | echo "Working directory: " `pwd` 16 | echo "Building python libraries..." 
class ModelBuilder(nn.Module):
    """RPN tracker model: ResNet-50 backbone + adjust neck + multi-level RPN head."""

    def __init__(self):
        super(ModelBuilder, self).__init__()

        # Backbone: ResNet-50 (atrous variant, per the import), exposing
        # stages 2-4.
        self.backbone = resnet50(used_layers=[2, 3, 4])

        # Neck: project the three backbone stages to a common 256-channel
        # width.
        self.neck = AdjustAllLayer([512, 1024, 2048], [256, 256, 256])

        channels = [256, 256, 256]
        self.rpn_head = MultiRPN(5, channels, True)

    def template(self, z):
        """Embed the template patch once and cache its features on self."""
        self.zf = self.neck(self.backbone(z))

    def track(self, x):
        """Embed a search patch and correlate against the cached template.

        Returns:
            tuple: (cls, loc) RPN outputs.
        """
        xf = self.neck(self.backbone(x))
        return self.rpn_head(self.zf, xf)
def xcorr_depthwise(x, kernel):
    """Depthwise cross correlation of search features with a template kernel.

    Every (batch, channel) slice of ``x`` is correlated with the matching
    slice of ``kernel`` independently.

    Args:
        x: search features of shape (B, C, H, W).
        kernel: template features of shape (B, C, h, w).

    Returns:
        Tensor of shape (B, C, H - h + 1, W - w + 1).
    """
    batch, channel = kernel.size(0), kernel.size(1)
    # Fold batch into the channel axis so one grouped conv2d performs an
    # independent correlation per (batch, channel) pair.
    flat_x = x.view(1, batch * channel, x.size(2), x.size(3))
    flat_k = kernel.view(batch * channel, 1, kernel.size(2), kernel.size(3))
    response = F.conv2d(flat_x, flat_k, groups=batch * channel)
    return response.view(batch, channel, response.size(2), response.size(3))
class RGB(FeatureBase):
    """Raw RGB feature, rescaled from [0, 255] to [-0.5, 0.5]."""

    def dim(self):
        # Three color channels.
        return 3

    def stride(self):
        return self.pool_stride

    def extract(self, im: torch.Tensor):
        # Map pixel values from [0, 255] into [-0.5, 0.5].
        return im / 255 - 0.5
10 | """ 11 | def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True): 12 | super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray) 13 | self.features = features 14 | 15 | self.input_stride = self.features[0].stride() 16 | 17 | for feat in self.features: 18 | if self.input_stride != feat.stride(): 19 | raise ValueError('Strides for the features must be the same for a bultiresolution feature.') 20 | 21 | def dim(self): 22 | return sum([f.dim() for f in self.features]) 23 | 24 | def stride(self): 25 | return self.pool_stride * self.input_stride 26 | 27 | def extract(self, im: torch.Tensor): 28 | return torch.cat([f.get_feature(im) for f in self.features], 1) 29 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | from .arraymisc import * 3 | from .utils import * 4 | from .fileio import * 5 | from .opencv_info import * 6 | from .image import * 7 | from .video import * 8 | from .visualization import * 9 | from .version import __version__ 10 | # The following modules are not imported to this level, so mmcv may be used 11 | # without PyTorch. 
12 | # - runner 13 | # - parallel 14 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/arraymisc/__init__.py: -------------------------------------------------------------------------------- 1 | from .quantization import quantize, dequantize 2 | 3 | __all__ = ['quantize', 'dequantize'] 4 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/cnn/__init__.py: -------------------------------------------------------------------------------- 1 | from .alexnet import AlexNet 2 | from .vgg import VGG, make_vgg_layer 3 | from .resnet import ResNet, make_res_layer 4 | from .weight_init import (constant_init, xavier_init, normal_init, 5 | uniform_init, kaiming_init, caffe2_xavier_init) 6 | 7 | __all__ = [ 8 | 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer', 9 | 'constant_init', 'xavier_init', 'normal_init', 'uniform_init', 10 | 'kaiming_init', 'caffe2_xavier_init' 11 | ] 12 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/fileio/__init__.py: -------------------------------------------------------------------------------- 1 | from .io import load, dump, register_handler 2 | from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler 3 | from .parse import list_from_file, dict_from_file 4 | 5 | __all__ = [ 6 | 'load', 'dump', 'register_handler', 'BaseFileHandler', 'JsonHandler', 7 | 'PickleHandler', 'YamlHandler', 'list_from_file', 'dict_from_file' 8 | ] 9 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/fileio/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseFileHandler 2 | from .json_handler import JsonHandler 3 | from .pickle_handler import PickleHandler 4 | from .yaml_handler import YamlHandler 5 | 6 | __all__ = ['BaseFileHandler', 'JsonHandler', 
class BaseFileHandler(object):
    """Abstract serializer: subclasses implement object <-> file/str I/O.

    Subclasses provide ``load_from_fileobj``, ``dump_to_fileobj`` and
    ``dump_to_str``; the path-based helpers below delegate to them.
    """

    __metaclass__ = ABCMeta  # python 2 compatibility

    @abstractmethod
    def load_from_fileobj(self, file, **kwargs):
        """Deserialize an object from an open file object."""
        pass

    @abstractmethod
    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize ``obj`` into an open file object."""
        pass

    @abstractmethod
    def dump_to_str(self, obj, **kwargs):
        """Serialize ``obj`` to a string."""
        pass

    def load_from_path(self, filepath, mode='r', **kwargs):
        """Open ``filepath`` and delegate to ``load_from_fileobj``."""
        with open(filepath, mode) as fobj:
            return self.load_from_fileobj(fobj, **kwargs)

    def dump_to_path(self, obj, filepath, mode='w', **kwargs):
        """Open ``filepath`` for writing and delegate to ``dump_to_fileobj``."""
        with open(filepath, mode) as fobj:
            self.dump_to_fileobj(obj, fobj, **kwargs)
class YamlHandler(BaseFileHandler):
    """File handler backed by PyYAML.

    The C-accelerated Loader/Dumper are used when available (selected by the
    module-level try/except import); callers may override them via kwargs.
    """

    def load_from_fileobj(self, file, **kwargs):
        # Respect an explicit Loader from the caller, else use the default.
        if 'Loader' not in kwargs:
            kwargs['Loader'] = Loader
        return yaml.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        if 'Dumper' not in kwargs:
            kwargs['Dumper'] = Dumper
        yaml.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        if 'Dumper' not in kwargs:
            kwargs['Dumper'] = Dumper
        return yaml.dump(obj, **kwargs)
def imnormalize(img, mean, std, to_rgb=True):
    """Normalize an image: cast to float32, optionally convert BGR->RGB,
    then subtract ``mean`` and divide by ``std``."""
    normalized = img.astype(np.float32)
    if to_rgb:
        # Channel order swap happens before the statistics are applied.
        normalized = bgr2rgb(normalized)
    return (normalized - mean) / std


def imdenormalize(img, mean, std, to_bgr=True):
    """Invert :func:`imnormalize`: multiply by ``std``, add ``mean``, and
    optionally convert RGB back to BGR."""
    restored = img * std + mean
    if to_bgr:
        restored = rgb2bgr(restored)
    return restored
class MMDataParallel(DataParallel):
    """DataParallel variant that scatters inputs with mmcv's DataContainer-aware
    ``scatter_kwargs`` (from ``.scatter_gather``) instead of torch's default
    scatter implementation.
    """

    def scatter(self, inputs, kwargs, device_ids):
        # self.dim is inherited from torch's DataParallel (the batch dimension
        # used for splitting across devices).
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
'DistSamplerSeedHook', 17 | 'LoggerHook', 'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook', 18 | 'load_state_dict', 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 19 | 'parallel_test', 'Priority', 'get_priority', 'get_host_info', 20 | 'get_dist_info', 'master_only', 'get_time_str', 'obj_from_dict' 21 | ] 22 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/runner/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | from .hook import Hook 2 | from .checkpoint import CheckpointHook 3 | from .closure import ClosureHook 4 | from .lr_updater import LrUpdaterHook 5 | from .optimizer import OptimizerHook 6 | from .iter_timer import IterTimerHook 7 | from .sampler_seed import DistSamplerSeedHook 8 | from .memory import EmptyCacheHook 9 | from .logger import (LoggerHook, TextLoggerHook, PaviLoggerHook, 10 | TensorboardLoggerHook) 11 | 12 | __all__ = [ 13 | 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', 'OptimizerHook', 14 | 'IterTimerHook', 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 15 | 'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook' 16 | ] 17 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/runner/hooks/checkpoint.py: -------------------------------------------------------------------------------- 1 | from .hook import Hook 2 | from ..utils import master_only 3 | 4 | 5 | class CheckpointHook(Hook): 6 | 7 | def __init__(self, 8 | interval=-1, 9 | save_optimizer=True, 10 | out_dir=None, 11 | **kwargs): 12 | self.interval = interval 13 | self.save_optimizer = save_optimizer 14 | self.out_dir = out_dir 15 | self.args = kwargs 16 | 17 | @master_only 18 | def after_train_epoch(self, runner): 19 | if not self.every_n_epochs(runner, self.interval): 20 | return 21 | 22 | if not self.out_dir: 23 | self.out_dir = runner.work_dir 24 | runner.save_checkpoint( 25 | 
class Hook(object):
    """Base class for runner hooks.

    Provides no-op lifecycle callbacks (subclasses override what they need),
    train/val specializations that funnel into the generic callbacks, and
    scheduling predicates using 1-based counting (``n <= 0`` disables).
    """

    # --- generic lifecycle callbacks: default to doing nothing ---
    def before_run(self, runner):
        pass

    def after_run(self, runner):
        pass

    def before_epoch(self, runner):
        pass

    def after_epoch(self, runner):
        pass

    def before_iter(self, runner):
        pass

    def after_iter(self, runner):
        pass

    # --- train/val variants delegate to the generic callbacks, so a
    # subclass overriding e.g. before_epoch affects both phases ---
    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    # --- scheduling predicates ---
    def every_n_epochs(self, runner, n):
        """True on every n-th epoch (1-based); always False when n <= 0."""
        if n <= 0:
            return False
        return (runner.epoch + 1) % n == 0

    def every_n_inner_iters(self, runner, n):
        """True on every n-th iteration within the current epoch."""
        if n <= 0:
            return False
        return (runner.inner_iter + 1) % n == 0

    def every_n_iters(self, runner, n):
        """True on every n-th global iteration."""
        if n <= 0:
            return False
        return (runner.iter + 1) % n == 0

    def end_of_epoch(self, runner):
        """True when the current inner iteration is the last of the epoch."""
        return runner.inner_iter + 1 == len(runner.data_loader)
class EmptyCacheHook(Hook):
    """Releases cached GPU memory via ``torch.cuda.empty_cache()`` at
    configurable points of the training loop."""

    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
        # Flags are stored under private names so they do not shadow the
        # before_epoch/after_epoch hook methods defined below.
        self._before_epoch = before_epoch
        self._after_epoch = after_epoch
        self._after_iter = after_iter

    def before_epoch(self, runner):
        if self._before_epoch:
            torch.cuda.empty_cache()

    def after_epoch(self, runner):
        if self._after_epoch:
            torch.cuda.empty_cache()

    def after_iter(self, runner):
        if self._after_iter:
            torch.cuda.empty_cache()
class LogBuffer(object):
    """Accumulates per-iteration scalar logs and computes weighted averages.

    ``val_history``/``n_history`` record each logged value and its count;
    ``output`` holds the averaged values after :meth:`average` and ``ready``
    flags whether ``output`` is up to date.
    """

    def __init__(self):
        self.val_history = OrderedDict()
        self.n_history = OrderedDict()
        self.output = OrderedDict()
        self.ready = False

    def clear(self):
        """Drop all recorded history and any computed output."""
        self.val_history.clear()
        self.n_history.clear()
        self.clear_output()

    def clear_output(self):
        """Discard averaged results only; history is kept."""
        self.output.clear()
        self.ready = False

    def update(self, vars, count=1):
        """Record a dict of values, each weighted by ``count``."""
        assert isinstance(vars, dict)
        for key, value in vars.items():
            # Lazily create the history lists on first sight of a key.
            self.val_history.setdefault(key, [])
            self.n_history.setdefault(key, [])
            self.val_history[key].append(value)
            self.n_history[key].append(count)

    def average(self, n=0):
        """Average latest n values or all values"""
        assert n >= 0
        for key in self.val_history:
            # n == 0 slices the full history ([-0:] is the whole list).
            values = np.array(self.val_history[key][-n:])
            nums = np.array(self.n_history[key][-n:])
            self.output[key] = np.sum(values * nums) / np.sum(nums)
        self.ready = True
class Priority(Enum):
    """Hook priority levels.

    Seven named levels mapping to integer values in [0, 100]:
    HIGHEST(0), VERY_HIGH(10), HIGH(30), NORMAL(50), LOW(70),
    VERY_LOW(90), LOWEST(100).
    """

    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    NORMAL = 50
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100


def get_priority(priority):
    """Normalize a priority given as an int, level-name string, or member.

    Args:
        priority (int or str or :obj:`Priority`): Priority.

    Returns:
        int: The priority value.

    Raises:
        ValueError: If an int falls outside [0, 100].
        TypeError: If the input is none of the supported types.
    """
    if isinstance(priority, int):
        if not 0 <= priority <= 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    if isinstance(priority, Priority):
        return priority.value
    if isinstance(priority, str):
        # Name lookup is case-insensitive: 'normal' -> Priority.NORMAL.
        return Priority[priority.upper()].value
    raise TypeError('priority must be an integer or Priority enum value')
track_parallel_progress 8 | from .timer import Timer, TimerError, check_time 9 | 10 | __all__ = [ 11 | 'ConfigDict', 'Config', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 12 | 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', 13 | 'check_prerequisites', 'requires_package', 'requires_executable', 14 | 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink', 15 | 'scandir', 'FileNotFoundError', 'ProgressBar', 'track_progress', 16 | 'track_parallel_progress', 'Timer', 'TimerError', 'check_time' 17 | ] 18 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.2.8' 2 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/video/__init__.py: -------------------------------------------------------------------------------- 1 | from .io import Cache, VideoReader, frames2video 2 | from .processing import convert_video, resize_video, cut_video, concat_video 3 | from .optflow import flowread, flowwrite, quantize_flow, dequantize_flow 4 | 5 | __all__ = [ 6 | 'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video', 7 | 'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow', 8 | 'dequantize_flow' 9 | ] 10 | -------------------------------------------------------------------------------- /SiamDW_D/libs/mmcv/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .color import Color, color_val 2 | from .image import imshow, imshow_bboxes, imshow_det_bboxes 3 | from .optflow import flowshow, flow2rgb, make_color_wheel 4 | 5 | __all__ = [ 6 | 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes', 7 | 'flowshow', 'flow2rgb', 'make_color_wheel' 8 | ] 9 | -------------------------------------------------------------------------------- 
class Color(Enum):
    """An enum that defines common colors.

    Contains red, green, blue, cyan, yellow, magenta, white and black.
    """
    red = (0, 0, 255)
    green = (0, 255, 0)
    blue = (255, 0, 0)
    cyan = (255, 255, 0)
    yellow = (0, 255, 255)
    magenta = (255, 0, 255)
    white = (255, 255, 255)
    black = (0, 0, 0)


def color_val(color):
    """Convert various input to color tuples.

    Args:
        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs

    Returns:
        tuple[int]: A tuple of 3 integers indicating BGR channels.
    """
    # Branch order matters: strings first (name lookup), then enum members,
    # then raw tuple/int/ndarray forms.
    if is_str(color):
        return Color[color].value
    if isinstance(color, Color):
        return color.value
    if isinstance(color, tuple):
        assert len(color) == 3
        for channel in color:
            assert 0 <= channel <= 255
        return color
    if isinstance(color, int):
        assert 0 <= color <= 255
        return color, color, color
    if isinstance(color, np.ndarray):
        assert color.ndim == 1 and color.size == 3
        assert np.all((color >= 0) & (color <= 255))
        return tuple(color.astype(np.uint8))
    raise TypeError('Invalid type for color: {}'.format(type(color)))
def conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True,
               batch_norm=True, relu=True):
    """Build a Conv2d optionally followed by BatchNorm2d and ReLU.

    Returns an ``nn.Sequential`` containing 1-3 modules depending on the
    ``batch_norm``/``relu`` flags.
    """
    modules = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                         padding=padding, dilation=dilation, bias=bias)]
    if batch_norm:
        modules.append(nn.BatchNorm2d(out_planes))
    if relu:
        modules.append(nn.ReLU(inplace=True))
    return nn.Sequential(*modules)


class LinearBlock(nn.Module):
    """Fully-connected layer over a flattened (in_planes, input_sz, input_sz)
    feature map, with optional batch norm (applied as 2d over a 1x1 map) and
    ReLU. Output is flattened to (batch, out_planes)."""

    def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True):
        super().__init__()
        self.linear = nn.Linear(in_planes * input_sz * input_sz, out_planes, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        batch = x.shape[0]
        out = self.linear(x.view(batch, -1))
        if self.bn is not None:
            # BatchNorm2d expects a 4D tensor; treat features as 1x1 maps.
            out = self.bn(out.view(batch, out.shape[1], 1, 1))
        if self.relu is not None:
            out = self.relu(out)
        return out.view(batch, -1)
class FeatureParams:
    """Class for feature specific parameters"""

    def __init__(self, *args, **kwargs):
        # Only keyword arguments are accepted.
        if args:
            raise ValueError

        for name, val in kwargs.items():
            # Lists are wrapped in a TensorList; everything else is stored as-is.
            setattr(self, name, TensorList(val) if isinstance(val, list) else val)


def Choice(*args):
    """Can be used to sample random parameter values."""
    return random.choice(args)
class TensorDict(OrderedDict):
    """Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality."""

    def concat(self, other):
        """Concatenates two dicts without copying internal data."""
        return TensorDict(self, **other)

    def copy(self):
        """Return a shallow TensorDict copy."""
        return TensorDict(super(TensorDict, self).copy())

    def __getattr__(self, name):
        # Delegate unknown attribute access to the contained values: returns
        # a callable that invokes `name` on every value that has it (values
        # without the attribute are passed through unchanged).  Only names
        # that exist on torch.Tensor are delegated.
        if not hasattr(torch.Tensor, name):
            # Fix: error message previously read "has not attribute".
            raise AttributeError('\'TensorDict\' object has no attribute \'{}\''.format(name))

        def apply_attr(*args, **kwargs):
            return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})
        return apply_attr

    def attribute(self, attr: str, *args):
        """Fetch attribute ``attr`` from every value (optional default in *args)."""
        return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})

    def apply(self, fn, *args, **kwargs):
        """Apply ``fn`` to every value, returning a new TensorDict."""
        return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})

    @staticmethod
    def _iterable(a):
        # Helper: treat TensorDicts and lists as iterable containers.
        return isinstance(a, (TensorDict, list))
class EnvSettings:
    """Holds absolute dataset/result paths for the test harness.

    Paths are anchored at the parent directory of this file; entries left as
    empty strings are datasets not configured in this setup.
    """

    def __init__(self):
        # Project root: one level above the directory containing this module.
        root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

        self.results_path = '{}/tracking_results/'.format(root)
        self.otb_path = '{}/data/OTB2015'.format(root)
        self.nfs_path = ''
        self.uav_path = ''
        self.tpl_path = ''
        self.vot18_path = ''
        self.vot19_path = ''
        self.votlt18_path = '{}/data/VOT2018-LT'.format(root)
        self.votlt19_path = '{}/data/VOT2019-LT'.format(root)
        self.oxuva_path = '{}/data/long-term-tracking-benchmark/dataset/images'.format(root)
        self.oxuva_list = '{}/data/long-term-tracking-benchmark/dataset/tasks/test.csv'.format(root)
        self.rgbd_path = '{}/data/VOT2019-RGBD'.format(root)
        self.got10k_path = ''
        self.lasot_path = '{}/data/LaSOTBenchmark'.format(root)
        self.trackingnet_path = ''
https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_D/test/settings/far_fusion/__init__.py -------------------------------------------------------------------------------- /SiamDW_D/test/settings/resnext_far/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_D/test/settings/resnext_far/__init__.py -------------------------------------------------------------------------------- /SiamDW_D/test/settings/senet_far/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_D/test/settings/senet_far/__init__.py -------------------------------------------------------------------------------- /SiamDW_D/test/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # from .evaluation import * 2 | from .params import * -------------------------------------------------------------------------------- /SiamDW_D/test/utils/gdrive_download: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The script taken from https://www.matthuisman.nz/2019/01/download-google-drive-files-wget-curl.html 4 | 5 | url=$1 6 | filename=$2 7 | 8 | [ -z "$url" ] && echo A URL or ID is required first argument && exit 1 9 | 10 | fileid="" 11 | declare -a patterns=("s/.*\/file\/d\/\(.*\)\/.*/\1/p" "s/.*id\=\(.*\)/\1/p" "s/\(.*\)/\1/p") 12 | for i in "${patterns[@]}" 13 | do 14 | fileid=$(echo $url | sed -n $i) 15 | [ ! 
-z "$fileid" ] && break 16 | done 17 | 18 | [ -z "$fileid" ] && echo Could not find Google ID && exit 1 19 | 20 | echo File ID: $fileid 21 | 22 | tmp_file="$filename.$$.file" 23 | tmp_cookies="$filename.$$.cookies" 24 | tmp_headers="$filename.$$.headers" 25 | 26 | url='https://docs.google.com/uc?export=download&id='$fileid 27 | echo Downloading: "$url > $tmp_file" 28 | wget --save-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 29 | 30 | if [[ ! $(find "$tmp_file" -type f -size +10000c 2>/dev/null) ]]; then 31 | confirm=$(cat "$tmp_file" | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1/p') 32 | fi 33 | 34 | if [ ! -z "$confirm" ]; then 35 | url='https://docs.google.com/uc?export=download&id='$fileid'&confirm='$confirm 36 | echo Downloading: "$url > $tmp_file" 37 | wget --load-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 38 | fi 39 | 40 | [ -z "$filename" ] && filename=$(cat "$tmp_headers" | sed -rn 's/.*filename=\"(.*)\".*/\1/p') 41 | [ -z "$filename" ] && filename="google_drive.file" 42 | 43 | echo Moving: "$tmp_file > $filename" 44 | 45 | mv "$tmp_file" "$filename" 46 | 47 | rm -f "$tmp_cookies" "$tmp_headers" 48 | 49 | echo Saved: "$filename" 50 | echo DONE! 
class TrackerParams:
    """Container for tracker parameters."""

    def free_memory(self):
        """Release memory held by any attribute that exposes ``free_memory``."""
        for attr_name in dir(self):
            if attr_name.startswith('__'):
                continue
            attr = getattr(self, attr_name)
            if hasattr(attr, 'free_memory'):
                attr.free_memory()


class FeatureParams:
    """Container for feature-specific parameters.

    Keyword arguments become attributes; list values are wrapped in a
    ``TensorList``. Positional arguments are rejected with ``ValueError``.
    """

    def __init__(self, *args, **kwargs):
        if args:
            raise ValueError

        for name, val in kwargs.items():
            wrapped = TensorList(val) if isinstance(val, list) else val
            setattr(self, name, wrapped)


def Choice(*args):
    """Return one of the given values uniformly at random."""
    return random.choice(args)
34 | """ 35 | a_np = a.squeeze().cpu().clone().detach().numpy() 36 | if a_np.ndim > 1: 37 | raise ValueError 38 | plt.figure(fig_num) 39 | # plt.tight_layout() 40 | plt.cla() 41 | plt.plot(a_np) 42 | if title is not None: 43 | plt.title(title) 44 | plt.draw() 45 | plt.pause(0.001) 46 | -------------------------------------------------------------------------------- /SiamDW_LT/README.md: -------------------------------------------------------------------------------- 1 | # SiamDW_LT 2 | 3 | If you failed to install and run this tracker, please email me (lzuqer@gmail.com). 4 | 5 | ## Prerequisites 6 | ### Install python packages 7 | The python version should be 3.6.x, if not please run, 8 | ``` 9 | conda install -y python=3.6 10 | ``` 11 | 12 | and then run, 13 | ``` 14 | bash install.sh 15 | ``` 16 | 17 | ### Download pretrained model 18 | Download models [here](https://drive.google.com/open?id=1fJ_V5WCKROoBseLBQk3xALBqWaMb0kY8). 19 | Unzip networks.zip in `SiamDW_LT/test/` 20 | 21 | ### Set pretrained model path 22 | - row of 14th in `SiamDW_LT/test/settings/resnext_far/resnext.py`, 23 | please modify `main_path` to your code `SiamDW_LT/test` directory. eg. 24 | ``` 25 | main_path = '/home/v-had/VOT2019/SiamDW_LT/test/' 26 | ``` 27 | 28 | ### Prepare data 29 | You can creat a soft link in test dir. eg. 30 | ``` 31 | ln -s $Your-LongTerm-data-path data 32 | ``` 33 | or just move LongTerm data to `SiamDW_LT/test/data`. 34 | Then, modify `self.votlt19_path` in `SiamDW_LT/test/settings/envs.py` to your LongTerm data path. 35 | 36 | ### Prepare to run 37 | Define your experiments in `SiamDW_LT/test/settings/exp.py`. The default is LongTerm. 38 | 39 | ## Run tracker 40 | Set gpus id and processes in `SiamDW_LT/test/parallel_test.py`. 41 | Then run it. 
43 | python parallel_test.py 44 | ``` 45 |
import mmcv

from . import assigners, samplers


def build_assigner(cfg, **kwargs):
    """Build a bbox assigner from a config, or pass an instance through.

    Args:
        cfg: Either an existing ``assigners.BaseAssigner`` instance
            (returned unchanged) or a config dict whose ``type`` key names
            a class in :mod:`assigners`.
        **kwargs: Default constructor arguments applied to dict configs.

    Returns:
        An assigner instance.

    Raises:
        TypeError: If ``cfg`` is neither an assigner nor a dict.
    """
    if isinstance(cfg, assigners.BaseAssigner):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(
            cfg, assigners, default_args=kwargs)
    else:
        # BUG FIX: this message previously said "building a sampler",
        # copy-pasted from build_sampler below.
        raise TypeError('Invalid type {} for building an assigner'.format(
            type(cfg)))


def build_sampler(cfg, **kwargs):
    """Build a bbox sampler from a config, or pass an instance through.

    Args:
        cfg: Either an existing ``samplers.BaseSampler`` instance
            (returned unchanged) or a config dict whose ``type`` key names
            a class in :mod:`samplers`.
        **kwargs: Default constructor arguments applied to dict configs.

    Returns:
        A sampler instance.

    Raises:
        TypeError: If ``cfg`` is neither a sampler nor a dict.
    """
    if isinstance(cfg, samplers.BaseSampler):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(
            cfg, samplers, default_args=kwargs)
    else:
        raise TypeError('Invalid type {} for building a sampler'.format(
            type(cfg)))


def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg):
    """Assign proposals to ground truths, then sample them.

    Builds the assigner and sampler described by ``cfg.assigner`` and
    ``cfg.sampler``, runs assignment, and samples from the result.

    Returns:
        tuple: ``(assign_result, sampling_result)``.
    """
    bbox_assigner = build_assigner(cfg.assigner)
    bbox_sampler = build_sampler(cfg.sampler)
    assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore,
                                         gt_labels)
    sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes,
                                          gt_labels)
    return assign_result, sampling_result
labels 11 | 12 | def add_gt_(self, gt_labels): 13 | self_inds = torch.arange( 14 | 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) 15 | self.gt_inds = torch.cat([self_inds, self.gt_inds]) 16 | self.max_overlaps = torch.cat( 17 | [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps]) 18 | if self.labels is not None: 19 | self.labels = torch.cat([gt_labels, self.labels]) 20 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | 6 | @abstractmethod 7 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 8 | pass 9 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .pseudo_sampler import PseudoSampler 3 | from .random_sampler import RandomSampler 4 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 5 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 6 | from .combined_sampler import CombinedSampler 7 | from .ohem_sampler import OHEMSampler 8 | from .sampling_result import SamplingResult 9 | 10 | __all__ = [ 11 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 12 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 13 | 'OHEMSampler', 'SamplingResult' 14 | ] 15 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from ..assign_sampling import 
class PseudoSampler(BaseSampler):
    """A sampler that does no sampling: every positive and negative
    assignment is kept exactly as produced by the assigner.

    The abstract ``_sample_pos``/``_sample_neg`` hooks are never invoked.
    """

    def __init__(self, **kwargs):
        # Deliberately skip BaseSampler.__init__: no quotas are configured.
        pass

    def _sample_pos(self, **kwargs):
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
        """Wrap all assigned boxes into a ``SamplingResult`` unchanged."""
        gt_inds = assign_result.gt_inds
        pos_inds = torch.nonzero(gt_inds > 0).squeeze(-1).unique()
        neg_inds = torch.nonzero(gt_inds == 0).squeeze(-1).unique()
        # No boxes here were injected as ground truth.
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, gt_flags)
bboxes[neg_inds] 12 | self.pos_is_gt = gt_flags[pos_inds] 13 | 14 | self.num_gts = gt_bboxes.shape[0] 15 | self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 16 | self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] 17 | if assign_result.labels is not None: 18 | self.pos_gt_labels = assign_result.labels[pos_inds] 19 | else: 20 | self.pos_gt_labels = None 21 | 22 | @property 23 | def bboxes(self): 24 | return torch.cat([self.pos_bboxes, self.neg_bboxes]) 25 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/core/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .losses import (weighted_nll_loss, weighted_cross_entropy, 2 | weighted_binary_cross_entropy, sigmoid_focal_loss, 3 | weighted_sigmoid_focal_loss, mask_cross_entropy, 4 | smooth_l1_loss, weighted_smoothl1, accuracy) 5 | 6 | __all__ = [ 7 | 'weighted_nll_loss', 'weighted_cross_entropy', 8 | 'weighted_binary_cross_entropy', 'sigmoid_focal_loss', 9 | 'weighted_sigmoid_focal_loss', 'mask_cross_entropy', 'smooth_l1_loss', 10 | 'weighted_smoothl1', 'accuracy' 11 | ] 12 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import split_combined_polys 2 | from .mask_target import mask_target 3 | 4 | __all__ = ['split_combined_polys', 'mask_target'] 5 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/core/mask/mask_target.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import mmcv 4 | 5 | 6 | def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, 7 | cfg): 8 | cfg_list = [cfg for _ in range(len(pos_proposals_list))] 9 | mask_targets = 
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
    """Crop and resize ground-truth masks for one image's positive proposals.

    Args:
        pos_proposals (Tensor): positive proposal boxes, one (x1, y1, x2, y2)
            row per proposal.
        pos_assigned_gt_inds (Tensor): ground-truth index assigned to each
            positive proposal.
        gt_masks: per-ground-truth binary masks, indexable as
            ``gt_masks[i][y, x]`` (uint8 before and after resizing).
        cfg: config object providing ``mask_size``.

    Returns:
        Tensor: float mask targets of shape (num_pos, mask_size, mask_size);
        empty when there are no positive proposals.
    """
    mask_size = cfg.mask_size
    num_pos = pos_proposals.size(0)
    if num_pos == 0:
        return pos_proposals.new_zeros((0, mask_size, mask_size))

    boxes = pos_proposals.cpu().numpy()
    assigned = pos_assigned_gt_inds.cpu().numpy()
    targets = []
    for idx in range(num_pos):
        x1, y1, x2, y2 = boxes[idx, :].astype(np.int32)
        # Clamp each crop to at least one pixel per side.
        w = np.maximum(x2 - x1 + 1, 1)
        h = np.maximum(y2 - y1 + 1, 1)
        crop = gt_masks[assigned[idx]][y1:y1 + h, x1:x1 + w]
        # mask stays uint8 both before and after resizing
        targets.append(mmcv.imresize(crop, (mask_size, mask_size)))
    return torch.from_numpy(np.stack(targets)).float().to(
        pos_proposals.device)
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Denormalize a batched image tensor into a list of uint8 arrays.

    Args:
        tensor: (N, C, H, W) image batch.
        mean, std: per-channel normalization that was originally applied.
        to_rgb: whether normalization included a BGR->RGB swap (reversed
            here via ``to_bgr``).

    Returns:
        list: contiguous (H, W, C) uint8 numpy images.
    """
    mean_arr = np.array(mean, dtype=np.float32)
    std_arr = np.array(std, dtype=np.float32)
    images = []
    for idx in range(tensor.size(0)):
        chw = tensor[idx, ...].cpu().numpy().transpose(1, 2, 0)
        restored = mmcv.imdenormalize(
            chw, mean_arr, std_arr, to_bgr=to_rgb).astype(np.uint8)
        images.append(np.ascontiguousarray(restored))
    return images


def multi_apply(func, *args, **kwargs):
    """Apply ``func`` across zipped ``args`` and transpose the results.

    Returns:
        tuple of lists: one list per element of ``func``'s return tuple,
        each gathered across all inputs.
    """
    fn = partial(func, **kwargs) if kwargs else func
    results = map(fn, *args)
    return tuple(map(list, zip(*results)))


def unmap(data, count, inds, fill=0):
    """Scatter ``data`` back into a tensor of leading size ``count`` at
    positions ``inds``, filling every other slot with ``fill``."""
    if data.dim() == 1:
        full = data.new_full((count, ), fill)
        full[inds] = data
    else:
        full = data.new_full((count, ) + data.size()[1:], fill)
        full[inds, :] = data
    return full
/SiamDW_LT/libs/FPNlib/mmdet/models/anchor_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_head import AnchorHead 2 | from .rpn_head import RPNHead 3 | 4 | __all__ = ['AnchorHead', 'RPNHead'] 5 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnet import ResNet 2 | from .resnext import ResNeXt 3 | from .ssd_vgg import SSDVGG 4 | 5 | __all__ = ['ResNet', 'ResNeXt', 'SSDVGG'] 6 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/models/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_head import BBoxHead 2 | from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead 3 | 4 | __all__ = ['BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead'] 5 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/models/builder.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | from torch import nn 3 | 4 | from .registry import BACKBONES, NECKS, HEADS, DETECTORS 5 | 6 | 7 | def _build_module(cfg, registry, default_args): 8 | assert isinstance(cfg, dict) and 'type' in cfg 9 | assert isinstance(default_args, dict) or default_args is None 10 | args = cfg.copy() 11 | obj_type = args.pop('type') 12 | if mmcv.is_str(obj_type): 13 | if obj_type not in registry.module_dict: 14 | raise KeyError('{} is not in the {} registry'.format( 15 | obj_type, registry.name)) 16 | obj_type = registry.module_dict[obj_type] 17 | elif not isinstance(obj_type, type): 18 | raise TypeError('type must be a str or valid type, but got {}'.format( 19 | type(obj_type))) 20 | if default_args is not None: 21 | for name, value in 
class Registry(object):
    """A name -> class lookup table for ``nn.Module`` subclasses,
    used by the model builder to resolve config ``type`` strings."""

    def __init__(self, name):
        self._name = name
        self._module_dict = dict()

    @property
    def name(self):
        """str: label of this registry (e.g. ``'backbone'``)."""
        return self._name

    @property
    def module_dict(self):
        """dict: mapping from registered class name to class object."""
        return self._module_dict

    def _register_module(self, module_class):
        """Register a module class under its ``__name__``.

        Args:
            module_class (type): an ``nn.Module`` subclass.

        Raises:
            TypeError: if ``module_class`` is not an ``nn.Module`` subclass.
            KeyError: if a class with the same name is already registered.
        """
        if not issubclass(module_class, nn.Module):
            raise TypeError(
                'module must be a child of nn.Module, but got {}'.format(
                    type(module_class)))
        key = module_class.__name__
        if key in self._module_dict:
            raise KeyError('{} is already registered in {}'.format(
                key, self.name))
        self._module_dict[key] = module_class

    def register_module(self, cls):
        """Class decorator: register ``cls`` and return it unchanged."""
        self._register_module(cls)
        return cls


BACKBONES = Registry('backbone')
NECKS = Registry('neck')
HEADS = Registry('head')
DETECTORS = Registry('detector')
-------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/ops/nms/compile.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.system('rm -f *.so') 4 | os.system('python setup.py build_ext --inplace') 5 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/ops/nms/gpu_nms.hpp: -------------------------------------------------------------------------------- 1 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, 2 | int boxes_dim, float nms_overlap_thresh, int device_id, size_t base); 3 | size_t nms_Malloc(); 4 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/FPNlib/mmdet/ops/nms/gpu_nms.pyx: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Faster R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | cimport numpy as np 10 | 11 | assert sizeof(int) == sizeof(np.int32_t) 12 | 13 | cdef extern from "gpu_nms.hpp": 14 | void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int, size_t) nogil 15 | size_t nms_Malloc() nogil 16 | 17 | memory_pool = {} 18 | 19 | def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, 20 | np.int32_t device_id=0): 21 | cdef int boxes_num = dets.shape[0] 22 | cdef int boxes_dim = 5 23 | cdef int num_out 24 | cdef size_t base 25 | cdef np.ndarray[np.int32_t, ndim=1] \ 26 | keep = np.zeros(boxes_num, dtype=np.int32) 27 | cdef np.ndarray[np.float32_t, ndim=1] \ 28 | scores = dets[:, 4] 29 | cdef np.ndarray[np.int_t, ndim=1] \ 30 | order = scores.argsort()[::-1] 31 | cdef np.ndarray[np.float32_t, ndim=2] \ 32 | 
sorted_dets = dets[order, :5] 33 | cdef float cthresh = thresh 34 | if device_id not in memory_pool: 35 | with nogil: 36 | base = nms_Malloc() 37 | memory_pool[device_id] = base 38 | # print "malloc", base 39 | base = memory_pool[device_id] 40 | with nogil: 41 | _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, cthresh, device_id, base) 42 | keep = keep[:num_out] 43 | return list(order[keep]) 44 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Jiayuan Mao 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/PreciseRoIPooling/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/_assets/prroi_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/PreciseRoIPooling/_assets/prroi_visualization.png -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/PreciseRoIPooling/pytorch/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/prroi_pool/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | /_prroi_pooling 3 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/prroi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : __init__.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 
11 | 12 | from .prroi_pool import * 13 | 14 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/prroi_pool/build.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : build.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 11 | 12 | import os 13 | import torch 14 | 15 | from torch.utils.ffi import create_extension 16 | 17 | headers = [] 18 | sources = [] 19 | defines = [] 20 | extra_objects = [] 21 | with_cuda = False 22 | 23 | if torch.cuda.is_available(): 24 | with_cuda = True 25 | 26 | headers+= ['src/prroi_pooling_gpu.h'] 27 | sources += ['src/prroi_pooling_gpu.c'] 28 | defines += [('WITH_CUDA', None)] 29 | 30 | this_file = os.path.dirname(os.path.realpath(__file__)) 31 | extra_objects_cuda = ['src/prroi_pooling_gpu_impl.cu.o'] 32 | extra_objects_cuda = [os.path.join(this_file, fname) for fname in extra_objects_cuda] 33 | extra_objects.extend(extra_objects_cuda) 34 | else: 35 | # TODO(Jiayuan Mao @ 07/13): remove this restriction after we support the cpu implementation. 
36 | raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') 37 | 38 | ffi = create_extension( 39 | '_prroi_pooling', 40 | headers=headers, 41 | sources=sources, 42 | define_macros=defines, 43 | relative_to=__file__, 44 | with_cuda=with_cuda, 45 | extra_objects=extra_objects 46 | ) 47 | 48 | if __name__ == '__main__': 49 | ffi.build() 50 | 51 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : prroi_pool.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 11 | 12 | import torch.nn as nn 13 | 14 | from .functional import prroi_pool2d 15 | 16 | __all__ = ['PrRoIPool2D'] 17 | 18 | 19 | class PrRoIPool2D(nn.Module): 20 | def __init__(self, pooled_height, pooled_width, spatial_scale): 21 | super().__init__() 22 | 23 | self.pooled_height = int(pooled_height) 24 | self.pooled_width = int(pooled_width) 25 | self.spatial_scale = float(spatial_scale) 26 | 27 | def forward(self, features, rois): 28 | return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale) 29 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h: -------------------------------------------------------------------------------- 1 | /* 2 | * File : prroi_pooling_gpu.h 3 | * Author : Jiayuan Mao, Tete Xiao 4 | * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 5 | * Date : 07/13/2018 6 | * 7 | * Distributed under terms of the MIT license. 
8 | * Copyright (c) 2017 Megvii Technology Limited. 9 | */ 10 | 11 | int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale); 12 | 13 | int prroi_pooling_backward_cuda( 14 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 15 | int pooled_height, int pooled_width, float spatial_scale 16 | ); 17 | 18 | int prroi_pooling_coor_backward_cuda( 19 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 20 | int pooled_height, int pooled_width, float spatial_scal 21 | ); 22 | 23 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/prroi_pool/travis.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash -e 2 | # File : travis.sh 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # 6 | # Distributed under terms of the MIT license. 7 | # Copyright (c) 2017 Megvii Technology Limited. 8 | 9 | cd src 10 | echo "Working directory: " `pwd` 11 | echo "Compiling prroi_pooling kernels by nvcc..." 12 | nvcc -c -o prroi_pooling_gpu_impl.cu.o prroi_pooling_gpu_impl.cu -x cu -Xcompiler -fPIC -arch=sm_35 13 | 14 | cd ../ 15 | echo "Working directory: " `pwd` 16 | echo "Building python libraries..." 
17 | python3 build.py 18 | 19 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/PreciseRoIPooling/pytorch/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/PreciseRoIPooling/pytorch/tests/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/libs/RPN/models.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from libs.RPNplspls.resnet_atrous import resnet50 4 | from libs.RPNplspls.neck import AdjustAllLayer 5 | from libs.RPNplspls.head import MultiRPN 6 | 7 | class ModelBuilder(nn.Module): 8 | def __init__(self): 9 | super(ModelBuilder, self).__init__() 10 | 11 | # build backbone 12 | self.backbone = resnet50(used_layers=[2, 3, 4]) 13 | 14 | # build neck 15 | self.neck = AdjustAllLayer([512, 1024, 2048], [128, 256, 512]) 16 | 17 | channels = [128, 256, 512] 18 | 19 | self.rpn_head = MultiRPN(5, channels, True) 20 | 21 | def template(self, z): 22 | zf = self.backbone(z) 23 | zf = self.neck(zf) 24 | self.zf = zf 25 | 26 | def track(self, x): 27 | xf = self.backbone(x) 28 | xf = self.neck(xf) 29 | cls, loc = self.rpn_head(self.zf, xf) 30 | return cls, loc 31 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/RPN/neck.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | class AdjustLayer(nn.Module): 4 | def __init__(self, inplane, outplane): 5 | super(AdjustLayer, self).__init__() 6 | self.downsample = nn.Sequential( 7 | nn.Conv2d(inplane, outplane, kernel_size=1, bias=False), 8 | nn.BatchNorm2d(outplane), 9 | ) 10 | 11 | def forward(self, x): 12 | x = self.downsample(x) 13 | if x.size(3) < 20: 14 | l = 4 15 | r = l + 7 16 | x = x[:, :, 
l:r, l:r] 17 | return x 18 | 19 | class AdjustAllLayer(nn.Module): 20 | def __init__(self, in_channels, out_channels): 21 | super(AdjustAllLayer, self).__init__() 22 | self.num = len(out_channels) 23 | if self.num == 1: 24 | self.downsample = AdjustLayer(in_channels[0], out_channels[0]) 25 | else: 26 | for i in range(self.num): 27 | self.add_module('downsample'+str(i+2), 28 | AdjustLayer(in_channels[i], out_channels[i])) 29 | 30 | def forward(self, features): 31 | if self.num == 1: 32 | return self.downsample(features) 33 | else: 34 | out = [] 35 | for i in range(self.num): 36 | adj_layer = getattr(self, 'downsample'+str(i+2)) 37 | out.append(adj_layer(features[i])) 38 | return out -------------------------------------------------------------------------------- /SiamDW_LT/libs/RPN/xcorr.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | 4 | def xcorr_slow(x, kernel): 5 | """for loop to calculate cross correlation, slow version 6 | """ 7 | batch = x.size()[0] 8 | out = [] 9 | for i in range(batch): 10 | px = x[i] 11 | pk = kernel[i] 12 | px = px.view(1, px.size()[0], px.size()[1], px.size()[2]) 13 | pk = pk.view(-1, px.size()[1], pk.size()[1], pk.size()[2]) 14 | po = F.conv2d(px, pk) 15 | out.append(po) 16 | out = torch.cat(out, 0) 17 | return out 18 | 19 | def xcorr_fast(x, kernel): 20 | """group conv2d to calculate cross correlation, fast version 21 | """ 22 | batch = kernel.size()[0] 23 | pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3]) 24 | px = x.view(1, -1, x.size()[2], x.size()[3]) 25 | po = F.conv2d(px, pk, groups=batch) 26 | po = po.view(batch, -1, po.size()[2], po.size()[3]) 27 | return po 28 | 29 | def xcorr_depthwise(x, kernel): 30 | """depthwise cross correlation 31 | """ 32 | batch = kernel.size(0) 33 | channel = kernel.size(1) 34 | x = x.view(1, batch*channel, x.size(2), x.size(3)) 35 | kernel = kernel.view(batch*channel, 1, kernel.size(2), 
kernel.size(3)) 36 | out = F.conv2d(x, kernel, groups=batch*channel) 37 | out = out.view(batch, channel, out.size(2), out.size(3)) 38 | return out -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/base/__init__.py: -------------------------------------------------------------------------------- 1 | from .basetracker import BaseTracker -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/far_fusion/__init__.py: -------------------------------------------------------------------------------- 1 | from .far_fusion import FAR_FUSION 2 | 3 | def get_tracker_class(): 4 | return FAR_FUSION 5 | 6 | 7 | 8 | # 9 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/far_fusion/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/core/far_fusion/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/far_fusion/__pycache__/far_fusion.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/core/far_fusion/__pycache__/far_fusion.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/far_fusion/__pycache__/flip_star.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/core/far_fusion/__pycache__/flip_star.cpython-36.pyc -------------------------------------------------------------------------------- 
/SiamDW_LT/libs/core/fstar/__init__.py: -------------------------------------------------------------------------------- 1 | from .fstar import FSTAR 2 | 3 | def get_tracker_class(): 4 | return FSTAR 5 | 6 | 7 | 8 | # -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/resnext_far/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnext_libs import Resnextlibs 2 | 3 | def get_tracker_class(): 4 | return Resnextlibs 5 | 6 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/senet_far/__init__.py: -------------------------------------------------------------------------------- 1 | from .senet_libs import Senetlibs 2 | 3 | def get_tracker_class(): 4 | return Senetlibs 5 | 6 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/star/__init__.py: -------------------------------------------------------------------------------- 1 | from .star import STAR 2 | 3 | def get_tracker_class(): 4 | return STAR 5 | 6 | 7 | 8 | # -------------------------------------------------------------------------------- /SiamDW_LT/libs/core/tracker_matlab.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | 3 | 4 | class Tracker: 5 | 6 | def __init__(self, name: str, parameter_name: str): 7 | self.name = name 8 | self.parameter_name = parameter_name 9 | 10 | tracker_module = importlib.import_module('libs.core.{}'.format(self.name)) 11 | 12 | self.parameters = self.get_parameters() 13 | self.tracker_class = tracker_module.get_tracker_class() 14 | 15 | def get_parameters(self): 16 | """Get parameters.""" 17 | 18 | param_module = importlib.import_module('test.settings.{}.{}'.format(self.name, self.parameter_name)) 19 | params = param_module.parameters() 20 | 21 | return params 22 | 23 | 24 | 
-------------------------------------------------------------------------------- /SiamDW_LT/libs/features/color.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from libs.features.featurebase import FeatureBase 4 | 5 | 6 | class RGB(FeatureBase): 7 | """RGB feature normalized to [-0.5, 0.5].""" 8 | def dim(self): 9 | return 3 10 | 11 | def stride(self): 12 | return self.pool_stride 13 | 14 | def extract(self, im: torch.Tensor): 15 | return im/255 - 0.5 16 | 17 | 18 | class Grayscale(FeatureBase): 19 | """Grayscale feature normalized to [-0.5, 0.5].""" 20 | def dim(self): 21 | return 1 22 | 23 | def stride(self): 24 | return self.pool_stride 25 | 26 | def extract(self, im: torch.Tensor): 27 | return torch.mean(im/255 - 0.5, 1, keepdim=True) 28 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/features/util.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from libs.features.featurebase import FeatureBase 4 | 5 | 6 | class Concatenate(FeatureBase): 7 | """A feature that concatenates other features. 8 | args: 9 | features: List of features to concatenate. 
10 | """ 11 | def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True): 12 | super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray) 13 | self.features = features 14 | 15 | self.input_stride = self.features[0].stride() 16 | 17 | for feat in self.features: 18 | if self.input_stride != feat.stride(): 19 | raise ValueError('Strides for the features must be the same for a bultiresolution feature.') 20 | 21 | def dim(self): 22 | return sum([f.dim() for f in self.features]) 23 | 24 | def stride(self): 25 | return self.pool_stride * self.input_stride 26 | 27 | def extract(self, im: torch.Tensor): 28 | return torch.cat([f.get_feature(im) for f in self.features], 1) 29 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | from .arraymisc import * 3 | from .utils import * 4 | from .fileio import * 5 | from .opencv_info import * 6 | from .image import * 7 | from .video import * 8 | from .visualization import * 9 | from .version import __version__ 10 | # The following modules are not imported to this level, so mmcv may be used 11 | # without PyTorch. 
12 | # - runner 13 | # - parallel 14 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/arraymisc/__init__.py: -------------------------------------------------------------------------------- 1 | from .quantization import quantize, dequantize 2 | 3 | __all__ = ['quantize', 'dequantize'] 4 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/cnn/__init__.py: -------------------------------------------------------------------------------- 1 | from .alexnet import AlexNet 2 | from .vgg import VGG, make_vgg_layer 3 | from .resnet import ResNet, make_res_layer 4 | from .weight_init import (constant_init, xavier_init, normal_init, 5 | uniform_init, kaiming_init, caffe2_xavier_init) 6 | 7 | __all__ = [ 8 | 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer', 9 | 'constant_init', 'xavier_init', 'normal_init', 'uniform_init', 10 | 'kaiming_init', 'caffe2_xavier_init' 11 | ] 12 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/__init__.py: -------------------------------------------------------------------------------- 1 | from .io import load, dump, register_handler 2 | from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler 3 | from .parse import list_from_file, dict_from_file 4 | 5 | __all__ = [ 6 | 'load', 'dump', 'register_handler', 'BaseFileHandler', 'JsonHandler', 7 | 'PickleHandler', 'YamlHandler', 'list_from_file', 'dict_from_file' 8 | ] 9 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseFileHandler 2 | from .json_handler import JsonHandler 3 | from .pickle_handler import PickleHandler 4 | from .yaml_handler import YamlHandler 5 | 6 | __all__ = ['BaseFileHandler', 'JsonHandler', 
'PickleHandler', 'YamlHandler'] 7 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/base.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/base.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/json_handler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/json_handler.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseFileHandler(object): 5 | 6 | __metaclass__ = ABCMeta # python 2 compatibility 7 | 8 | @abstractmethod 9 | def load_from_fileobj(self, file, **kwargs): 10 | pass 11 | 12 | @abstractmethod 13 | def dump_to_fileobj(self, obj, file, **kwargs): 14 | pass 15 | 16 | @abstractmethod 17 | def dump_to_str(self, obj, **kwargs): 18 | pass 19 | 20 | def load_from_path(self, filepath, mode='r', **kwargs): 21 | with open(filepath, mode) as f: 22 | return self.load_from_fileobj(f, **kwargs) 23 | 24 | def dump_to_path(self, obj, filepath, mode='w', **kwargs): 25 | with open(filepath, mode) as f: 26 | self.dump_to_fileobj(obj, f, **kwargs) 27 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/json_handler.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from .base import BaseFileHandler 4 | 5 | 6 | class JsonHandler(BaseFileHandler): 7 | 8 | def load_from_fileobj(self, file): 9 | return json.load(file) 10 | 11 | def dump_to_fileobj(self, obj, file, **kwargs): 12 | json.dump(obj, file, **kwargs) 13 | 14 | def dump_to_str(self, obj, **kwargs): 15 | return json.dumps(obj, **kwargs) 16 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/pickle_handler.py: -------------------------------------------------------------------------------- 1 | from six.moves import cPickle as pickle 2 | 3 | from .base import BaseFileHandler 4 | 5 | 6 | class 
PickleHandler(BaseFileHandler): 7 | 8 | def load_from_fileobj(self, file, **kwargs): 9 | return pickle.load(file, **kwargs) 10 | 11 | def load_from_path(self, filepath, **kwargs): 12 | return super(PickleHandler, self).load_from_path( 13 | filepath, mode='rb', **kwargs) 14 | 15 | def dump_to_str(self, obj, **kwargs): 16 | kwargs.setdefault('protocol', 2) 17 | return pickle.dumps(obj, **kwargs) 18 | 19 | def dump_to_fileobj(self, obj, file, **kwargs): 20 | kwargs.setdefault('protocol', 2) 21 | pickle.dump(obj, file, **kwargs) 22 | 23 | def dump_to_path(self, obj, filepath, **kwargs): 24 | super(PickleHandler, self).dump_to_path( 25 | obj, filepath, mode='wb', **kwargs) 26 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/fileio/handlers/yaml_handler.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | try: 3 | from yaml import CLoader as Loader, CDumper as Dumper 4 | except ImportError: 5 | from yaml import Loader, Dumper 6 | 7 | from .base import BaseFileHandler 8 | 9 | 10 | class YamlHandler(BaseFileHandler): 11 | 12 | def load_from_fileobj(self, file, **kwargs): 13 | kwargs.setdefault('Loader', Loader) 14 | return yaml.load(file, **kwargs) 15 | 16 | def dump_to_fileobj(self, obj, file, **kwargs): 17 | kwargs.setdefault('Dumper', Dumper) 18 | yaml.dump(obj, file, **kwargs) 19 | 20 | def dump_to_str(self, obj, **kwargs): 21 | kwargs.setdefault('Dumper', Dumper) 22 | return yaml.dump(obj, **kwargs) 23 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/__init__.py: -------------------------------------------------------------------------------- 1 | from .io import imread, imwrite, imfrombytes 2 | from .transforms import (bgr2gray, gray2bgr, bgr2rgb, rgb2bgr, bgr2hsv, 3 | hsv2bgr, bgr2hls, hls2bgr, iminvert, imflip, imrotate, 4 | imcrop, impad, impad_to_multiple, imnormalize, 5 | imdenormalize, imresize, 
imresize_like, imrescale) 6 | 7 | __all__ = [ 8 | 'imread', 'imwrite', 'imfrombytes', 'bgr2gray', 'gray2bgr', 'bgr2rgb', 9 | 'rgb2bgr', 'bgr2hsv', 'hsv2bgr', 'bgr2hls', 'hls2bgr', 'iminvert', 10 | 'imflip', 'imrotate', 'imcrop', 'impad', 'impad_to_multiple', 11 | 'imnormalize', 'imdenormalize', 'imresize', 'imresize_like', 'imrescale' 12 | ] 13 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | from .colorspace import (bgr2gray, gray2bgr, bgr2rgb, rgb2bgr, bgr2hsv, 2 | hsv2bgr, bgr2hls, hls2bgr, iminvert) 3 | from .geometry import imflip, imrotate, imcrop, impad, impad_to_multiple 4 | from .normalize import imnormalize, imdenormalize 5 | from .resize import imresize, imresize_like, imrescale 6 | 7 | __all__ = [ 8 | 'bgr2gray', 'gray2bgr', 'bgr2rgb', 'rgb2bgr', 'bgr2hsv', 'hsv2bgr', 9 | 'bgr2hls', 'hls2bgr', 'iminvert', 'imflip', 'imrotate', 'imcrop', 'impad', 10 | 'impad_to_multiple', 'imnormalize', 'imdenormalize', 'imresize', 11 | 'imresize_like', 'imrescale' 12 | ] 13 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/transforms/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/image/transforms/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/transforms/__pycache__/colorspace.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/image/transforms/__pycache__/colorspace.cpython-36.pyc 
-------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/transforms/__pycache__/geometry.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/image/transforms/__pycache__/geometry.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/transforms/__pycache__/normalize.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/image/transforms/__pycache__/normalize.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/transforms/__pycache__/resize.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/image/transforms/__pycache__/resize.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/image/transforms/normalize.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .colorspace import bgr2rgb, rgb2bgr 4 | 5 | 6 | def imnormalize(img, mean, std, to_rgb=True): 7 | img = img.astype(np.float32) 8 | if to_rgb: 9 | img = bgr2rgb(img) 10 | return (img - mean) / std 11 | 12 | 13 | def imdenormalize(img, mean, std, to_bgr=True): 14 | img = (img * std) + mean 15 | if to_bgr: 16 | img = rgb2bgr(img) 17 | return img 18 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/opencv_info.py: 
-------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | 4 | def use_opencv2(): 5 | return cv2.__version__.split('.')[0] == '2' 6 | 7 | 8 | USE_OPENCV2 = use_opencv2() 9 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/parallel/__init__.py: -------------------------------------------------------------------------------- 1 | from .collate import collate 2 | from .data_container import DataContainer 3 | from .data_parallel import MMDataParallel 4 | from .distributed import MMDistributedDataParallel 5 | from .scatter_gather import scatter, scatter_kwargs 6 | 7 | __all__ = [ 8 | 'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel', 9 | 'scatter', 'scatter_kwargs' 10 | ] 11 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/parallel/data_parallel.py: -------------------------------------------------------------------------------- 1 | from torch.nn.parallel import DataParallel 2 | 3 | from .scatter_gather import scatter_kwargs 4 | 5 | 6 | class MMDataParallel(DataParallel): 7 | 8 | def scatter(self, inputs, kwargs, device_ids): 9 | return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) 10 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/__init__.py: -------------------------------------------------------------------------------- 1 | from .runner import Runner 2 | from .log_buffer import LogBuffer 3 | from .hooks import (Hook, CheckpointHook, ClosureHook, LrUpdaterHook, 4 | OptimizerHook, IterTimerHook, DistSamplerSeedHook, 5 | LoggerHook, TextLoggerHook, PaviLoggerHook, 6 | TensorboardLoggerHook) 7 | from .checkpoint import (load_state_dict, load_checkpoint, weights_to_cpu, 8 | save_checkpoint) 9 | from .parallel_test import parallel_test 10 | from .priority import Priority, get_priority 11 | from .utils import (get_host_info, 
get_dist_info, master_only, get_time_str, 12 | obj_from_dict) 13 | 14 | __all__ = [ 15 | 'Runner', 'LogBuffer', 'Hook', 'CheckpointHook', 'ClosureHook', 16 | 'LrUpdaterHook', 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 17 | 'LoggerHook', 'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook', 18 | 'load_state_dict', 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 19 | 'parallel_test', 'Priority', 'get_priority', 'get_host_info', 20 | 'get_dist_info', 'master_only', 'get_time_str', 'obj_from_dict' 21 | ] 22 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | from .hook import Hook 2 | from .checkpoint import CheckpointHook 3 | from .closure import ClosureHook 4 | from .lr_updater import LrUpdaterHook 5 | from .optimizer import OptimizerHook 6 | from .iter_timer import IterTimerHook 7 | from .sampler_seed import DistSamplerSeedHook 8 | from .memory import EmptyCacheHook 9 | from .logger import (LoggerHook, TextLoggerHook, PaviLoggerHook, 10 | TensorboardLoggerHook) 11 | 12 | __all__ = [ 13 | 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', 'OptimizerHook', 14 | 'IterTimerHook', 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 15 | 'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook' 16 | ] 17 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/checkpoint.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/checkpoint.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/closure.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/closure.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/hook.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/hook.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/iter_timer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/iter_timer.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/lr_updater.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/lr_updater.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/memory.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/memory.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/optimizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/optimizer.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/checkpoint.py: -------------------------------------------------------------------------------- 1 | from .hook import Hook 2 | from ..utils import master_only 3 | 4 | 5 | class CheckpointHook(Hook): 6 | 7 | def __init__(self, 8 | interval=-1, 9 | save_optimizer=True, 10 | out_dir=None, 11 | **kwargs): 12 | self.interval = interval 13 | self.save_optimizer = save_optimizer 14 | self.out_dir = out_dir 15 | self.args = kwargs 16 | 17 | @master_only 18 | def after_train_epoch(self, runner): 19 | if not self.every_n_epochs(runner, self.interval): 20 | return 21 | 22 | if not self.out_dir: 23 | self.out_dir = runner.work_dir 24 | runner.save_checkpoint( 25 | self.out_dir, save_optimizer=self.save_optimizer, **self.args) 26 | -------------------------------------------------------------------------------- 
/SiamDW_LT/libs/mmcv/runner/hooks/closure.py: -------------------------------------------------------------------------------- 1 | from .hook import Hook 2 | 3 | 4 | class ClosureHook(Hook): 5 | 6 | def __init__(self, fn_name, fn): 7 | assert hasattr(self, fn_name) 8 | assert callable(fn) 9 | setattr(self, fn_name, fn) 10 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/hook.py: -------------------------------------------------------------------------------- 1 | class Hook(object): 2 | 3 | def before_run(self, runner): 4 | pass 5 | 6 | def after_run(self, runner): 7 | pass 8 | 9 | def before_epoch(self, runner): 10 | pass 11 | 12 | def after_epoch(self, runner): 13 | pass 14 | 15 | def before_iter(self, runner): 16 | pass 17 | 18 | def after_iter(self, runner): 19 | pass 20 | 21 | def before_train_epoch(self, runner): 22 | self.before_epoch(runner) 23 | 24 | def before_val_epoch(self, runner): 25 | self.before_epoch(runner) 26 | 27 | def after_train_epoch(self, runner): 28 | self.after_epoch(runner) 29 | 30 | def after_val_epoch(self, runner): 31 | self.after_epoch(runner) 32 | 33 | def before_train_iter(self, runner): 34 | self.before_iter(runner) 35 | 36 | def before_val_iter(self, runner): 37 | self.before_iter(runner) 38 | 39 | def after_train_iter(self, runner): 40 | self.after_iter(runner) 41 | 42 | def after_val_iter(self, runner): 43 | self.after_iter(runner) 44 | 45 | def every_n_epochs(self, runner, n): 46 | return (runner.epoch + 1) % n == 0 if n > 0 else False 47 | 48 | def every_n_inner_iters(self, runner, n): 49 | return (runner.inner_iter + 1) % n == 0 if n > 0 else False 50 | 51 | def every_n_iters(self, runner, n): 52 | return (runner.iter + 1) % n == 0 if n > 0 else False 53 | 54 | def end_of_epoch(self, runner): 55 | return runner.inner_iter + 1 == len(runner.data_loader) 56 | -------------------------------------------------------------------------------- 
/SiamDW_LT/libs/mmcv/runner/hooks/iter_timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from .hook import Hook 4 | 5 | 6 | class IterTimerHook(Hook): 7 | 8 | def before_epoch(self, runner): 9 | self.t = time.time() 10 | 11 | def before_iter(self, runner): 12 | runner.log_buffer.update({'data_time': time.time() - self.t}) 13 | 14 | def after_iter(self, runner): 15 | runner.log_buffer.update({'time': time.time() - self.t}) 16 | self.t = time.time() 17 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/logger/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import LoggerHook 2 | from .pavi import PaviLoggerHook 3 | from .tensorboard import TensorboardLoggerHook 4 | from .text import TextLoggerHook 5 | 6 | __all__ = [ 7 | 'LoggerHook', 'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook' 8 | ] 9 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/base.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/base.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/text.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/libs/mmcv/runner/hooks/logger/__pycache__/text.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/memory.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .hook import Hook 4 | 5 | 6 | class EmptyCacheHook(Hook): 7 | 8 | def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): 9 | self._before_epoch = before_epoch 10 | self._after_epoch = after_epoch 11 | self._after_iter = after_iter 12 | 13 | def after_iter(self, runner): 14 | if self._after_iter: 15 | torch.cuda.empty_cache() 16 | 17 | def before_epoch(self, runner): 18 | if self._before_epoch: 19 | torch.cuda.empty_cache() 20 | 21 | def after_epoch(self, runner): 22 | if self._after_epoch: 23 | torch.cuda.empty_cache() 24 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/optimizer.py: 
-------------------------------------------------------------------------------- 1 | from torch.nn.utils import clip_grad 2 | 3 | from .hook import Hook 4 | 5 | 6 | class OptimizerHook(Hook): 7 | 8 | def __init__(self, grad_clip=None): 9 | self.grad_clip = grad_clip 10 | 11 | def clip_grads(self, params): 12 | clip_grad.clip_grad_norm_( 13 | filter(lambda p: p.requires_grad, params), **self.grad_clip) 14 | 15 | def after_train_iter(self, runner): 16 | runner.optimizer.zero_grad() 17 | runner.outputs['loss'].backward() 18 | if self.grad_clip is not None: 19 | self.clip_grads(runner.model.parameters()) 20 | runner.optimizer.step() 21 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/hooks/sampler_seed.py: -------------------------------------------------------------------------------- 1 | from .hook import Hook 2 | 3 | 4 | class DistSamplerSeedHook(Hook): 5 | 6 | def before_epoch(self, runner): 7 | runner.data_loader.sampler.set_epoch(runner.epoch) 8 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/log_buffer.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import numpy as np 4 | 5 | 6 | class LogBuffer(object): 7 | 8 | def __init__(self): 9 | self.val_history = OrderedDict() 10 | self.n_history = OrderedDict() 11 | self.output = OrderedDict() 12 | self.ready = False 13 | 14 | def clear(self): 15 | self.val_history.clear() 16 | self.n_history.clear() 17 | self.clear_output() 18 | 19 | def clear_output(self): 20 | self.output.clear() 21 | self.ready = False 22 | 23 | def update(self, vars, count=1): 24 | assert isinstance(vars, dict) 25 | for key, var in vars.items(): 26 | if key not in self.val_history: 27 | self.val_history[key] = [] 28 | self.n_history[key] = [] 29 | self.val_history[key].append(var) 30 | self.n_history[key].append(count) 31 | 32 | def 
average(self, n=0): 33 | """Average latest n values or all values""" 34 | assert n >= 0 35 | for key in self.val_history: 36 | values = np.array(self.val_history[key][-n:]) 37 | nums = np.array(self.n_history[key][-n:]) 38 | avg = np.sum(values * nums) / np.sum(nums) 39 | self.output[key] = avg 40 | self.ready = True 41 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/runner/priority.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class Priority(Enum): 5 | """Hook priority levels. 6 | 7 | +------------+------------+ 8 | | Level | Value | 9 | +============+============+ 10 | | HIGHEST | 0 | 11 | +------------+------------+ 12 | | VERY_HIGH | 10 | 13 | +------------+------------+ 14 | | HIGH | 30 | 15 | +------------+------------+ 16 | | NORMAL | 50 | 17 | +------------+------------+ 18 | | LOW | 70 | 19 | +------------+------------+ 20 | | VERY_LOW | 90 | 21 | +------------+------------+ 22 | | LOWEST | 100 | 23 | +------------+------------+ 24 | """ 25 | 26 | HIGHEST = 0 27 | VERY_HIGH = 10 28 | HIGH = 30 29 | NORMAL = 50 30 | LOW = 70 31 | VERY_LOW = 90 32 | LOWEST = 100 33 | 34 | 35 | def get_priority(priority): 36 | """Get priority value. 37 | 38 | Args: 39 | priority (int or str or :obj:`Priority`): Priority. 40 | 41 | Returns: 42 | int: The priority value. 
43 | """ 44 | if isinstance(priority, int): 45 | if priority < 0 or priority > 100: 46 | raise ValueError('priority must be between 0 and 100') 47 | return priority 48 | elif isinstance(priority, Priority): 49 | return priority.value 50 | elif isinstance(priority, str): 51 | return Priority[priority.upper()].value 52 | else: 53 | raise TypeError('priority must be an integer or Priority enum value') 54 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import ConfigDict, Config 2 | from .misc import (is_str, iter_cast, list_cast, tuple_cast, is_seq_of, 3 | is_list_of, is_tuple_of, slice_list, concat_list, 4 | check_prerequisites, requires_package, requires_executable) 5 | from .path import (is_filepath, fopen, check_file_exist, mkdir_or_exist, 6 | symlink, scandir, FileNotFoundError) 7 | from .progressbar import ProgressBar, track_progress, track_parallel_progress 8 | from .timer import Timer, TimerError, check_time 9 | 10 | __all__ = [ 11 | 'ConfigDict', 'Config', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 12 | 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', 13 | 'check_prerequisites', 'requires_package', 'requires_executable', 14 | 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink', 15 | 'scandir', 'FileNotFoundError', 'ProgressBar', 'track_progress', 16 | 'track_parallel_progress', 'Timer', 'TimerError', 'check_time' 17 | ] 18 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.2.8' 2 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/video/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .io import Cache, VideoReader, frames2video 2 | from .processing import convert_video, resize_video, cut_video, concat_video 3 | from .optflow import flowread, flowwrite, quantize_flow, dequantize_flow 4 | 5 | __all__ = [ 6 | 'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video', 7 | 'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow', 8 | 'dequantize_flow' 9 | ] 10 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .color import Color, color_val 2 | from .image import imshow, imshow_bboxes, imshow_det_bboxes 3 | from .optflow import flowshow, flow2rgb, make_color_wheel 4 | 5 | __all__ = [ 6 | 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes', 7 | 'flowshow', 'flow2rgb', 'make_color_wheel' 8 | ] 9 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/mmcv/visualization/color.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | import numpy as np 4 | 5 | from mmcv.utils import is_str 6 | 7 | 8 | class Color(Enum): 9 | """An enum that defines common colors. 10 | 11 | Contains red, green, blue, cyan, yellow, magenta, white and black. 12 | """ 13 | red = (0, 0, 255) 14 | green = (0, 255, 0) 15 | blue = (255, 0, 0) 16 | cyan = (255, 255, 0) 17 | yellow = (0, 255, 255) 18 | magenta = (255, 0, 255) 19 | white = (255, 255, 255) 20 | black = (0, 0, 0) 21 | 22 | 23 | def color_val(color): 24 | """Convert various input to color tuples. 25 | 26 | Args: 27 | color (:obj:`Color`/str/tuple/int/ndarray): Color inputs 28 | 29 | Returns: 30 | tuple[int]: A tuple of 3 integers indicating BGR channels. 
31 | """ 32 | if is_str(color): 33 | return Color[color].value 34 | elif isinstance(color, Color): 35 | return color.value 36 | elif isinstance(color, tuple): 37 | assert len(color) == 3 38 | for channel in color: 39 | assert channel >= 0 and channel <= 255 40 | return color 41 | elif isinstance(color, int): 42 | assert color >= 0 and color <= 255 43 | return color, color, color 44 | elif isinstance(color, np.ndarray): 45 | assert color.ndim == 1 and color.size == 3 46 | assert np.all((color >= 0) & (color <= 255)) 47 | color = color.astype(np.uint8) 48 | return tuple(color) 49 | else: 50 | raise TypeError('Invalid type for color: {}'.format(type(color))) 51 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/models/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnet import * 2 | from .resnext import * 3 | from .senet import * 4 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/models/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .iounet import IoUNet 2 | from .iounet_dropout import IoUNet_dropout 3 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/models/modules/blocks.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | def conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True, 4 | batch_norm=True, relu=True): 5 | layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, 6 | padding=padding, dilation=dilation, bias=bias)] 7 | if batch_norm: 8 | layers.append(nn.BatchNorm2d(out_planes)) 9 | if relu: 10 | layers.append(nn.ReLU(inplace=True)) 11 | return nn.Sequential(*layers) 12 | 13 | 14 | class LinearBlock(nn.Module): 15 | def __init__(self, in_planes, out_planes, 
input_sz, bias=True, batch_norm=True, relu=True): 16 | super().__init__() 17 | self.linear = nn.Linear(in_planes*input_sz*input_sz, out_planes, bias=bias) 18 | self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None 19 | self.relu = nn.ReLU(inplace=True) if relu else None 20 | 21 | def forward(self, x): 22 | x = self.linear(x.view(x.shape[0], -1)) 23 | if self.bn is not None: 24 | x = self.bn(x.view(x.shape[0], x.shape[1], 1, 1)) 25 | if self.relu is not None: 26 | x = self.relu(x) 27 | return x.view(x.shape[0], -1) -------------------------------------------------------------------------------- /SiamDW_LT/libs/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .params import * 2 | from .tensordict import * -------------------------------------------------------------------------------- /SiamDW_LT/libs/utils/params.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from libs.utils.tensorlist import TensorList 4 | 5 | 6 | class TrackerParams: 7 | """Class for tracker parameters.""" 8 | def free_memory(self): 9 | for a in dir(self): 10 | if not a.startswith('__') and hasattr(getattr(self, a), 'free_memory'): 11 | getattr(self, a).free_memory() 12 | 13 | 14 | class FeatureParams: 15 | """Class for feature specific parameters""" 16 | def __init__(self, *args, **kwargs): 17 | if len(args) > 0: 18 | raise ValueError 19 | 20 | for name, val in kwargs.items(): 21 | if isinstance(val, list): 22 | setattr(self, name, TensorList(val)) 23 | else: 24 | setattr(self, name, val) 25 | 26 | 27 | def Choice(*args): 28 | """Can be used to sample random parameter values.""" 29 | return random.choice(args) 30 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/utils/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | class EnvSettings: 5 | def 
__init__(self): 6 | main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) 7 | 8 | self.network_path = '{}/../test/networks/'.format(main_path) 9 | 10 | 11 | def local_env_settings(): 12 | settings = EnvSettings() 13 | return settings 14 | -------------------------------------------------------------------------------- /SiamDW_LT/libs/utils/tensordict.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from collections import OrderedDict 4 | 5 | class TensorDict(OrderedDict): 6 | """Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality.""" 7 | 8 | def concat(self, other): 9 | """Concatenates two dicts without copying internal data.""" 10 | return TensorDict(self, **other) 11 | 12 | def copy(self): 13 | return TensorDict(super(TensorDict, self).copy()) 14 | 15 | def __getattr__(self, name): 16 | if not hasattr(torch.Tensor, name): 17 | raise AttributeError('\'TensorDict\' object has not attribute \'{}\''.format(name)) 18 | 19 | def apply_attr(*args, **kwargs): 20 | return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()}) 21 | return apply_attr 22 | 23 | def attribute(self, attr: str, *args): 24 | return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()}) 25 | 26 | def apply(self, fn, *args, **kwargs): 27 | return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()}) 28 | 29 | @staticmethod 30 | def _iterable(a): 31 | return isinstance(a, (TensorDict, list)) 32 | 33 | -------------------------------------------------------------------------------- /SiamDW_LT/test/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 hongyuan_yu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 
| in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SiamDW_LT/test/matlab/_init_paths.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import os.path as osp 6 | import sys 7 | 8 | 9 | def add_path(path): 10 | if path not in sys.path: 11 | sys.path.insert(0, path) 12 | 13 | 14 | this_dir = osp.dirname(__file__) 15 | 16 | lib_path = osp.join(this_dir, '..') 17 | add_path(lib_path) 18 | -------------------------------------------------------------------------------- /SiamDW_LT/test/requirements: -------------------------------------------------------------------------------- 1 | torch==0.3.1 2 | numpy==1.12.1 3 | opencv-python==3.1.0.5 4 | torchvision 5 | matplotlib==2.2.2 6 | cffi 7 | mpi4py 8 | pytorch_fft -------------------------------------------------------------------------------- /SiamDW_LT/test/settings/datasets/__init__.py: 
-------------------------------------------------------------------------------- 1 | from settings.datasets.votlt18dataset import VOTLT18Dataset 2 | from settings.datasets.votlt19dataset import VOTLT19Dataset 3 | from settings.datasets.oxuvadataset import OXUVADataset 4 | from settings.datasets.lasotdataset import LaSOTDataset 5 | from settings.datasets.otbdataset import OTBDataset 6 | from settings.datasets.rgbd_ddataset import VOTRGBD19DDataset 7 | from settings.datasets.rgbd_rgbdataset import VOTRGBD19RGBDataset 8 | from settings.datasets.nfsdataset import NFSDataset 9 | from settings.datasets.uavdataset import UAVDataset 10 | from settings.datasets.tpldataset import TPLDataset 11 | from settings.datasets.vot18dataset import VOT18Dataset 12 | from settings.datasets.vot19dataset import VOT19Dataset 13 | from settings.datasets.got10kdataset import GOT10KDatasetTest, GOT10KDatasetVal, GOT10KDatasetLTRVal 14 | from settings.datasets.data import Sequence 15 | 16 | -------------------------------------------------------------------------------- /SiamDW_LT/test/settings/envs.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class EnvSettings: 4 | def __init__(self): 5 | main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) 6 | self.results_path = '{}/tracking_results/'.format(main_path) 7 | self.otb_path = '{}/data/OTB2015'.format(main_path) 8 | self.nfs_path = '' 9 | self.uav_path = '' 10 | self.tpl_path = '' 11 | self.vot18_path = '' 12 | self.vot19_path = '' 13 | self.votlt18_path = '{}/data/VOT2018-LT'.format(main_path) 14 | self.votlt19_path = '{}/data/VOT2019-LT'.format(main_path) 15 | self.oxuva_path = '{}/data/long-term-tracking-benchmark/dataset/images'.format(main_path) 16 | self.oxuva_list = '{}/data/long-term-tracking-benchmark/dataset/tasks/test.csv'.format(main_path) 17 | self.rgbd_path = '{}/data/VOT2019-RGBD'.format(main_path) 18 | self.got10k_path = '' 19 | self.lasot_path = 
'{}/data/LaSOTBenchmark'.format(main_path) 20 | self.trackingnet_path = '' 21 | 22 | -------------------------------------------------------------------------------- /SiamDW_LT/test/settings/far_fusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/test/settings/far_fusion/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/test/settings/fstar/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/test/settings/fstar/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/test/settings/resnext_far/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/test/settings/resnext_far/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/test/settings/senet_far/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/test/settings/senet_far/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/test/settings/star/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_LT/test/settings/star/__init__.py -------------------------------------------------------------------------------- /SiamDW_LT/test/utils/__init__.py: 
-------------------------------------------------------------------------------- 1 | # from .evaluation import * 2 | from .params import * -------------------------------------------------------------------------------- /SiamDW_LT/test/utils/draw_plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import random 3 | 4 | def draw_line(x, y, name): 5 | for idx, xx in enumerate(x): 6 | yy = y[idx] 7 | plt.plot(xx, yy, color=(random.random(), random.random(), random.random())) 8 | 9 | plt.savefig(name) 10 | 11 | def main(): 12 | X = [] 13 | Y = [] 14 | 15 | x = [0.38,0.359263 , 16 | 17 | 0.3559 , 18 | 19 | 0.393935, 20 | 21 | 0.357380 , 22 | 23 | 0.348148 , 24 | 25 | 0.373181 , 26 | 27 | 0.378639 , 28 | 29 | 0.392282 , 30 | 31 | 0.361640 , 32 | 33 | 0.370160 , 34 | 35 | 0.385925 , 36 | 37 | 0.371413 , 38 | 39 | 0.375 , 40 | 41 | 0.371292 ] 42 | y=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] 43 | X.append(x) 44 | Y.append(y) 45 | draw_line(Y, X, "/data/home/v-had/debug.jpg") 46 | 47 | if __name__ == '__main__': 48 | main() -------------------------------------------------------------------------------- /SiamDW_LT/test/utils/extract_backbone.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import collections 4 | 5 | def main(): 6 | chk_path = '/data/home/v-had/github/VOT-2019/Hao/pytracking/pytracking/networks/res50_unfix2_TLVC.pth.tar' 7 | save_path = '/data/home/v-had/.torch/models/res50_unfix.pth' 8 | example = '/data/home/v-had/.torch/models/resnet50-19c8e357.pth' 9 | chk = torch.load(chk_path) 10 | model = chk['net'] 11 | new_backbone = collections.OrderedDict() 12 | for key in model.keys(): 13 | if 'feature_extractor' in key: 14 | new_key = key[18:] 15 | new_backbone[new_key] = model[key] 16 | torch.save(new_backbone, save_path) 17 | 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 
-------------------------------------------------------------------------------- /SiamDW_LT/test/utils/gdrive_download: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The script taken from https://www.matthuisman.nz/2019/01/download-google-drive-files-wget-curl.html 4 | 5 | url=$1 6 | filename=$2 7 | 8 | [ -z "$url" ] && echo A URL or ID is required first argument && exit 1 9 | 10 | fileid="" 11 | declare -a patterns=("s/.*\/file\/d\/\(.*\)\/.*/\1/p" "s/.*id\=\(.*\)/\1/p" "s/\(.*\)/\1/p") 12 | for i in "${patterns[@]}" 13 | do 14 | fileid=$(echo $url | sed -n $i) 15 | [ ! -z "$fileid" ] && break 16 | done 17 | 18 | [ -z "$fileid" ] && echo Could not find Google ID && exit 1 19 | 20 | echo File ID: $fileid 21 | 22 | tmp_file="$filename.$$.file" 23 | tmp_cookies="$filename.$$.cookies" 24 | tmp_headers="$filename.$$.headers" 25 | 26 | url='https://docs.google.com/uc?export=download&id='$fileid 27 | echo Downloading: "$url > $tmp_file" 28 | wget --save-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 29 | 30 | if [[ ! $(find "$tmp_file" -type f -size +10000c 2>/dev/null) ]]; then 31 | confirm=$(cat "$tmp_file" | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1/p') 32 | fi 33 | 34 | if [ ! -z "$confirm" ]; then 35 | url='https://docs.google.com/uc?export=download&id='$fileid'&confirm='$confirm 36 | echo Downloading: "$url > $tmp_file" 37 | wget --load-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 38 | fi 39 | 40 | [ -z "$filename" ] && filename=$(cat "$tmp_headers" | sed -rn 's/.*filename=\"(.*)\".*/\1/p') 41 | [ -z "$filename" ] && filename="google_drive.file" 42 | 43 | echo Moving: "$tmp_file > $filename" 44 | 45 | mv "$tmp_file" "$filename" 46 | 47 | rm -f "$tmp_cookies" "$tmp_headers" 48 | 49 | echo Saved: "$filename" 50 | echo DONE! 
51 | 52 | exit 0 53 | -------------------------------------------------------------------------------- /SiamDW_LT/test/utils/generate_epoch_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def main(): 4 | template_path = "parameter/atom/restore_vgg19_TLVC.py" 5 | data = [] 6 | with open(template_path, "r") as f: 7 | for line in f: 8 | data.append(line) 9 | 10 | for i in range(28, 41): 11 | file_path = "parameter/atom/restore_vgg19_TLVC_{}.py".format(i) 12 | with open(file_path, "w") as f: 13 | for item in data: 14 | if "new_vgg19_TLVC" in item: 15 | print(item) 16 | item = item.replace('new_vgg19_TLVC', 'ATOMnet_ep00{:02d}'.format(i)) 17 | f.write(item) 18 | 19 | if __name__ == '__main__': 20 | main() -------------------------------------------------------------------------------- /SiamDW_LT/test/utils/get_eao.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def main(): 4 | path = '/data/home/v-had/new_unfix.html' 5 | sta = ['unread', 'ST', 'restore'] 6 | with open(path, "r") as f: 7 | for line in f: 8 | if line.startswith(""): 9 | line = line.replace('>', " ") 10 | line = line.replace('<', ' ') 11 | line = line.split(" ") 12 | data = [] 13 | for idx, item in enumerate(line): 14 | for x in sta: 15 | if item.startswith(x): 16 | data.append(item) 17 | if item.startswith("0"): 18 | data.append(item) 19 | print(len(data)) 20 | res = dict() 21 | for i in range(len(data)): 22 | if i % 2 == 1: continue 23 | if res.get(data[i][:-4]) == None: 24 | res[data[i][:-4]] = [float(data[i+1])] 25 | else: 26 | res[data[i][:-4]].append(float(data[i+1])) 27 | for key in res.keys(): 28 | data = res[key] 29 | data = np.array(data) 30 | print(key, data.mean()) 31 | 32 | if __name__ == '__main__': 33 | main() -------------------------------------------------------------------------------- /SiamDW_LT/test/utils/params.py: 
class FeatureParams:
    """Container of per-feature parameters.

    Accepts keyword arguments only; each keyword becomes an attribute on
    the instance. List values are wrapped in a TensorList, everything
    else is stored as-is.
    """

    def __init__(self, *args, **kwargs):
        # Positional arguments are not supported.
        if args:
            raise ValueError

        for key, value in kwargs.items():
            wrapped = TensorList(value) if isinstance(value, list) else value
            setattr(self, key, wrapped)
34 | """ 35 | a_np = a.squeeze().cpu().clone().detach().numpy() 36 | if a_np.ndim > 1: 37 | raise ValueError 38 | plt.figure(fig_num) 39 | # plt.tight_layout() 40 | plt.cla() 41 | plt.plot(a_np) 42 | if title is not None: 43 | plt.title(title) 44 | plt.draw() 45 | plt.pause(0.001) 46 | -------------------------------------------------------------------------------- /SiamDW_T/install.sh: -------------------------------------------------------------------------------- 1 | pip install torch==0.3.1 2 | pip install cffi 3 | pip install opencv-python 4 | pip install torchvision==0.2.1 5 | pip install pytorch_fft 6 | pip install shapely 7 | 8 | echo "****************** Installing PreROIPooling ******************" 9 | base_dir=$(pwd) 10 | cd libs/models/external/PreciseRoIPooling/pytorch/prroi_pool 11 | PATH=/usr/local/cuda/bin/:$PATH 12 | bash travis.sh 13 | cd $base_dir 14 | 15 | echo "" 16 | echo "****************** Installation complete! ******************" 17 | -------------------------------------------------------------------------------- /SiamDW_T/libs/core/__init__.py: -------------------------------------------------------------------------------- 1 | from core.libs import TensorList, TensorDict 2 | import core.libs.complex as complex 3 | import core.libs.operation as operation 4 | import core.libs.fourier as fourier 5 | import core.libs.dcf as dcf 6 | import core.libs.optimization as optimization 7 | -------------------------------------------------------------------------------- /SiamDW_T/libs/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .data import Sequence 2 | from .tracker import Tracker 3 | -------------------------------------------------------------------------------- /SiamDW_T/libs/core/evaluation/local.py: -------------------------------------------------------------------------------- 1 | import os 2 | from core.evaluation.environment import EnvSettings 3 | 4 | def 
class Tracker:
    """Evaluation/running wrapper around a tracking method.

    args:
        name: Name of tracking method (module under core.tracker).
        parameter_name: Name of parameter file (module under core.parameter.<name>).
        exp_name: Experiment name.
        run_id: The run id.
        checkpoint_id: Optional checkpoint number forwarded to the parameter module.
        flag: Optional modality tag (e.g. 'RGB' or 'T').
    """

    def __init__(self, name: str, parameter_name: str, exp_name: str, run_id: int = None, checkpoint_id = None, flag: str = None):
        self.name = name
        self.exp_name = exp_name
        self.parameter_name = parameter_name
        self.run_id = run_id
        self.flag = flag
        self.checkpoint_id = checkpoint_id

        # NOTE(review): the returned settings object is unused here --
        # presumably kept for its construction side effects; confirm.
        env = env_settings()

        tracker_module = importlib.import_module('core.tracker.{}'.format(self.name))

        self.parameters = self.get_parameters()
        self.tracker_class = tracker_module.get_tracker_class()

        # Conservative defaults when the parameter file omits these options.
        self.default_visualization = getattr(self.parameters, 'visualization', False)
        self.default_debug = getattr(self.parameters, 'debug', 0)

    def get_parameters(self):
        """Import this tracker's parameter module and build its params."""
        param_module = importlib.import_module('core.parameter.{}.{}'.format(self.name, self.parameter_name))
        return param_module.parameters(self.checkpoint_id)
# two nets
def res50_twonets(run_id):  # rgb of rgbt
    """Build 15 paired (RGB, thermal) trackers plus the thermal dataset."""
    trackers = [
        [
            Tracker('improved', 'unrestore_res50_RGB', 'RGBandT', run, flag='RGB'),
            Tracker('improved', 'unrestore_res50_T', 'RGBandT', run, flag='T'),
        ]
        for run in range(15)
    ]
    return trackers, TDataset()
class Concatenate(FeatureBase):
    """A feature that channel-concatenates the outputs of other features.

    args:
        features: Non-empty list of features to concatenate. All features
            must report the same stride; the first feature's stride is
            used as the reference input stride.
    """
    def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True):
        super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray)
        self.features = features

        # Reference stride; an empty feature list raises IndexError here.
        self.input_stride = self.features[0].stride()

        for feat in self.features:
            if self.input_stride != feat.stride():
                # Fixed typo in the original message ("bultiresolution").
                raise ValueError('Strides for the features must be the same for a multiresolution feature.')

    def dim(self):
        """Total dimensionality: sum of the dims of the wrapped features."""
        return sum(f.dim() for f in self.features)

    def stride(self):
        """Effective stride: pooling stride times the shared input stride."""
        return self.pool_stride * self.input_stride

    def extract(self, im: torch.Tensor):
        """Extract every wrapped feature and concatenate along dim 1 (channels)."""
        return torch.cat([f.get_feature(im) for f in self.features], 1)
def pth_nms(dets, thresh):
    """Non-maximum suppression over a tensor of detections.

    args:
        dets: (N, 5) tensor of [x1, y1, x2, y2, score] rows. Dispatches to
            the CPU or GPU extension kernel depending on dets.is_cuda.
        thresh: overlap threshold above which lower-scoring boxes are
            suppressed.

    returns:
        LongTensor of indices (into dets) of the kept boxes.
    """
    # Shared prelude (was duplicated in both branches): box areas
    # (+1 for inclusive pixel coordinates) and score-descending order.
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.sort(0, descending=True)[1]

    # Output buffers filled in-place by the C/CUDA kernels.
    keep = torch.LongTensor(dets.size(0))
    num_out = torch.LongTensor(1)

    if not dets.is_cuda:
        nms.cpu_nms(keep, num_out, dets, order, areas, thresh)
        return keep[:num_out[0]]
    else:
        # The GPU kernel expects detections pre-sorted by score.
        sorted_dets = dets[order].contiguous()
        nms.gpu_nms(keep, num_out, sorted_dets, thresh)
        # Map kept indices back from sorted order to the caller's ordering.
        return order[keep[:num_out[0]].cuda()].contiguous()
class TensorDict(OrderedDict):
    """Container mainly used for dicts of torch tensors.

    Extends OrderedDict with pytorch functionality: any torch.Tensor
    method accessed on the dict (e.g. .clone(), .to()) is broadcast to
    every value that supports it."""

    def concat(self, other):
        """Concatenate two dicts without copying internal data.

        Values from `other` override equal keys from `self`."""
        return TensorDict(self, **other)

    def copy(self):
        """Return a shallow TensorDict copy (values are shared)."""
        return TensorDict(super(TensorDict, self).copy())

    def __getattr__(self, name):
        """Broadcast a torch.Tensor method call to all entries.

        Only names that exist on torch.Tensor are forwarded; entries that
        do not expose the method are passed through unchanged.

        raises:
            AttributeError: if `name` is not a torch.Tensor attribute.
        """
        if not hasattr(torch.Tensor, name):
            # Fixed grammar of the original message ("has not attribute").
            raise AttributeError('\'TensorDict\' object has no attribute \'{}\''.format(name))

        def apply_attr(*args, **kwargs):
            return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})
        return apply_attr

    def attribute(self, attr: str, *args):
        """Return a TensorDict of getattr(value, attr, *args) per entry."""
        return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})

    def apply(self, fn, *args, **kwargs):
        """Return a TensorDict with fn(value, *args, **kwargs) applied to every entry."""
        return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})

    @staticmethod
    def _iterable(a):
        """True for container types whose elements should be recursed into."""
        return isinstance(a, (TensorDict, list))
https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/core/parameter/improved/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/core/parameter/improved/__pycache__/unrestore_res50_RGB.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/core/parameter/improved/__pycache__/unrestore_res50_RGB.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/core/parameter/improved/__pycache__/unrestore_res50_T.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/core/parameter/improved/__pycache__/unrestore_res50_T.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/core/tracker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/core/tracker/__init__.py -------------------------------------------------------------------------------- /SiamDW_T/libs/core/tracker/base/__init__.py: -------------------------------------------------------------------------------- 1 | from .basetracker import BaseTracker -------------------------------------------------------------------------------- /SiamDW_T/libs/core/tracker/base/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/core/tracker/base/__pycache__/__init__.cpython-36.pyc 
from .improved import AAS

def get_tracker_class():
    """Return the tracker class provided by this module (AAS)."""
    return AAS
/SiamDW_T/libs/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # from .evaluation import * 2 | from .params import * -------------------------------------------------------------------------------- /SiamDW_T/libs/core/utils/gdrive_download: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The script taken from https://www.matthuisman.nz/2019/01/download-google-drive-files-wget-curl.html 4 | 5 | url=$1 6 | filename=$2 7 | 8 | [ -z "$url" ] && echo A URL or ID is required first argument && exit 1 9 | 10 | fileid="" 11 | declare -a patterns=("s/.*\/file\/d\/\(.*\)\/.*/\1/p" "s/.*id\=\(.*\)/\1/p" "s/\(.*\)/\1/p") 12 | for i in "${patterns[@]}" 13 | do 14 | fileid=$(echo $url | sed -n $i) 15 | [ ! -z "$fileid" ] && break 16 | done 17 | 18 | [ -z "$fileid" ] && echo Could not find Google ID && exit 1 19 | 20 | echo File ID: $fileid 21 | 22 | tmp_file="$filename.$$.file" 23 | tmp_cookies="$filename.$$.cookies" 24 | tmp_headers="$filename.$$.headers" 25 | 26 | url='https://docs.google.com/uc?export=download&id='$fileid 27 | echo Downloading: "$url > $tmp_file" 28 | wget --save-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 29 | 30 | if [[ ! $(find "$tmp_file" -type f -size +10000c 2>/dev/null) ]]; then 31 | confirm=$(cat "$tmp_file" | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1/p') 32 | fi 33 | 34 | if [ ! 
-z "$confirm" ]; then 35 | url='https://docs.google.com/uc?export=download&id='$fileid'&confirm='$confirm 36 | echo Downloading: "$url > $tmp_file" 37 | wget --load-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file" 38 | fi 39 | 40 | [ -z "$filename" ] && filename=$(cat "$tmp_headers" | sed -rn 's/.*filename=\"(.*)\".*/\1/p') 41 | [ -z "$filename" ] && filename="google_drive.file" 42 | 43 | echo Moving: "$tmp_file > $filename" 44 | 45 | mv "$tmp_file" "$filename" 46 | 47 | rm -f "$tmp_cookies" "$tmp_headers" 48 | 49 | echo Saved: "$filename" 50 | echo DONE! 51 | 52 | exit 0 53 | -------------------------------------------------------------------------------- /SiamDW_T/libs/core/utils/params.py: -------------------------------------------------------------------------------- 1 | from core import TensorList 2 | import random 3 | 4 | 5 | class TrackerParams: 6 | """Class for tracker parameters.""" 7 | def free_memory(self): 8 | for a in dir(self): 9 | if not a.startswith('__') and hasattr(getattr(self, a), 'free_memory'): 10 | getattr(self, a).free_memory() 11 | 12 | 13 | class FeatureParams: 14 | """Class for feature specific parameters""" 15 | def __init__(self, *args, **kwargs): 16 | if len(args) > 0: 17 | raise ValueError 18 | 19 | for name, val in kwargs.items(): 20 | if isinstance(val, list): 21 | setattr(self, name, TensorList(val)) 22 | else: 23 | setattr(self, name, val) 24 | 25 | 26 | def Choice(*args): 27 | """Can be used to sample random parameter values.""" 28 | return random.choice(args) 29 | -------------------------------------------------------------------------------- /SiamDW_T/libs/core/utils/plotting.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | matplotlib.use('TkAgg') 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | import torch 6 | 7 | 8 | def show_tensor(a: torch.Tensor, fig_num = None, title = None): 9 | """Display a 2D tensor. 
class BaseActor:
    """Base class for actors.

    An actor passes training data through the network and computes the
    training loss; subclasses implement __call__.
    """

    def __init__(self, net, objective):
        """
        args:
            net - The network to train
            objective - The loss function
        """
        self.net = net
        self.objective = objective

    def __call__(self, data: TensorDict):
        """Run one training iteration on a batch.

        args:
            data - A TensorDict containing all the necessary data blocks.

        returns:
            loss - loss for the input data
            stats - a dict containing detailed losses
        """
        raise NotImplementedError

    def to(self, device):
        """Move the wrapped network to the given device ('cpu' or 'cuda')."""
        self.net.to(device)

    def train(self, mode=True):
        """Set whether the wrapped network is in train mode.

        args:
            mode (True) - Bool specifying whether in training mode.
        """
        self.net.train(mode)

    def eval(self):
        """Switch the wrapped network to eval mode."""
        self.train(mode=False)
class AASParaActor(BaseActor):
    """Actor for paired RGB + thermal IoU-prediction training."""

    def __call__(self, data):
        """
        args:
            data - The input data, should contain the fields
                'rgb_train_images', 'rgb_test_images', 't_train_images',
                't_test_images', 'rgb_train_anno', 'rgb_test_proposals',
                't_train_anno', 't_test_proposals', 'rgb_proposal_iou'
                and 't_proposal_iou'.

        returns:
            loss - the training loss
            stats - dict containing detailed losses
        """
        # Run network to obtain IoU prediction for each proposal.
        # NOTE(review): the positional order must match the network's
        # forward signature exactly; squeeze() presumably drops a leading
        # singleton (sequence/batch) dimension -- confirm against loader.
        iou_pred = self.net(data['rgb_train_images'].squeeze(), data['rgb_test_images'].squeeze(), data['t_train_images'].squeeze(),
                            data['t_test_images'].squeeze(), data['rgb_train_anno'].squeeze(),
                            data['rgb_test_proposals'].squeeze(), data['t_train_anno'].squeeze(), data['t_test_proposals'].squeeze())

        # Flatten all but the last dimension for the element-wise loss.
        iou_pred = iou_pred.view(-1, iou_pred.shape[2])

        # Ground truth: RGB and thermal proposal IoUs concatenated along the
        # last dim to mirror the network's joint prediction.
        temp_gt = torch.cat((data['rgb_proposal_iou'], data['t_proposal_iou']), dim=-1)
        iou_gt = temp_gt.view(-1, temp_gt.shape[2])
        # iou_gt = data['proposal_iou'].view(-1, data['proposal_iou'].shape[2])

        # Compute loss
        loss = self.objective(iou_pred, iou_gt)

        # Return training stats (single objective, so total == iou).
        stats = {'Loss/total': loss.item(),
                 'Loss/iou': loss.item()}

        return loss, stats
class Settings:
    """ Training settings, e.g. the paths to datasets and networks."""
    def __init__(self):
        self.set_default()

    def set_default(self):
        """Reset to defaults: environment-specific paths and GPU usage."""
        # env holds workspace/tensorboard/dataset paths from the local
        # environment configuration.
        self.env = env_settings()
        self.use_gpu = True
It first tries to use the jpeg4py_loader, 6 | but reverts to the opencv_loader if the former is not available.""" 7 | # if default_image_loader.use_jpeg4py is None: 8 | # # Try using jpeg4py 9 | # im = jpeg4py_loader(path) 10 | # if im is None: 11 | # default_image_loader.use_jpeg4py = False 12 | # print('Using opencv_loader instead.') 13 | # else: 14 | # default_image_loader.use_jpeg4py = True 15 | # return im 16 | # if default_image_loader.use_jpeg4py: 17 | # return jpeg4py_loader(path) 18 | return opencv_loader(path) 19 | 20 | default_image_loader.use_jpeg4py = None 21 | 22 | 23 | def jpeg4py_loader(path): 24 | """ Image reading using jpeg4py (https://github.com/ajkxyz/jpeg4py)""" 25 | try: 26 | return jpeg4py.JPEG(path).decode() 27 | except Exception as e: 28 | print('ERROR: Could not read image "{}"'.format(path)) 29 | print(e) 30 | return None 31 | 32 | 33 | def opencv_loader(path): 34 | """ Read image using opencv's imread function and returns it in rgb format""" 35 | # for depth image 36 | 37 | im = cv.imread(path, cv.IMREAD_COLOR) 38 | # convert to rgb and return 39 | return cv.cvtColor(im, cv.COLOR_BGR2RGB) 40 | 41 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/external/PreciseRoIPooling/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Jiayuan Mao 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial 
portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/external/PreciseRoIPooling/_assets/prroi_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/external/PreciseRoIPooling/_assets/prroi_visualization.png -------------------------------------------------------------------------------- /SiamDW_T/libs/models/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | /_prroi_pooling 3 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : __init__.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 
11 | 12 | from .prroi_pool import * 13 | 14 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/external/PreciseRoIPooling/pytorch/prroi_pool/build.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : build.py 4 | # Author : Jiayuan Mao, Tete Xiao 5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com 6 | # Date : 07/13/2018 7 | # 8 | # This file is part of PreciseRoIPooling. 9 | # Distributed under terms of the MIT license. 10 | # Copyright (c) 2017 Megvii Technology Limited. 11 | 12 | import os 13 | import torch 14 | 15 | from torch.utils.ffi import create_extension 16 | 17 | headers = [] 18 | sources = [] 19 | defines = [] 20 | extra_objects = [] 21 | with_cuda = False 22 | 23 | if torch.cuda.is_available(): 24 | with_cuda = True 25 | 26 | headers+= ['src/prroi_pooling_gpu.h'] 27 | sources += ['src/prroi_pooling_gpu.c'] 28 | defines += [('WITH_CUDA', None)] 29 | 30 | this_file = os.path.dirname(os.path.realpath(__file__)) 31 | extra_objects_cuda = ['src/prroi_pooling_gpu_impl.cu.o'] 32 | extra_objects_cuda = [os.path.join(this_file, fname) for fname in extra_objects_cuda] 33 | extra_objects.extend(extra_objects_cuda) 34 | else: 35 | # TODO(Jiayuan Mao @ 07/13): remove this restriction after we support the cpu implementation. 
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File   : prroi_pool.py
# Author : Jiayuan Mao, Tete Xiao
# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com
# Date   : 07/13/2018
#
# This file is part of PreciseRoIPooling.
# Distributed under terms of the MIT license.
# Copyright (c) 2017 Megvii Technology Limited.

import torch.nn as nn

from .functional import prroi_pool2d

__all__ = ['PrRoIPool2D']


class PrRoIPool2D(nn.Module):
    """nn.Module wrapper around the precise RoI pooling op `prroi_pool2d`."""

    def __init__(self, pooled_height, pooled_width, spatial_scale):
        super().__init__()
        # Normalize constructor arguments to the types the underlying
        # CUDA op expects.
        self.pooled_height = int(pooled_height)
        self.pooled_width = int(pooled_width)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        """Pool each RoI in `rois` from `features` to a fixed-size output."""
        return prroi_pool2d(
            features,
            rois,
            self.pooled_height,
            self.pooled_width,
            self.spatial_scale,
        )
license. 8 | * Copyright (c) 2017 Megvii Technology Limited. 9 | */ 10 | 11 | int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale); 12 | 13 | int prroi_pooling_backward_cuda( 14 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 15 | int pooled_height, int pooled_width, float spatial_scale 16 | ); 17 | 18 | int prroi_pooling_coor_backward_cuda( 19 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, 20 | int pooled_height, int pooled_width, float spatial_scal 21 | ); 22 | 23 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/external/PreciseRoIPooling/pytorch/prroi_pool/travis.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash -e 2 | # File : travis.sh 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # 6 | # Distributed under terms of the MIT license. 7 | # Copyright (c) 2017 Megvii Technology Limited. 8 | 9 | cd src 10 | echo "Working directory: " `pwd` 11 | echo "Compiling prroi_pooling kernels by nvcc..." 12 | nvcc -c -o prroi_pooling_gpu_impl.cu.o prroi_pooling_gpu_impl.cu -x cu -Xcompiler -fPIC -arch=sm_52 13 | 14 | cd ../ 15 | echo "Working directory: " `pwd` 16 | echo "Building python libraries..." 
17 | python3 build.py 18 | 19 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/__init__.py -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnet import * 2 | from .resnet18_vggm import * 3 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/backbone/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/backbone/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/backbone/__pycache__/resnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/backbone/__pycache__/resnet.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/backbone/__pycache__/resnet18_vggm.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/backbone/__pycache__/resnet18_vggm.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/bbreg/__init__.py: 
-------------------------------------------------------------------------------- 1 | 2 | from .aas_iou_net import RGBIoUNet 3 | from .aas_iou_net import TIoUNet 4 | from .aas_iou_net import iou_predictor 5 | -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/bbreg/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/bbreg/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/bbreg/__pycache__/atom.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/bbreg/__pycache__/atom.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/bbreg/__pycache__/atom_iou_net.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/bbreg/__pycache__/atom_iou_net.cpython-36.pyc -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/layers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/researchmm/VOT2019/eaf84c2b58a8ed3ff6ca464dcfdd52519507ae36/SiamDW_T/libs/models/models/layers/__init__.py -------------------------------------------------------------------------------- /SiamDW_T/libs/models/models/layers/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
from torch import nn


def conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True,
               batch_norm=True, relu=True):
    """Build a Conv2d, optionally followed by BatchNorm2d and ReLU.

    Returns an nn.Sequential containing the requested layers in order
    conv -> (bn) -> (relu).
    """
    modules = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=bias),
    ]
    if batch_norm:
        modules.append(nn.BatchNorm2d(out_planes))
    if relu:
        modules.append(nn.ReLU(inplace=True))
    return nn.Sequential(*modules)


class LinearBlock(nn.Module):
    """Fully-connected layer over a flattened (in_planes, input_sz, input_sz)
    feature map, with optional batch norm and ReLU. Output is 2-D
    (batch, out_planes)."""

    def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True):
        super().__init__()
        self.linear = nn.Linear(in_planes * input_sz * input_sz, out_planes, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        batch = x.shape[0]
        out = self.linear(x.view(batch, -1))
        if self.bn is not None:
            # BatchNorm2d wants 4-D NCHW input; give the features singleton
            # spatial dimensions.
            out = self.bn(out.view(batch, out.shape[1], 1, 1))
        if self.relu is not None:
            out = self.relu(out)
        return out.view(batch, -1)
-------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Zhipeng Zhang (zhangzhipeng2017@ia.ac.cn) 5 | # Details: import other paths 6 | # ------------------------------------------------------------------------------ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | 12 | import os.path as osp 13 | import sys 14 | 15 | 16 | def add_path(path): 17 | if path not in sys.path: 18 | sys.path.insert(0, path) 19 | 20 | 21 | this_dir = osp.dirname(__file__) 22 | 23 | lib_path = osp.join(this_dir, '../libs') 24 | add_path(lib_path) 25 | --------------------------------------------------------------------------------