├── README.md
├── SiamAPN++
├── LICENSE
├── experiments
│ └── config.yaml
├── pretrained_models
│ └── alexnet-bn.pth
├── pysot
│ ├── __init__.py
│ ├── __pycache__
│ ├── core
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ └── config_adapn.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── anchortarget_adapn.py
│ │ ├── augmentation.py
│ │ └── dataset_adapn.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── backbone
│ │ │ ├── __pycache__
│ │ │ └── alexnet.py
│ │ ├── init_weight.py
│ │ ├── loss_adapn.py
│ │ ├── model_builder_adapn.py
│ │ └── utile_adapn.py
│ ├── tracker
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── adsiamapn_tracker.py
│ │ └── base_tracker.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── average_meter.py
│ │ ├── bbox.py
│ │ ├── distributed.py
│ │ ├── location_grid.py
│ │ ├── log_helper.py
│ │ ├── lr_scheduler_adapn.py
│ │ ├── misc.py
│ │ ├── model_load.py
│ │ └── xcorr.py
├── toolkit
│ ├── __init__.py
│ ├── __pycache__
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── dataset.py
│ │ ├── uav10fps.py
│ │ ├── uav20l.py
│ │ └── video.py
│ ├── evaluation
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ └── ope_benchmark.py
│ ├── utils
│ │ ├── __pycache__
│ │ ├── region.pyx
│ │ └── statistics.py
│ └── visualization
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── draw_success_precision.py
│ │ └── draw_utils.py
├── tools
│ ├── demo_apn++.py
│ ├── eval.py
│ ├── test.py
│ └── train_apn++.py
└── training_dataset
│ ├── coco
│ ├── gen_json.py
│ ├── par_crop.py
│ ├── pycocotools
│ │ ├── Makefile
│ │ ├── __init__.py
│ │ ├── _mask.pyx
│ │ ├── coco.py
│ │ ├── cocoeval.py
│ │ ├── common
│ │ │ ├── gason.cpp
│ │ │ ├── gason.h
│ │ │ ├── maskApi.c
│ │ │ └── maskApi.h
│ │ ├── mask.py
│ │ └── setup.py
│ ├── readme.md
│ └── visual.py
│ ├── got10k
│ ├── gen_json.py
│ ├── par_crop.py
│ └── readme.md
│ ├── vid
│ ├── gen_json.py
│ ├── par_crop.py
│ ├── parse_vid.py
│ ├── readme.md
│ └── visual.py
│ └── yt_bb
│ ├── checknum.py
│ ├── gen_json.py
│ ├── par_crop.py
│ ├── readme.md
│ └── visual.py
├── SiamAPN
├── LICENSE
├── experiments
│ └── config.yaml
├── pretrained_models
│ └── alexnet-bn.pth
├── pysot
│ ├── __init__.py
│ ├── __pycache__
│ ├── core
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ └── config_apn.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── anchortarget_apn.py
│ │ ├── augmentation.py
│ │ └── dataset_apn.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── backbone
│ │ │ ├── __pycache__
│ │ │ └── alexnet.py
│ │ ├── init_weight.py
│ │ ├── loss_apn.py
│ │ ├── model_builder_apn.py
│ │ └── utile_apn.py
│ ├── tracker
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── base_tracker.py
│ │ └── siamapn_tracker.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── average_meter.py
│ │ ├── bbox.py
│ │ ├── distributed.py
│ │ ├── location_grid.py
│ │ ├── log_helper.py
│ │ ├── lr_scheduler_apn.py
│ │ ├── misc.py
│ │ ├── model_load.py
│ │ └── xcorr.py
├── toolkit
│ ├── __init__.py
│ ├── __pycache__
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── dataset.py
│ │ ├── uav10fps.py
│ │ ├── uav20l.py
│ │ ├── v4r.py
│ │ ├── video.py
│ │ └── visdrone1.py
│ ├── evaluation
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ └── ope_benchmark.py
│ ├── utils
│ │ ├── __pycache__
│ │ ├── region.pyx
│ │ └── statistics.py
│ └── visualization
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── draw_success_precision.py
│ │ └── draw_utils.py
├── tools
│ ├── demo_apn.py
│ ├── eval.py
│ ├── test.py
│ └── train_apn.py
└── training_dataset
│ ├── coco
│ ├── gen_json.py
│ ├── par_crop.py
│ ├── pycocotools
│ │ ├── Makefile
│ │ ├── __init__.py
│ │ ├── _mask.pyx
│ │ ├── coco.py
│ │ ├── cocoeval.py
│ │ ├── common
│ │ │ ├── gason.cpp
│ │ │ ├── gason.h
│ │ │ ├── maskApi.c
│ │ │ └── maskApi.h
│ │ ├── mask.py
│ │ └── setup.py
│ ├── readme.md
│ └── visual.py
│ ├── got10k
│ ├── gen_json.py
│ ├── par_crop.py
│ └── readme.md
│ ├── vid
│ ├── gen_json.py
│ ├── par_crop.py
│ ├── parse_vid.py
│ ├── readme.md
│ └── visual.py
│ └── yt_bb
│ ├── checknum.py
│ ├── gen_json.py
│ ├── par_crop.py
│ ├── readme.md
│ └── visual.py
├── UAVTrack112
└── README.md
└── requirement.txt

/SiamAPN++/experiments/config.yaml:
--------------------------------------------------------------------------------
META_ARC: "ADSiamAPN_alexnet"

BACKBONE:
    TYPE: "alexnet"
    PRETRAINED: 'alexnet-bn.pth'
    TRAIN_LAYERS: ['layer3','layer4','layer5']
    TRAIN_EPOCH: 10
    LAYERS_LR: 0.1

TRACK:
    TYPE: 'ADSiamAPNtracker'
    EXEMPLAR_SIZE: 127
    INSTANCE_SIZE: 287
    CONTEXT_AMOUNT: 0.5
    STRIDE: 8
    w1: 1.2
    w2: 1.3
    w3: 1.1

TRAIN:
    EPOCH: 50
    START_EPOCH: 0
    epochthrelod: 0
    SEARCH_SIZE: 287
    BATCH_SIZE: 220
    NUM_GPU: 2
    BASE_LR: 0.005
    RESUME: ''
    WEIGHT_DECAY: 0.0001
    PRETRAINED: ''
    OUTPUT_SIZE: 21
    NUM_WORKERS: 7
    LOC_WEIGHT: 2.2
    CLS_WEIGHT: 1.0
    SHAPE_WEIGHT: 1.8
    w1: 1.2
    w2: 1.3
    w3: 1.1
    w4: 1.5
    w5: 1.0

    POS_NUM: 16
    TOTAL_NUM: 64
    NEG_NUM: 16
    LARGER: 1.0
    range: 1.0
    LR:
        TYPE: 'log'
        KWARGS:
            start_lr: 0.01
            end_lr: 0.0005

    LR_WARMUP:
        TYPE: 'step'
        EPOCH: 5
        KWARGS:
            start_lr: 0.005
            end_lr: 0.01
            step: 1

DATASET:
    NAMES:
    - 'VID'
    - 'COCO'
    - 'GOT'
    - 'YOUTUBEBB'

    TEMPLATE:
        SHIFT: 4
        SCALE: 0.05
        BLUR: 0.0
        FLIP: 0.0
        COLOR: 1.0

    SEARCH:
        SHIFT: 64
        SCALE: 0.18
        BLUR: 0.2
        FLIP: 0.0
        COLOR: 1.0

    NEG: 0.05
    GRAY: 0.0
--------------------------------------------------------------------------------
/SiamAPN++/pretrained_models/alexnet-bn.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/pretrained_models/alexnet-bn.pth
--------------------------------------------------------------------------------
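A minimal sketch of how experiments/config.yaml above is typically consumed, assuming pysot/core/config_adapn.py exposes a yacs CfgNode named cfg whose defaults match the keys above (the standard pysot pattern; the exact attribute set is an assumption, not taken from this dump):

# Hedged sketch: the cfg attribute names below mirror the YAML keys above.
from pysot.core.config_adapn import cfg

cfg.merge_from_file('SiamAPN++/experiments/config.yaml')  # override the defaults
print(cfg.TRAIN.BASE_LR)        # 0.005
print(cfg.TRACK.EXEMPLAR_SIZE)  # 127 (template crop side)
print(cfg.TRAIN.SEARCH_SIZE)    # 287 (search region side)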
/SiamAPN++/pysot/models/backbone/alexnet.py:
--------------------------------------------------------------------------------
import torch.nn as nn


class AlexNet(nn.Module):
    configs = [3, 96, 256, 384, 384, 256]

    def __init__(self, width_mult=1):
        configs = list(map(lambda x: 3 if x == 3 else
                           int(x*width_mult), AlexNet.configs))
        super(AlexNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2),
            nn.BatchNorm2d(configs[1]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            )
        self.layer2 = nn.Sequential(
            nn.Conv2d(configs[1], configs[2], kernel_size=5),
            nn.BatchNorm2d(configs[2]),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            )
        self.layer3 = nn.Sequential(
            nn.Conv2d(configs[2], configs[3], kernel_size=3),
            nn.BatchNorm2d(configs[3]),
            nn.ReLU(inplace=True),
            )
        self.layer4 = nn.Sequential(
            nn.Conv2d(configs[3], configs[4], kernel_size=3),
            nn.BatchNorm2d(configs[4]),
            nn.ReLU(inplace=True),
            )

        self.layer5 = nn.Sequential(
            nn.Conv2d(configs[4], configs[5], kernel_size=3),
            nn.BatchNorm2d(configs[5]),
            )
        self.feature_size = configs[5]
        # layer1 and layer2 stay frozen; only layer3-5 are fine-tuned
        # (cf. BACKBONE.TRAIN_LAYERS in config.yaml)
        for param in self.layer1.parameters():
            param.requires_grad = False
        for param in self.layer2.parameters():
            param.requires_grad = False

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x1 = self.layer4(x)
        x = self.layer5(x1)
        # return both the layer4 and layer5 feature maps
        return x1, x
--------------------------------------------------------------------------------
/SiamAPN++/pysot/models/init_weight.py:
--------------------------------------------------------------------------------
import torch.nn as nn


def init_weights(model):
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight.data,
                                    mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
--------------------------------------------------------------------------------
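A short sketch wiring the two files above together: build the backbone, apply init_weights, and inspect the two returned feature maps. The 127-pixel input matches EXEMPLAR_SIZE in config.yaml; the printed shapes follow from the kernel/stride arithmetic, worked out here as an illustration rather than taken from the repo:

# Illustrative sketch; shapes follow from the conv/pool arithmetic above.
import torch
from pysot.models.backbone.alexnet import AlexNet
from pysot.models.init_weight import init_weights

net = AlexNet()
init_weights(net)                 # Kaiming for convs, unit/zero for BN
z = torch.randn(1, 3, 127, 127)   # EXEMPLAR_SIZE template crop
x4, x5 = net(z)                   # layer4 and layer5 outputs
print(x4.shape)                   # torch.Size([1, 384, 8, 8])
print(x5.shape)                   # torch.Size([1, 256, 6, 6])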
/SiamAPN++/pysot/models/loss_adapn.py:
--------------------------------------------------------------------------------
# Copyright (c) SenseTime. All Rights Reserved.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from torch import nn

import torch
import torch.nn.functional as F


def get_cls_loss(pred, label, select):
    if len(select.size()) == 0 or \
            select.size() == torch.Size([0]):
        return 0
    pred = torch.index_select(pred, 0, select)
    label = torch.index_select(label, 0, select)
    label = label.long()
    return F.nll_loss(pred, label)


def select_cross_entropy_loss(pred, label):
    pred = pred.view(-1, 2)
    label = label.view(-1)
    pos = label.data.eq(1).nonzero(as_tuple=False).squeeze().cuda()
    neg = label.data.eq(0).nonzero(as_tuple=False).squeeze().cuda()
    loss_pos = get_cls_loss(pred, label, pos)
    loss_neg = get_cls_loss(pred, label, neg)
    return loss_pos * 0.5 + loss_neg * 0.5


def shaloss(pre, label, weight):
    # smooth-L1-style shape loss: quadratic below 0.04, linear above
    loss1 = torch.abs(pre - label)
    num = torch.where(loss1 < 0.04)
    loss1[num] = 25 * loss1[num] ** 2
    loss = (loss1 * weight).sum() / weight.sum()
    return loss


def weight_l1_loss(pred_loc, label_loc, loss_weight):
    b, _, sh, sw = pred_loc.size()
    pred_loc = pred_loc.view(b, 4, -1, sh, sw)
    diff = (pred_loc - label_loc).abs()
    # quadratic branch near zero, as in smooth L1
    diff[torch.where(diff < 0.001)] = 1000 * diff[torch.where(diff < 0.001)] ** 2
    diff = diff.sum(dim=1).view(b, -1, sh, sw)
    loss = diff * loss_weight
    return loss.sum().div(b)


class IOULoss(nn.Module):
    def forward(self, pred, target, weight=None):

        pred_left = pred[:, :, 0]
        pred_top = pred[:, :, 1]
        pred_right = pred[:, :, 2]
        pred_bottom = pred[:, :, 3]

        target_left = target[:, :, 0]
        target_top = target[:, :, 1]
        target_right = target[:, :, 2]
        target_bottom = target[:, :, 3]

        target_area = (target_right - target_left) * \
                      (target_bottom - target_top)
        pred_area = (pred_right - pred_left) * \
                    (pred_bottom - pred_top)

        w_intersect = torch.min(pred_right, target_right) - torch.max(pred_left, target_left)
        w_intersect = w_intersect.clamp(min=0)
        h_intersect = torch.min(pred_bottom, target_bottom) - torch.max(pred_top, target_top)
        h_intersect = h_intersect.clamp(min=0)
        area_intersect = w_intersect * h_intersect
        area_union = target_area + pred_area - area_intersect
        ious = (area_intersect / (area_union + 1e-6)).clamp(min=0) + 1e-6

        # focal-style weighting: easy (high-IoU) samples contribute less
        losses = -(1 - ious) * (1.5 - ious) * torch.log(ious)

        weight = weight.view(losses.size())
        if weight.sum() > 0:
            return (losses * weight).sum() / (weight.sum() + 1e-6)
        else:
            return (losses * weight).sum()
--------------------------------------------------------------------------------
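A CPU-only sketch of the IOULoss above, assuming the (batch, boxes, 4) left/top/right/bottom layout implied by its indexing; select_cross_entropy_loss is skipped here because it moves its index tensors to CUDA:

# Hedged sketch: box layout inferred from the slicing in IOULoss.forward.
import torch
from pysot.models.loss_adapn import IOULoss

iou_loss = IOULoss()
pred = torch.tensor([[[10., 10., 50., 60.]]])    # (1, 1, 4): l, t, r, b
target = torch.tensor([[[12., 8., 48., 62.]]])
weight = torch.ones(1, 1)                        # one positive location
print(iou_loss(pred, target, weight))            # ~0.018: near-perfect overlap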
/SiamAPN++/pysot/tracker/base_tracker.py:
--------------------------------------------------------------------------------
# Copyright (c) SenseTime. All Rights Reserved.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import cv2
import numpy as np
import torch

from pysot.core.config_adapn import cfg


class BaseTracker(object):
    """ Base tracker for single object tracking
    """
    def init(self, img, bbox):
        """
        args:
            img(np.ndarray): BGR image
            bbox(list): [x, y, width, height]
                        x, y need to be 0-based
        """
        raise NotImplementedError

    def track(self, img):
        """
        args:
            img(np.ndarray): BGR image
        return:
            bbox(list): [x, y, width, height]
        """
        raise NotImplementedError


class SiameseTracker(BaseTracker):
    def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):
        """
        args:
            im: BGR image
            pos: center position
            model_sz: exemplar size
            original_sz: original size
            avg_chans: channel average
        """
        if isinstance(pos, float):
            pos = [pos, pos]
        sz = original_sz
        im_sz = im.shape
        c = (original_sz + 1) / 2
        # context_xmin = round(pos[0] - c)  # py2 and py3 round
        context_xmin = np.floor(pos[0] - c + 0.5)
        context_xmax = context_xmin + sz - 1
        # context_ymin = round(pos[1] - c)
        context_ymin = np.floor(pos[1] - c + 0.5)
        context_ymax = context_ymin + sz - 1
        left_pad = int(max(0., -context_xmin))
        top_pad = int(max(0., -context_ymin))
        right_pad = int(max(0., context_xmax - im_sz[1] + 1))
        bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))

        context_xmin = context_xmin + left_pad
        context_xmax = context_xmax + left_pad
        context_ymin = context_ymin + top_pad
        context_ymax = context_ymax + top_pad

        r, c, k = im.shape
        if any([top_pad, bottom_pad, left_pad, right_pad]):
            # pad the frame with the channel mean before cropping
            size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)
            te_im = np.zeros(size, np.uint8)
            te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
            if top_pad:
                te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
            if bottom_pad:
                te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
            if left_pad:
                te_im[:, 0:left_pad, :] = avg_chans
            if right_pad:
                te_im[:, c + left_pad:, :] = avg_chans
            im_patch = te_im[int(context_ymin):int(context_ymax + 1),
                             int(context_xmin):int(context_xmax + 1), :]
        else:
            im_patch = im[int(context_ymin):int(context_ymax + 1),
                          int(context_xmin):int(context_xmax + 1), :]

        if not np.array_equal(model_sz, original_sz):
            im_patch = cv2.resize(im_patch, (model_sz, model_sz))
        im_patch = im_patch.transpose(2, 0, 1)
        im_patch = im_patch[np.newaxis, :, :, :]
        im_patch = im_patch.astype(np.float32)
        im_patch = torch.from_numpy(im_patch)
        if cfg.CUDA:
            im_patch = im_patch.cuda()
        return im_patch
--------------------------------------------------------------------------------
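A quick sketch of get_subwindow on a synthetic frame; cfg.CUDA is forced off so the sketch runs without a GPU (an assumption about the flag defined in config_adapn.py, following the stock pysot config):

# Hedged sketch on a fake frame; crop sizes mirror EXEMPLAR_SIZE in config.yaml.
import numpy as np
from pysot.core.config_adapn import cfg
from pysot.tracker.base_tracker import SiameseTracker

cfg.CUDA = False                       # keep the patch on CPU for this sketch
img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # fake BGR frame
avg = img.mean(axis=(0, 1))            # per-channel mean used for padding
patch = SiameseTracker().get_subwindow(img, pos=(320.0, 240.0), model_sz=127,
                                       original_sz=200, avg_chans=avg)
print(patch.shape)                     # torch.Size([1, 3, 127, 127])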
/SiamAPN++/pysot/utils/average_meter.py:
--------------------------------------------------------------------------------
# Copyright (c) SenseTime. All Rights Reserved.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals


class Meter(object):
    def __init__(self, name, val, avg):
        self.name = name
        self.val = val
        self.avg = avg

    def __repr__(self):
        return "{name}: {val:.6f} ({avg:.6f})".format(
            name=self.name, val=self.val, avg=self.avg
        )

    def __format__(self, *tuples, **kwargs):
        return self.__repr__()


class AverageMeter:
    """Computes and stores the average and current value"""
    def __init__(self, num=100):
        self.num = num
        self.reset()

    def reset(self):
        self.val = {}
        self.sum = {}
        self.count = {}
        self.history = {}

    def update(self, batch=1, **kwargs):
        val = {}
        for k in kwargs:
            val[k] = kwargs[k] / float(batch)
        self.val.update(val)
        for k in kwargs:
            if k not in self.sum:
                self.sum[k] = 0
                self.count[k] = 0
                self.history[k] = []
            self.sum[k] += kwargs[k]
            self.count[k] += batch
            for _ in range(batch):
                self.history[k].append(val[k])

            if self.num <= 0:
                # < 0, average all
                self.history[k] = []

                # == 0: no average
                if self.num == 0:
                    self.sum[k] = self.val[k]
                    self.count[k] = 1

            elif len(self.history[k]) > self.num:
                pop_num = len(self.history[k]) - self.num
                for _ in range(pop_num):
                    self.sum[k] -= self.history[k][0]
                    del self.history[k][0]
                    self.count[k] -= 1

    def __repr__(self):
        s = ''
        for k in self.sum:
            s += self.format_str(k)
        return s

    def format_str(self, attr):
        return "{name}: {val:.6f} ({avg:.6f}) ".format(
            name=attr,
            val=float(self.val[attr]),
            avg=float(self.sum[attr]) / self.count[attr])

    def __getattr__(self, attr):
        if attr in self.__dict__:
            return super(AverageMeter, self).__getattr__(attr)
        if attr not in self.sum:
            print("invalid key '{}'".format(attr))
            return Meter(attr, 0, 0)
        return Meter(attr, self.val[attr], self.avg(attr))

    def avg(self, attr):
        return float(self.sum[attr]) / self.count[attr]


if __name__ == '__main__':
    avg1 = AverageMeter(10)
    avg2 = AverageMeter(0)
    avg3 = AverageMeter(-1)

    for i in range(20):
        avg1.update(s=i)
        avg2.update(s=i)
        avg3.update(s=i)

        print('iter {}'.format(i))
        print(avg1.s)
        print(avg2.s)
        print(avg3.s)
--------------------------------------------------------------------------------
/SiamAPN++/pysot/utils/location_grid.py:
--------------------------------------------------------------------------------
import torch


def compute_locations(features, stride):
    h, w = features.size()[-2:]
    locations_per_level = compute_locations_per_level(
        h, w, stride,
        features.device
    )
    return locations_per_level


def compute_locations_per_level(h, w, stride, device):
    shifts_x = torch.arange(
        0, w * stride, step=stride,
        dtype=torch.float32, device=device
    )
    shifts_y = torch.arange(
        0, h * stride, step=stride,
        dtype=torch.float32, device=device
    )
    shift_y, shift_x = torch.meshgrid((shifts_y, shifts_x))
    shift_x = shift_x.reshape(-1)
    shift_y = shift_y.reshape(-1)
    # locations = torch.stack((shift_x, shift_y), dim=1) + stride + 3*stride  # (size_z-1)/2*size_z 28
    # locations = torch.stack((shift_x, shift_y), dim=1) + stride
    locations = torch.stack((shift_x, shift_y), dim=1) + 32  # alex: 48 // 32
    return locations
--------------------------------------------------------------------------------
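A short sketch of compute_locations with the values from config.yaml (STRIDE: 8, OUTPUT_SIZE: 21): the 21x21 response map yields 441 grid points, each shifted by the hard-coded +32 offset seen above:

# Illustrative sketch; the feature tensor only supplies a spatial size and device.
import torch
from pysot.utils.location_grid import compute_locations

score_map = torch.zeros(1, 256, 21, 21)       # OUTPUT_SIZE x OUTPUT_SIZE response
loc = compute_locations(score_map, stride=8)  # STRIDE from config.yaml
print(loc.shape)                              # torch.Size([441, 2])
print(loc[0], loc[-1])                        # tensor([32., 32.]) tensor([192., 192.])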
torch.stack((shift_x, shift_y), dim=1) + stride 25 | locations = torch.stack((shift_x, shift_y), dim=1) + 32 #alex:48 // 32 26 | return locations 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /SiamAPN++/pysot/utils/misc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import os 9 | import numpy as np 10 | import torch 11 | 12 | from colorama import Fore, Style 13 | 14 | 15 | __all__ = ['commit', 'describe'] 16 | 17 | 18 | def _exec(cmd): 19 | f = os.popen(cmd, 'r', 1) 20 | return f.read().strip() 21 | 22 | 23 | def _bold(s): 24 | return "\033[1m%s\033[0m" % s 25 | 26 | 27 | def _color(s): 28 | # return f'{Fore.RED}{s}{Style.RESET_ALL}' 29 | return "{}{}{}".format(Fore.RED,s,Style.RESET_ALL) 30 | 31 | 32 | def _describe(model, lines=None, spaces=0): 33 | head = " " * spaces 34 | for name, p in model.named_parameters(): 35 | if '.' in name: 36 | continue 37 | if p.requires_grad: 38 | name = _color(name) 39 | line = "{head}- {name}".format(head=head, name=name) 40 | lines.append(line) 41 | 42 | for name, m in model.named_children(): 43 | space_num = len(name) + spaces + 1 44 | if m.training: 45 | name = _color(name) 46 | line = "{head}.{name} ({type})".format( 47 | head=head, 48 | name=name, 49 | type=m.__class__.__name__) 50 | lines.append(line) 51 | _describe(m, lines, space_num) 52 | 53 | 54 | def commit(): 55 | root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) 56 | cmd = "cd {}; git log | head -n1 | awk '{{print $2}}'".format(root) 57 | commit = _exec(cmd) 58 | cmd = "cd {}; git log --oneline | head -n1".format(root) 59 | commit_log = _exec(cmd) 60 | return "commit : {}\n log : {}".format(commit, commit_log) 61 | 62 | 63 | def describe(net, name=None): 64 | num = 0 65 | lines = [] 66 | if name is not None: 67 | lines.append(name) 68 | num = len(name) 69 | _describe(net, lines, num) 70 | return "\n".join(lines) 71 | 72 | 73 | def bbox_clip(x, min_value, max_value): 74 | new_x = max(min_value, min(x, max_value)) 75 | return new_x 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /SiamAPN++/pysot/utils/model_load.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 
2 | 
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 | from __future__ import unicode_literals
7 | 
8 | import logging
9 | 
10 | import torch
11 | 
12 | 
13 | logger = logging.getLogger('global')
14 | 
15 | 
16 | def check_keys(model, pretrained_state_dict):
17 |     ckpt_keys = set(pretrained_state_dict.keys())
18 |     model_keys = set(model.state_dict().keys())
19 |     used_pretrained_keys = model_keys & ckpt_keys
20 |     unused_pretrained_keys = ckpt_keys - model_keys
21 |     missing_keys = model_keys - ckpt_keys
22 |     # filter 'num_batches_tracked'
23 |     missing_keys = [x for x in missing_keys
24 |                     if not x.endswith('num_batches_tracked')]
25 |     if len(missing_keys) > 0:
26 |         logger.info('[Warning] missing keys: {}'.format(missing_keys))
27 |         logger.info('missing keys:{}'.format(len(missing_keys)))
28 |     if len(unused_pretrained_keys) > 0:
29 |         logger.info('[Warning] unused_pretrained_keys: {}'.format(
30 |             unused_pretrained_keys))
31 |         logger.info('unused checkpoint keys:{}'.format(
32 |             len(unused_pretrained_keys)))
33 |     logger.info('used keys:{}'.format(len(used_pretrained_keys)))
34 |     assert len(used_pretrained_keys) > 0, \
35 |         'load NONE from pretrained checkpoint'
36 |     return True
37 | 
38 | 
39 | def remove_prefix(state_dict, prefix):
40 |     ''' Old-style models are stored with all parameter names
41 |     sharing the common prefix 'module.' '''
42 |     logger.info('remove prefix \'{}\''.format(prefix))
43 |     f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
44 |     return {f(key): value for key, value in state_dict.items()}
45 | 
46 | 
47 | def load_pretrain(model, pretrained_path):
48 |     logger.info('load pretrained model from {}'.format(pretrained_path))
49 |     device = torch.cuda.current_device()  # assumes a CUDA device is available
50 |     pretrained_dict = torch.load(pretrained_path,
51 |         map_location=lambda storage, loc: storage.cuda(device))
52 |     if "state_dict" in pretrained_dict.keys():
53 |         pretrained_dict = remove_prefix(pretrained_dict['state_dict'],
54 |                                         'module.')
55 |     else:
56 |         pretrained_dict = remove_prefix(pretrained_dict, 'module.')
57 | 
58 |     try:
59 |         check_keys(model, pretrained_dict)
60 |     except AssertionError:
61 |         logger.info('[Warning]: using pretrain as features. '
62 |                     'Adding "features." as prefix')
63 |         new_dict = {}
64 |         for k, v in pretrained_dict.items():
65 |             k = 'features.' + k
66 |             new_dict[k] = v
67 |         pretrained_dict = new_dict
68 |         check_keys(model, pretrained_dict)
69 |     model.load_state_dict(pretrained_dict, strict=False)
70 |     return model
71 | 
72 | 
73 | def restore_from(model, optimizer, ckpt_path):
74 |     device = torch.cuda.current_device()
75 |     ckpt = torch.load(ckpt_path,
76 |         map_location=lambda storage, loc: storage.cuda(device))
77 |     epoch = ckpt['epoch']
78 | 
79 |     ckpt_model_dict = remove_prefix(ckpt['state_dict'], 'module.')
80 |     check_keys(model, ckpt_model_dict)
81 |     model.load_state_dict(ckpt_model_dict, strict=False)
82 | 
83 |     check_keys(optimizer, ckpt['optimizer'])
84 |     optimizer.load_state_dict(ckpt['optimizer'])
85 |     return model, optimizer, epoch
86 | 
--------------------------------------------------------------------------------
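A minimal usage sketch of the two helpers above (the checkpoint paths are assumptions, not files shipped with the repo): load_pretrain strips a leading 'module.' prefix from DataParallel checkpoints and, if no key matches, retries with a 'features.' prefix for backbone-only weights; restore_from additionally recovers the optimizer state and epoch counter. Both call torch.cuda.current_device(), so a GPU is required.

```python
import torch.optim as optim
from pysot.models.model_builder_adapn import ModelBuilderADAPN
from pysot.utils.model_load import load_pretrain, restore_from

model = ModelBuilderADAPN()
# evaluation: load weights only
model = load_pretrain(model, './snapshot/general_model.pth')

# resuming training: also restore optimizer state and the epoch counter
optimizer = optim.SGD(model.parameters(), lr=0.005)
model, optimizer, epoch = restore_from(model, optimizer, './snapshot/checkpoint_e19.pth')
```
--------------------------------------------------------------------------------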
/SiamAPN++/pysot/utils/xcorr.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) SenseTime. All Rights Reserved.
2 | 
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 | from __future__ import unicode_literals
7 | 
8 | import torch
9 | import torch.nn.functional as F
10 | 
11 | 
12 | def xcorr_slow(x, kernel):
13 |     """for-loop cross correlation, slow reference version
14 |     """
15 |     batch = x.size()[0]
16 |     out = []
17 |     for i in range(batch):
18 |         px = x[i]
19 |         pk = kernel[i]
20 |         px = px.view(1, px.size()[0], px.size()[1], px.size()[2])
21 |         pk = pk.view(-1, px.size()[1], pk.size()[1], pk.size()[2])
22 |         po = F.conv2d(px, pk)
23 |         out.append(po)
24 |     out = torch.cat(out, 0)
25 |     return out
26 | 
27 | 
28 | def xcorr_fast(x, kernel):
29 |     """grouped conv2d cross correlation, fast version
30 |     """
31 |     batch = kernel.size()[0]
32 |     pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3])
33 |     px = x.view(1, -1, x.size()[2], x.size()[3])
34 |     po = F.conv2d(px, pk, groups=batch)
35 |     po = po.view(batch, -1, po.size()[2], po.size()[3])
36 |     return po
37 | 
38 | 
39 | def xcorr_depthwise(x, kernel):
40 |     """depthwise cross correlation
41 |     """
42 |     batch = kernel.size(0)
43 |     channel = kernel.size(1)
44 |     x = x.view(1, batch*channel, x.size(2), x.size(3))
45 |     kernel = kernel.view(batch*channel, 1, kernel.size(2), kernel.size(3))
46 |     out = F.conv2d(x, kernel, groups=batch*channel)
47 |     out = out.view(batch, channel, out.size(2), out.size(3))
48 |     return out
49 | 
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/__init__.py
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .uav10fps import UAV10Dataset
2 | from .uav20l import UAV20Dataset
3 | from .visdrone1 import VISDRONED2018Dataset
4 | from .v4r import V4RDataset
5 | 
6 | 
7 | class DatasetFactory(object):
8 |     @staticmethod
9 |     def create_dataset(**kwargs):
10 |         assert 'name' in kwargs, "should provide dataset name"
11 |         name = kwargs['name']
12 | 
13 |         if 'UAV123_10fps' in name:
14 |             dataset = UAV10Dataset(**kwargs)
15 |         elif 'UAV20l' in name:
16 |             dataset = UAV20Dataset(**kwargs)
17 |         elif 'VISDRONED2018' in name:
18 |             dataset = VISDRONED2018Dataset(**kwargs)
19 |         elif 'V4RFlight112' in name:
20 |             dataset = V4RDataset(**kwargs)
21 |         else:
22 |             raise Exception("unknown dataset {}".format(kwargs['name']))
23 |         return dataset
24 | 
--------------------------------------------------------------------------------
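A minimal sketch of how the factory above is used by the evaluation tools (the dataset_root path is an assumption): the substring checks mean the `name` argument must literally contain one of 'UAV123_10fps', 'UAV20l', 'VISDRONED2018', or 'V4RFlight112'.

```python
from toolkit.datasets import DatasetFactory

dataset = DatasetFactory.create_dataset(name='UAV123_10fps',
                                        dataset_root='./test_dataset/UAV123_10fps',
                                        load_img=False)
for name in sorted(dataset.videos.keys()):  # Dataset.__getitem__ also accepts int indices
    video = dataset[name]
```
--------------------------------------------------------------------------------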
/SiamAPN++/toolkit/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/dataset.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/dataset.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/dataset.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/dtb.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/dtb.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/got10k.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/got10k.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/lasot.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/lasot.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/otb.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/otb.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/uav.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/uav.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/uav10fps.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/uav10fps.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/uav10fps.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/uav10fps.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/uav20l.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/uav20l.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/uav20l.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/uav20l.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/uavdt.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/uavdt.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/v4r.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/v4r.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/v4r.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/v4r.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/video.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/video.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/video.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/video.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/datasets/__pycache__/visdrone.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/visdrone.cpython-38.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/datasets/__pycache__/visdrone1.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/visdrone1.cpython-37.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/datasets/__pycache__/visdrone1.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/datasets/__pycache__/visdrone1.cpython-38.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/datasets/dataset.py:
--------------------------------------------------------------------------------
1 | from tqdm import tqdm
2 | 
3 | class Dataset(object):
4 |     def __init__(self, name, dataset_root):
5 |         self.name = name
6 |         self.dataset_root = dataset_root
7 |         self.videos = None
8 | 
9 |     def __getitem__(self, idx):
10 |         if isinstance(idx, str):
11 |             return self.videos[idx]
12 |         elif isinstance(idx, int):
13 |             return self.videos[sorted(list(self.videos.keys()))[idx]]
14 | 
15 |     def __len__(self):
16 |         return len(self.videos)
17 | 
18 |     def __iter__(self):
19 |         keys = sorted(list(self.videos.keys()))
20 |         for key in keys:
21 |             yield self.videos[key]
22 | 
23 |     def set_tracker(self, path, tracker_names):
24 |         """
25 |         Args:
26 |             path: path to tracker results
27 |             tracker_names: list of tracker names
28 |         """
29 |         self.tracker_path = path
30 |         self.tracker_names = tracker_names
31 |         # for video in tqdm(self.videos.values(),
32 |         #         desc='loading tracker result', ncols=100):
33 |         #     video.load_tracker(path, tracker_names)
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/datasets/uav10fps.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import numpy as np
4 | 
5 | from PIL import Image
6 | from tqdm import tqdm
7 | from glob import glob
8 | 
9 | from .dataset import Dataset
10 | from .video import Video
11 | 
12 | def ca():  # build the UAV123_10fps meta data from ./test_dataset
13 |     path = './test_dataset/UAV123_10fps'
14 | 
15 |     name_list = os.listdir(path+'/data_seq')
16 |     name_list.sort()
17 |     a = 123  # UAV123_10fps contains 123 sequences
18 |     b = []
19 |     for i in range(a):
20 |         b.append(name_list[i])
21 |     c = []
22 | 
23 |     for jj in range(a):
24 |         imgs = path+'/data_seq/'+str(name_list[jj])
25 |         txt = path+'/anno/'+str(name_list[jj])+'.txt'
26 |         bbox = []
27 |         f = open(txt)  # open the annotation file
28 |         file = f.readlines()
29 |         li = os.listdir(imgs)
30 |         li.sort()
31 |         for ii in range(len(file)):
32 |             li[ii] = name_list[jj]+'/'+li[ii]
33 | 
34 |             line = file[ii].strip('\n').split(',')
35 | 
36 |             try:
37 |                 line[0] = int(line[0])
38 |             except ValueError:
39 |                 line[0] = float(line[0])
40 |             try:
41 |                 line[1] = int(line[1])
42 |             except ValueError:
43 |                 line[1] = float(line[1])
44 |             try:
45 |                 line[2] = int(line[2])
46 |             except ValueError:
47 |                 line[2] = float(line[2])
48 |             try:
49 |                 line[3] = int(line[3])
50 |             except ValueError:
51 |                 line[3] = float(line[3])
52 |             bbox.append(line)
53 | 
54 |         if len(bbox) != len(li):
55 |             print('annotation/frame count mismatch in', name_list[jj])
56 |         f.close()
57 |         c.append({'attr':[],'gt_rect':bbox,'img_names':li,'init_rect':bbox[0],'video_dir':name_list[jj]})
58 | 
59 |     d = dict(zip(b, c))
60 | 
61 |     return d
62 | 
63 | class UAVVideo(Video):
64 |     """
65 |     Args:
66 |         name: video name
67 |         root: dataset root
68 |         video_dir: video directory
69 |         init_rect: init rectangle
70 |         img_names: image names
71 |         gt_rect: groundtruth rectangle
72 |         attr: attribute of video
73 |     """
74 |     def __init__(self, name, root, video_dir, init_rect, img_names,
75 |                  gt_rect, attr, load_img=False):
76 |         super(UAVVideo, self).__init__(name, root, video_dir,
77 |                                        init_rect, img_names, gt_rect, attr, load_img)
78 | 
79 | 
80 | class UAV10Dataset(Dataset):
81 |     """
82 |     Args:
83 |         name: dataset name, should contain 'UAV123_10fps'
84 |         dataset_root: dataset root
85 |         load_img: whether to load all images
86 |     """
87 |     def __init__(self, name, dataset_root, load_img=False):
88 |         super(UAV10Dataset, self).__init__(name, dataset_root)
89 |         meta_data = ca()
90 | 
91 |         # load videos
92 |         pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100)
93 |         self.videos = {}
94 |         for video in pbar:
95 |             pbar.set_postfix_str(video)
96 |             self.videos[video] = UAVVideo(video,
97 |                                           dataset_root+'/data_seq',
98 |                                           meta_data[video]['video_dir'],
99 |                                           meta_data[video]['init_rect'],
100 |                                           meta_data[video]['img_names'],
101 |                                           meta_data[video]['gt_rect'],
102 |                                           meta_data[video]['attr'])
103 | 
104 |         # set attr
105 |         attr = []
106 |         for x in self.videos.values():
107 |             attr += x.attr
108 |         attr = set(attr)
109 |         self.attr = {}
110 |         self.attr['ALL'] = list(self.videos.keys())
111 |         for x in attr:
112 |             self.attr[x] = []
113 |         for k, v in self.videos.items():
114 |             for attr_ in v.attr:
115 |                 self.attr[attr_].append(k)
116 | 
117 | 
--------------------------------------------------------------------------------
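For reference, each value in the meta_data dict returned by ca() above has the following shape (the sequence name and box values here are illustrative, not taken from the dataset):

```python
meta_data = {
    'bike1': {
        'video_dir': 'bike1',
        'init_rect': [544, 212, 51, 104],        # ground-truth box of frame 1, [x, y, w, h]
        'gt_rect': [[544, 212, 51, 104],
                    [547, 213, 51, 104]],        # one box per frame
        'img_names': ['bike1/000001.jpg',
                      'bike1/000002.jpg'],       # paths relative to data_seq/
        'attr': [],                              # attributes are left empty by ca()
    },
}
```
--------------------------------------------------------------------------------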
/SiamAPN++/toolkit/datasets/uav20l.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import numpy as np
4 | 
5 | from PIL import Image
6 | from tqdm import tqdm
7 | from glob import glob
8 | 
9 | from .dataset import Dataset
10 | from .video import Video
11 | 
12 | 
13 | def loaddata():
14 | 
15 |     path = './test_dataset/UAV123_20L'
16 | 
17 |     name_list = os.listdir(path+'/data_seq')
18 |     name_list.sort()
19 | 
20 |     b = []
21 |     for i in range(len(name_list)):
22 |         b.append(name_list[i])
23 |     c = []
24 | 
25 |     for jj in range(len(name_list)):
26 |         imgs = path+'/data_seq/'+str(name_list[jj])
27 |         txt = path+'/anno/'+str(name_list[jj])+'.txt'
28 |         bbox = []
29 |         f = open(txt)  # open the annotation file
30 |         file = f.readlines()
31 |         li = os.listdir(imgs)
32 |         li.sort()
33 |         for ii in range(len(file)):
34 |             li[ii] = name_list[jj]+'/'+li[ii]
35 | 
36 |             line = file[ii].strip('\n').split(',')
37 | 
38 |             try:
39 |                 line[0] = int(line[0])
40 |             except ValueError:
41 |                 line[0] = float(line[0])
42 |             try:
43 |                 line[1] = int(line[1])
44 |             except ValueError:
45 |                 line[1] = float(line[1])
46 |             try:
47 |                 line[2] = int(line[2])
48 |             except ValueError:
49 |                 line[2] = float(line[2])
50 |             try:
51 |                 line[3] = int(line[3])
52 |             except ValueError:
53 |                 line[3] = float(line[3])
54 |             bbox.append(line)
55 | 
56 |         if len(bbox) != len(li):
57 |             print('annotation/frame count mismatch in', name_list[jj])
58 |         f.close()
59 |         c.append({'attr':[],'gt_rect':bbox,'img_names':li,'init_rect':bbox[0],'video_dir':name_list[jj]})
60 | 
61 |     d = dict(zip(b, c))
62 | 
63 |     return d
64 | 
65 | class UAVVideo(Video):
66 |     """
67 |     Args:
68 |         name: video name
69 |         root: dataset root
70 |         video_dir: video directory
71 |         init_rect: init rectangle
72 |         img_names: image names
73 |         gt_rect: groundtruth rectangle
74 |         attr: attribute of video
75 |     """
76 |     def __init__(self, name, root, video_dir, init_rect, img_names,
77 |                  gt_rect, attr, load_img=False):
78 |         super(UAVVideo, self).__init__(name, root, video_dir,
79 |                                        init_rect, img_names, gt_rect, attr, load_img)
80 | 
81 | 
82 | class UAV20Dataset(Dataset):
83 |     """
84 |     Args:
85 |         name: dataset name, should contain 'UAV20l'
86 |         dataset_root: dataset root
87 |         load_img: whether to load all images
88 |     """
89 |     def __init__(self, name, dataset_root, load_img=False):
90 |         super(UAV20Dataset, self).__init__(name, dataset_root)
91 |         meta_data = loaddata()
92 | 
93 |         # load videos
94 |         pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100)
95 |         self.videos = {}
96 |         for video in pbar:
97 |             pbar.set_postfix_str(video)
98 |             self.videos[video] = UAVVideo(video,
99 |                                           dataset_root+'/data_seq',
100 |                                           meta_data[video]['video_dir'],
101 |                                           meta_data[video]['init_rect'],
102 |                                           meta_data[video]['img_names'],
103 |                                           meta_data[video]['gt_rect'],
104 |                                           meta_data[video]['attr'])
105 | 
106 |         # set attr
107 |         attr = []
108 |         for x in self.videos.values():
109 |             attr += x.attr
110 |         attr = set(attr)
111 |         self.attr = {}
112 |         self.attr['ALL'] = list(self.videos.keys())
113 |         for x in attr:
114 |             self.attr[x] = []
115 |         for k, v in self.videos.items():
116 |             for attr_ in v.attr:
117 |                 self.attr[attr_].append(k)
118 | 
119 | 
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | from .ope_benchmark import OPEBenchmark
2 | 
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/evaluation/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/evaluation/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/evaluation/__pycache__/ope_benchmark.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/evaluation/__pycache__/ope_benchmark.cpython-38.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/utils/__pycache__/statistics.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/utils/__pycache__/statistics.cpython-38.pyc
--------------------------------------------------------------------------------
/SiamAPN++/toolkit/utils/statistics.py:
--------------------------------------------------------------------------------
1 | """
2 | @author fangyi.zhang@vipl.ict.ac.cn
3 | """
4 | import numpy as np
5 | 
6 | def overlap_ratio(rect1, rect2):
7 |     '''Compute overlap ratio between two sets of rects
8 |     Args:
9 |         rect1, rect2: 2d arrays of N x [x, y, w, h]
10 |     Return:
11 |         iou: vector of N IoU values
12 |     '''
13 |     # if rect1.ndim==1:
14 |     #     rect1 = rect1[np.newaxis, :]
15 |     # if rect2.ndim==1:
16 |     #     rect2 = rect2[np.newaxis, :]
17 |     left = np.maximum(rect1[:,0], rect2[:,0])
18 |     right = np.minimum(rect1[:,0]+rect1[:,2], rect2[:,0]+rect2[:,2])
19 |     top = np.maximum(rect1[:,1], rect2[:,1])
20 |     bottom = np.minimum(rect1[:,1]+rect1[:,3], rect2[:,1]+rect2[:,3])
21 | 
22 |     intersect = np.maximum(0, right - left) * np.maximum(0, bottom -
top) 23 | union = rect1[:,2]*rect1[:,3] + rect2[:,2]*rect2[:,3] - intersect 24 | iou = intersect / union 25 | iou = np.maximum(np.minimum(1, iou), 0) 26 | return iou 27 | 28 | def success_overlap(gt_bb, result_bb, n_frame): 29 | thresholds_overlap = np.arange(0, 1.05, 0.05) 30 | success = np.zeros(len(thresholds_overlap)) 31 | iou = np.ones(len(gt_bb)) * (-1) 32 | # mask = np.sum(gt_bb > 0, axis=1) == 4 #TODO check all dataset 33 | mask = np.sum(gt_bb[:, 2:] > 0, axis=1) == 2 34 | iou[mask] = overlap_ratio(gt_bb[mask], result_bb[mask]) 35 | for i in range(len(thresholds_overlap)): 36 | success[i] = np.sum(iou > thresholds_overlap[i]) / float(n_frame) 37 | return success 38 | 39 | def success_error(gt_center, result_center, thresholds, n_frame): 40 | # n_frame = len(gt_center) 41 | success = np.zeros(len(thresholds)) 42 | dist = np.ones(len(gt_center)) * (-1) 43 | mask = np.sum(gt_center > 0, axis=1) == 2 44 | dist[mask] = np.sqrt(np.sum( 45 | np.power(gt_center[mask] - result_center[mask], 2), axis=1)) 46 | for i in range(len(thresholds)): 47 | success[i] = np.sum(dist <= thresholds[i]) / float(n_frame) 48 | return success 49 | 50 | 51 | -------------------------------------------------------------------------------- /SiamAPN++/toolkit/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .draw_success_precision import draw_success_precision 2 | -------------------------------------------------------------------------------- /SiamAPN++/toolkit/visualization/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/visualization/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/visualization/__pycache__/draw_success_precision.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/visualization/__pycache__/draw_success_precision.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/visualization/__pycache__/draw_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN++/toolkit/visualization/__pycache__/draw_utils.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN++/toolkit/visualization/draw_utils.py: -------------------------------------------------------------------------------- 1 | 2 | COLOR = ((1, 0, 0), 3 | (0, 1, 0), 4 | (1, 0, 1), 5 | (1, 1, 0), 6 | (0 , 162/255, 232/255), 7 | (0.5, 0.5, 0.5), 8 | (0, 0, 1), 9 | (0, 1, 1), 10 | (136/255, 0 , 21/255), 11 | (255/255, 127/255, 39/255), 12 | (0, 0, 0)) 13 | 14 | LINE_STYLE = ['-', '--', ':', '-', '--', ':', '-', '--', ':', '-'] 15 | 16 | MARKER_STYLE = ['o', 'v', '<', '*', 'D', 'x', '.', 'x', '<', '.'] 17 | -------------------------------------------------------------------------------- /SiamAPN++/tools/demo_apn++.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ 
import print_function
4 | from __future__ import unicode_literals
5 | 
6 | import os
7 | import sys
8 | sys.path.append('../')
9 | 
10 | import argparse
11 | import cv2
12 | import torch
13 | from glob import glob
14 | 
15 | from pysot.core.config_adapn import cfg
16 | from pysot.models.model_builder_adapn import ModelBuilderADAPN
17 | from pysot.tracker.adsiamapn_tracker import ADSiamAPNTracker
18 | from pysot.utils.model_load import load_pretrain
19 | 
20 | torch.set_num_threads(1)
21 | 
22 | parser = argparse.ArgumentParser(description='SiamAPN++ demo')
23 | parser.add_argument('--config', type=str, default='../experiments/siamapn++/config.yaml', help='config file')
24 | parser.add_argument('--snapshot', type=str, default='./snapshot/general_model.pth', help='model name')
25 | parser.add_argument('--video_name', default='../test_dataset/sequence_name', type=str, help='videos or image files')
26 | args = parser.parse_args()
27 | 
28 | 
29 | def get_frames(video_name):
30 |     if not video_name:
31 |         cap = cv2.VideoCapture(0)
32 | 
33 |         # warm up the webcam
34 |         for i in range(5):
35 |             cap.read()
36 |         while True:
37 |             ret, frame = cap.read()
38 |             if ret:
39 |                 yield frame
40 |             else:
41 |                 break
42 |     elif video_name.endswith('avi') or \
43 |             video_name.endswith('mp4'):
44 |         cap = cv2.VideoCapture(video_name)  # use the function argument, not the global args
45 |         while True:
46 |             ret, frame = cap.read()
47 |             if ret:
48 |                 yield frame
49 |             else:
50 |                 break
51 |     else:
52 |         images = sorted(glob(os.path.join(video_name, 'img', '*.jp*')))
53 |         for img in images:
54 |             frame = cv2.imread(img)
55 |             yield frame
56 | 
57 | 
58 | def main():
59 |     # load config
60 |     cfg.merge_from_file(args.config)
61 |     cfg.CUDA = torch.cuda.is_available()
62 |     device = torch.device('cuda' if cfg.CUDA else 'cpu')
63 | 
64 |     # create model
65 |     model = ModelBuilderADAPN()
66 | 
67 |     # load model
68 |     model = load_pretrain(model, args.snapshot).eval().to(device)
69 | 
70 |     # build tracker
71 |     tracker = ADSiamAPNTracker(model, cfg.TRACK)
72 | 
73 | 
74 |     first_frame = True
75 |     if args.video_name:
76 |         video_name = args.video_name.split('/')[-1].split('.')[0]
77 |     else:
78 |         video_name = 'webcam'
79 |     cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)
80 |     for frame in get_frames(args.video_name):
81 |         if first_frame:
82 |             try:
83 |                 init_rect = cv2.selectROI(video_name, frame, False, False)
84 |             except Exception:
85 |                 exit()
86 |             tracker.init(frame, init_rect)
87 |             first_frame = False
88 |         else:
89 |             outputs = tracker.track(frame)
90 |             bbox = list(map(int, outputs['bbox']))
91 |             cv2.rectangle(frame, (bbox[0], bbox[1]),
92 |                           (bbox[0]+bbox[2], bbox[1]+bbox[3]),
93 |                           (0, 255, 0), 3)
94 |             cv2.imshow(video_name, frame)
95 |             cv2.waitKey(40)
96 | 
97 | 
98 | if __name__ == '__main__':
99 |     main()
100 | 
--------------------------------------------------------------------------------
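The image-folder branch of get_frames() above expects frames laid out as <sequence>/img/000001.jpg, matching the --video_name default. A minimal sanity check under that assumption (the sequence path is a placeholder):

```python
import os
from glob import glob

seq = '../test_dataset/sequence_name'  # placeholder path from the demo's default
images = sorted(glob(os.path.join(seq, 'img', '*.jp*')))
print('{} frames found in {}'.format(len(images), seq))
```
--------------------------------------------------------------------------------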
/SiamAPN++/training_dataset/coco/gen_json.py:
--------------------------------------------------------------------------------
1 | from pycocotools.coco import COCO
2 | from os.path import join
3 | import json
4 | 
5 | 
6 | dataDir = '.'
7 | for dataType in ['val2017', 'train2017']:
8 |     dataset = dict()
9 |     annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataType)
10 |     coco = COCO(annFile)
11 |     n_imgs = len(coco.imgs)
12 |     for n, img_id in enumerate(coco.imgs):
13 |         print('subset: {} image id: {:04d} / {:04d}'.format(dataType, n, n_imgs))
14 |         img = coco.loadImgs(img_id)[0]
15 |         annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
16 |         anns = coco.loadAnns(annIds)
17 |         video_crop_base_path = join(dataType, img['file_name'].split('/')[-1].split('.')[0])
18 | 
19 |         if len(anns) > 0:
20 |             dataset[video_crop_base_path] = dict()
21 | 
22 |         for trackid, ann in enumerate(anns):
23 |             rect = ann['bbox']
24 |             c = ann['category_id']
25 |             bbox = [rect[0], rect[1], rect[0]+rect[2], rect[1]+rect[3]]
26 |             if rect[2] <= 0 or rect[3] <= 0:  # degenerate boxes lead to NaN errors in the cls. branch
27 |                 continue
28 |             dataset[video_crop_base_path]['{:02d}'.format(trackid)] = {'000000': bbox}
29 | 
30 |     print('save json (dataset), please wait 20 seconds~')
31 |     json.dump(dataset, open('{}.json'.format(dataType), 'w'), indent=4, sort_keys=True)
32 |     print('done!')
33 | 
34 | 
--------------------------------------------------------------------------------
/SiamAPN++/training_dataset/coco/pycocotools/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | 	# install pycocotools locally
3 | 	python setup.py build_ext --inplace
4 | 	rm -rf build
5 | 
6 | install:
7 | 	# install pycocotools to the Python site-packages
8 | 	python setup.py build_ext install
9 | 	rm -rf build
10 | clean:
11 | 	rm _mask.c _mask.cpython-36m-x86_64-linux-gnu.so
12 | 
--------------------------------------------------------------------------------
/SiamAPN++/training_dataset/coco/pycocotools/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 | 
--------------------------------------------------------------------------------
/SiamAPN++/training_dataset/coco/pycocotools/common/gason.h:
--------------------------------------------------------------------------------
1 | // https://github.com/vivkin/gason - pulled January 10, 2016
2 | #pragma once
3 | 
4 | #include <stdint.h>
5 | #include <stddef.h>
6 | #include <assert.h>
7 | 
8 | enum JsonTag {
9 |     JSON_NUMBER = 0,
10 |     JSON_STRING,
11 |     JSON_ARRAY,
12 |     JSON_OBJECT,
13 |     JSON_TRUE,
14 |     JSON_FALSE,
15 |     JSON_NULL = 0xF
16 | };
17 | 
18 | struct JsonNode;
19 | 
20 | #define JSON_VALUE_PAYLOAD_MASK 0x00007FFFFFFFFFFFULL
21 | #define JSON_VALUE_NAN_MASK 0x7FF8000000000000ULL
22 | #define JSON_VALUE_TAG_MASK 0xF
23 | #define JSON_VALUE_TAG_SHIFT 47
24 | 
25 | union JsonValue {
26 |     uint64_t ival;
27 |     double fval;
28 | 
29 |     JsonValue(double x)
30 |         : fval(x) {
31 |     }
32 |     JsonValue(JsonTag tag = JSON_NULL, void *payload = nullptr) {
33 |         assert((uintptr_t)payload <= JSON_VALUE_PAYLOAD_MASK);
34 |         ival = JSON_VALUE_NAN_MASK | ((uint64_t)tag << JSON_VALUE_TAG_SHIFT) | (uintptr_t)payload;
35 |     }
36 |     bool isDouble() const {
37 |         return (int64_t)ival <= (int64_t)JSON_VALUE_NAN_MASK;
38 |     }
39 |     JsonTag getTag() const {
40 |         return isDouble() ?
JSON_NUMBER : JsonTag((ival >> JSON_VALUE_TAG_SHIFT) & JSON_VALUE_TAG_MASK); 41 | } 42 | uint64_t getPayload() const { 43 | assert(!isDouble()); 44 | return ival & JSON_VALUE_PAYLOAD_MASK; 45 | } 46 | double toNumber() const { 47 | assert(getTag() == JSON_NUMBER); 48 | return fval; 49 | } 50 | char *toString() const { 51 | assert(getTag() == JSON_STRING); 52 | return (char *)getPayload(); 53 | } 54 | JsonNode *toNode() const { 55 | assert(getTag() == JSON_ARRAY || getTag() == JSON_OBJECT); 56 | return (JsonNode *)getPayload(); 57 | } 58 | }; 59 | 60 | struct JsonNode { 61 | JsonValue value; 62 | JsonNode *next; 63 | char *key; 64 | }; 65 | 66 | struct JsonIterator { 67 | JsonNode *p; 68 | 69 | void operator++() { 70 | p = p->next; 71 | } 72 | bool operator!=(const JsonIterator &x) const { 73 | return p != x.p; 74 | } 75 | JsonNode *operator*() const { 76 | return p; 77 | } 78 | JsonNode *operator->() const { 79 | return p; 80 | } 81 | }; 82 | 83 | inline JsonIterator begin(JsonValue o) { 84 | return JsonIterator{o.toNode()}; 85 | } 86 | inline JsonIterator end(JsonValue) { 87 | return JsonIterator{nullptr}; 88 | } 89 | 90 | #define JSON_ERRNO_MAP(XX) \ 91 | XX(OK, "ok") \ 92 | XX(BAD_NUMBER, "bad number") \ 93 | XX(BAD_STRING, "bad string") \ 94 | XX(BAD_IDENTIFIER, "bad identifier") \ 95 | XX(STACK_OVERFLOW, "stack overflow") \ 96 | XX(STACK_UNDERFLOW, "stack underflow") \ 97 | XX(MISMATCH_BRACKET, "mismatch bracket") \ 98 | XX(UNEXPECTED_CHARACTER, "unexpected character") \ 99 | XX(UNQUOTED_KEY, "unquoted key") \ 100 | XX(BREAKING_BAD, "breaking bad") \ 101 | XX(ALLOCATION_FAILURE, "allocation failure") 102 | 103 | enum JsonErrno { 104 | #define XX(no, str) JSON_##no, 105 | JSON_ERRNO_MAP(XX) 106 | #undef XX 107 | }; 108 | 109 | const char *jsonStrError(int err); 110 | 111 | class JsonAllocator { 112 | struct Zone { 113 | Zone *next; 114 | size_t used; 115 | } *head = nullptr; 116 | 117 | public: 118 | JsonAllocator() = default; 119 | JsonAllocator(const JsonAllocator &) = delete; 120 | JsonAllocator &operator=(const JsonAllocator &) = delete; 121 | JsonAllocator(JsonAllocator &&x) : head(x.head) { 122 | x.head = nullptr; 123 | } 124 | JsonAllocator &operator=(JsonAllocator &&x) { 125 | head = x.head; 126 | x.head = nullptr; 127 | return *this; 128 | } 129 | ~JsonAllocator() { 130 | deallocate(); 131 | } 132 | void *allocate(size_t size); 133 | void deallocate(); 134 | }; 135 | 136 | int jsonParse(char *str, char **endptr, JsonValue *value, JsonAllocator &allocator); 137 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/coco/pycocotools/common/maskApi.h: -------------------------------------------------------------------------------- 1 | /************************************************************************** 2 | * Microsoft COCO Toolbox. version 2.0 3 | * Data, paper, and tutorials available at: http://mscoco.org/ 4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 5 | * Licensed under the Simplified BSD License [see coco/license.txt] 6 | **************************************************************************/ 7 | #pragma once 8 | 9 | typedef unsigned int uint; 10 | typedef unsigned long siz; 11 | typedef unsigned char byte; 12 | typedef double* BB; 13 | typedef struct { siz h, w, m; uint *cnts; } RLE; 14 | 15 | /* Initialize/destroy RLE. */ 16 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ); 17 | void rleFree( RLE *R ); 18 | 19 | /* Initialize/destroy RLE array. 
*/ 20 | void rlesInit( RLE **R, siz n ); 21 | void rlesFree( RLE **R, siz n ); 22 | 23 | /* Encode binary masks using RLE. */ 24 | void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n ); 25 | 26 | /* Decode binary masks encoded via RLE. */ 27 | void rleDecode( const RLE *R, byte *mask, siz n ); 28 | 29 | /* Compute union or intersection of encoded masks. */ 30 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect ); 31 | 32 | /* Compute area of encoded masks. */ 33 | void rleArea( const RLE *R, siz n, uint *a ); 34 | 35 | /* Compute intersection over union between masks. */ 36 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o ); 37 | 38 | /* Compute non-maximum suppression between bounding masks */ 39 | void rleNms( RLE *dt, siz n, uint *keep, double thr ); 40 | 41 | /* Compute intersection over union between bounding boxes. */ 42 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ); 43 | 44 | /* Compute non-maximum suppression between bounding boxes */ 45 | void bbNms( BB dt, siz n, uint *keep, double thr ); 46 | 47 | /* Get bounding boxes surrounding encoded masks. */ 48 | void rleToBbox( const RLE *R, BB bb, siz n ); 49 | 50 | /* Convert bounding boxes to encoded masks. */ 51 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n ); 52 | 53 | /* Convert polygon to encoded mask. */ 54 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w ); 55 | 56 | /* Get compressed string representation of encoded mask. */ 57 | char* rleToString( const RLE *R ); 58 | 59 | /* Convert from compressed string representation of encoded mask. */ 60 | void rleFrString( RLE *R, char *s, siz h, siz w ); 61 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/coco/pycocotools/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from Cython.Build import cythonize 3 | from distutils.extension import Extension 4 | import numpy as np 5 | 6 | # To compile and install locally run "python setup.py build_ext --inplace" 7 | # To install library to Python site-packages run "python setup.py build_ext install" 8 | 9 | ext_modules = [ 10 | Extension( 11 | '_mask', 12 | sources=['common/maskApi.c', '_mask.pyx'], 13 | include_dirs = [np.get_include(), 'common'], 14 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'], 15 | ) 16 | ] 17 | 18 | setup(name='pycocotools', 19 | packages=['pycocotools'], 20 | package_dir = {'pycocotools': '.'}, 21 | version='2.0', 22 | ext_modules= 23 | cythonize(ext_modules) 24 | ) 25 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/coco/readme.md: -------------------------------------------------------------------------------- 1 | # Preprocessing COCO 2 | 3 | ### Download raw images and annotations 4 | 5 | ````shell 6 | wget http://images.cocodataset.org/zips/train2017.zip 7 | wget http://images.cocodataset.org/zips/val2017.zip 8 | wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip 9 | 10 | unzip ./train2017.zip 11 | unzip ./val2017.zip 12 | unzip ./annotations_trainval2017.zip 13 | cd pycocotools && make && cd .. 
14 | ```` 15 | 16 | ### Crop & Generate data info (10 min) 17 | 18 | ````shell 19 | #python par_crop.py [crop_size] [num_threads] 20 | python par_crop.py 511 12 21 | python gen_json.py 22 | ```` 23 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/coco/visual.py: -------------------------------------------------------------------------------- 1 | from pycocotools.coco import COCO 2 | import cv2 3 | import numpy as np 4 | 5 | color_bar = np.random.randint(0, 255, (90, 3)) 6 | 7 | visual = True 8 | 9 | dataDir = '.' 10 | dataType = 'val2017' 11 | annFile = '{}/annotations/instances_{}.json'.format(dataDir,dataType) 12 | coco = COCO(annFile) 13 | 14 | for img_id in coco.imgs: 15 | img = coco.loadImgs(img_id)[0] 16 | annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None) 17 | anns = coco.loadAnns(annIds) 18 | im = cv2.imread('{}/{}/{}'.format(dataDir, dataType, img['file_name'])) 19 | for ann in anns: 20 | rect = ann['bbox'] 21 | c = ann['category_id'] 22 | if visual: 23 | pt1 = (int(rect[0]), int(rect[1])) 24 | pt2 = (int(rect[0]+rect[2]), int(rect[1]+rect[3])) 25 | cv2.rectangle(im, pt1, pt2, color_bar[c-1], 3) 26 | cv2.imshow('img', im) 27 | cv2.waitKey(200) 28 | print('done') 29 | 30 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/got10k/gen_json.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from __future__ import unicode_literals 5 | import json 6 | from os.path import join, exists 7 | import os 8 | import pandas as pd 9 | 10 | dataset_path = 'data' 11 | train_sets = ['GOT-10k_Train_split_01','GOT-10k_Train_split_02','GOT-10k_Train_split_03','GOT-10k_Train_split_04', 12 | 'GOT-10k_Train_split_05','GOT-10k_Train_split_06','GOT-10k_Train_split_07','GOT-10k_Train_split_08', 13 | 'GOT-10k_Train_split_09','GOT-10k_Train_split_10','GOT-10k_Train_split_11','GOT-10k_Train_split_12', 14 | 'GOT-10k_Train_split_13','GOT-10k_Train_split_14','GOT-10k_Train_split_15','GOT-10k_Train_split_16', 15 | 'GOT-10k_Train_split_17','GOT-10k_Train_split_18','GOT-10k_Train_split_19'] 16 | val_set = ['val'] 17 | d_sets = {'videos_val':val_set,'videos_train':train_sets} 18 | # videos_val = ['MOT17-02-DPM'] 19 | # videos_train = ['MOT17-04-DPM','MOT17-05-DPM','MOT17-09-DPM','MOT17-11-DPM','MOT17-13-DPM'] 20 | # d_sets = {'videos_val':videos_val,'videos_train':videos_train} 21 | 22 | def parse_and_sched(dl_dir='.'): 23 | # For each of the two datasets 24 | js = {} 25 | for d_set in d_sets: 26 | for dataset in d_sets[d_set]: 27 | videos = os.listdir(os.path.join(dataset_path,dataset)) 28 | for video in videos: 29 | if video == 'list.txt': 30 | continue 31 | video = dataset+'/'+video 32 | gt_path = join(dataset_path, video, 'groundtruth.txt') 33 | f = open(gt_path, 'r') 34 | groundtruth = f.readlines() 35 | f.close() 36 | for idx, gt_line in enumerate(groundtruth): 37 | gt_image = gt_line.strip().split(',') 38 | frame = '%06d' % (int(idx)) 39 | obj = '%02d' % (int(0)) 40 | bbox = [int(float(gt_image[0])), int(float(gt_image[1])), 41 | int(float(gt_image[0])) + int(float(gt_image[2])), 42 | int(float(gt_image[1])) + int(float(gt_image[3]))] # xmin,ymin,xmax,ymax 43 | 44 | if video not in js: 45 | js[video] = {} 46 | if obj not in js[video]: 47 | js[video][obj] = {} 48 | js[video][obj][frame] = bbox 49 | if 'videos_val' == d_set: 50 | json.dump(js, open('val.json', 'w'), indent=4, sort_keys=True) 51 
| else:
52 |             json.dump(js, open('train.json', 'w'), indent=4, sort_keys=True)
53 |         js = {}
54 | 
55 |     print(d_set + ': all annotations parsed')
56 | 
57 | 
58 | if __name__ == '__main__':
59 |     parse_and_sched()
60 | 
--------------------------------------------------------------------------------
/SiamAPN++/training_dataset/got10k/readme.md:
--------------------------------------------------------------------------------
1 | # Preprocessing GOT-10K
2 | A Large High-Diversity Benchmark for Generic Object Tracking in the Wild
3 | 
4 | ### Prepare dataset
5 | 
6 | After downloading the dataset, unzip it under the *train_dataset/got10k* directory:
7 | 
8 | ````shell
9 | mkdir data
10 | unzip full_data/train_data/*.zip -d ./data
11 | ````
12 | 
13 | ### Crop & Generate data info
14 | 
15 | ````shell
16 | #python par_crop.py [crop_size] [num_threads]
17 | python par_crop.py 511 12
18 | python gen_json.py
19 | ````
20 | 
--------------------------------------------------------------------------------
/SiamAPN++/training_dataset/vid/gen_json.py:
--------------------------------------------------------------------------------
1 | from os.path import join
2 | from os import listdir
3 | import json
4 | import numpy as np
5 | 
6 | print('load json (raw vid info), please wait 20 seconds~')
7 | vid = json.load(open('vid.json', 'r'))
8 | 
9 | 
10 | def check_size(frame_sz, bbox):
11 |     min_ratio = 0.1
12 |     max_ratio = 0.75
13 |     # only accept objects >10% and <75% of the total frame
14 |     area_ratio = np.sqrt((bbox[2]-bbox[0])*(bbox[3]-bbox[1])/float(np.prod(frame_sz)))
15 |     ok = (area_ratio > min_ratio) and (area_ratio < max_ratio)
16 |     return ok
17 | 
18 | 
19 | def check_borders(frame_sz, bbox):
20 |     dist_from_border = 0.05 * (bbox[2] - bbox[0] + bbox[3] - bbox[1])/2
21 |     ok = (bbox[0] > dist_from_border) and (bbox[1] > dist_from_border) and \
22 |         ((frame_sz[0] - bbox[2]) > dist_from_border) and \
23 |         ((frame_sz[1] - bbox[3]) > dist_from_border)
24 |     return ok
25 | 
26 | 
27 | snippets = dict()
28 | n_snippets = 0
29 | n_videos = 0
30 | for subset in vid:
31 |     for video in subset:
32 |         n_videos += 1
33 |         frames = video['frame']
34 |         id_set = []
35 |         id_frames = [[]] * 60  # at most 60 objects
36 |         for f, frame in enumerate(frames):
37 |             objs = frame['objs']
38 |             frame_sz = frame['frame_sz']
39 |             for obj in objs:
40 |                 trackid = obj['trackid']
41 |                 occluded = obj['occ']
42 |                 bbox = obj['bbox']
43 |                 # if occluded:
44 |                 #     continue
45 |                 #
46 |                 # if not(check_size(frame_sz, bbox) and check_borders(frame_sz, bbox)):
47 |                 #     continue
48 |                 #
49 |                 # if obj['c'] in ['n01674464', 'n01726692', 'n04468005', 'n02062744']:
50 |                 #     continue
51 | 
52 |                 if trackid not in id_set:
53 |                     id_set.append(trackid)
54 |                     id_frames[trackid] = []
55 |                 id_frames[trackid].append(f)
56 |         if len(id_set) > 0:
57 |             snippets[video['base_path']] = dict()
58 |         for selected in id_set:
59 |             frame_ids = sorted(id_frames[selected])
60 |             sequences = np.split(frame_ids, np.array(np.where(np.diff(frame_ids) > 1)[0]) + 1)
61 |             sequences = [s for s in sequences if len(s) > 1]  # remove isolated frames
62 | for seq in sequences: 63 | snippet = dict() 64 | for frame_id in seq: 65 | frame = frames[frame_id] 66 | for obj in frame['objs']: 67 | if obj['trackid'] == selected: 68 | o = obj 69 | continue 70 | snippet[frame['img_path'].split('.')[0]] = o['bbox'] 71 | snippets[video['base_path']]['{:02d}'.format(selected)] = snippet 72 | n_snippets += 1 73 | print('video: {:d} snippets_num: {:d}'.format(n_videos, n_snippets)) 74 | 75 | train = {k:v for (k,v) in snippets.items() if 'train' in k} 76 | val = {k:v for (k,v) in snippets.items() if 'val' in k} 77 | 78 | json.dump(train, open('train.json', 'w'), indent=4, sort_keys=True) 79 | json.dump(val, open('val.json', 'w'), indent=4, sort_keys=True) 80 | print('done!') 81 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/vid/parse_vid.py: -------------------------------------------------------------------------------- 1 | from os.path import join 2 | from os import listdir 3 | import json 4 | import glob 5 | import xml.etree.ElementTree as ET 6 | 7 | VID_base_path = './ILSVRC2015' 8 | ann_base_path = join(VID_base_path, 'Annotations/VID/train/') 9 | img_base_path = join(VID_base_path, 'Data/VID/train/') 10 | sub_sets = sorted({'a', 'b', 'c', 'd', 'e'}) 11 | 12 | vid = [] 13 | for sub_set in sub_sets: 14 | sub_set_base_path = join(ann_base_path, sub_set) 15 | videos = sorted(listdir(sub_set_base_path)) 16 | s = [] 17 | for vi, video in enumerate(videos): 18 | print('subset: {} video id: {:04d} / {:04d}'.format(sub_set, vi, len(videos))) 19 | v = dict() 20 | v['base_path'] = join(sub_set, video) 21 | v['frame'] = [] 22 | video_base_path = join(sub_set_base_path, video) 23 | xmls = sorted(glob.glob(join(video_base_path, '*.xml'))) 24 | for xml in xmls: 25 | f = dict() 26 | xmltree = ET.parse(xml) 27 | size = xmltree.findall('size')[0] 28 | frame_sz = [int(it.text) for it in size] 29 | objects = xmltree.findall('object') 30 | objs = [] 31 | for object_iter in objects: 32 | trackid = int(object_iter.find('trackid').text) 33 | name = (object_iter.find('name')).text 34 | bndbox = object_iter.find('bndbox') 35 | occluded = int(object_iter.find('occluded').text) 36 | o = dict() 37 | o['c'] = name 38 | o['bbox'] = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text), 39 | int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)] 40 | o['trackid'] = trackid 41 | o['occ'] = occluded 42 | objs.append(o) 43 | f['frame_sz'] = frame_sz 44 | f['img_path'] = xml.split('/')[-1].replace('xml', 'JPEG') 45 | f['objs'] = objs 46 | v['frame'].append(f) 47 | s.append(v) 48 | vid.append(s) 49 | print('save json (raw vid info), please wait 1 min~') 50 | json.dump(vid, open('vid.json', 'w'), indent=4, sort_keys=True) 51 | print('done!') 52 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/vid/readme.md: -------------------------------------------------------------------------------- 1 | # Preprocessing VID(Object detection from video) 2 | Large Scale Visual Recognition Challenge 2015 (ILSVRC2015) 3 | 4 | ### Download dataset (86GB) 5 | 6 | ````shell 7 | wget http://bvisionweb1.cs.unc.edu/ilsvrc2015/ILSVRC2015_VID.tar.gz 8 | tar -xzvf ./ILSVRC2015_VID.tar.gz 9 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0000 ILSVRC2015/Annotations/VID/train/a 10 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0001 ILSVRC2015/Annotations/VID/train/b 11 | ln -sfb 
ln -sfb $PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0002 ILSVRC2015/Annotations/VID/train/c
12 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0003 ILSVRC2015/Annotations/VID/train/d
13 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/val ILSVRC2015/Annotations/VID/train/e
14 | 
15 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0000 ILSVRC2015/Data/VID/train/a
16 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0001 ILSVRC2015/Data/VID/train/b
17 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0002 ILSVRC2015/Data/VID/train/c
18 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0003 ILSVRC2015/Data/VID/train/d
19 | ln -sfb $PWD/ILSVRC2015/Data/VID/val ILSVRC2015/Data/VID/train/e
20 | ````
21 | 
22 | ### Crop & Generate data info (20 min)
23 | 
24 | ````shell
25 | python parse_vid.py
26 | 
27 | #python par_crop.py [crop_size] [num_threads]
28 | python par_crop.py 511 12
29 | python gen_json.py
30 | ````
31 | 
--------------------------------------------------------------------------------
/SiamAPN++/training_dataset/vid/visual.py:
--------------------------------------------------------------------------------
1 | from os.path import join
2 | from os import listdir
3 | import cv2
4 | import numpy as np
5 | import glob
6 | import xml.etree.ElementTree as ET
7 | 
8 | visual = False
9 | color_bar = np.random.randint(0, 255, (90, 3))
10 | 
11 | VID_base_path = './ILSVRC2015'
12 | ann_base_path = join(VID_base_path, 'Annotations/VID/train/')
13 | img_base_path = join(VID_base_path, 'Data/VID/train/')
14 | sub_sets = sorted({'a', 'b', 'c', 'd', 'e'})
15 | for sub_set in sub_sets:
16 |     sub_set_base_path = join(ann_base_path, sub_set)
17 |     videos = sorted(listdir(sub_set_base_path))
18 |     for vi, video in enumerate(videos):
19 |         print('subset: {} video id: {:04d} / {:04d}'.format(sub_set, vi, len(videos)))
20 | 
21 |         video_base_path = join(sub_set_base_path, video)
22 |         xmls = sorted(glob.glob(join(video_base_path, '*.xml')))
23 |         for xml in xmls:
24 |             f = dict()
25 |             xmltree = ET.parse(xml)
26 |             size = xmltree.findall('size')[0]
27 |             frame_sz = [int(it.text) for it in size]
28 |             objects = xmltree.findall('object')
29 |             if visual:
30 |                 im = cv2.imread(xml.replace('xml', 'JPEG').replace('Annotations', 'Data'))
31 |             for object_iter in objects:
32 |                 trackid = int(object_iter.find('trackid').text)
33 |                 bndbox = object_iter.find('bndbox')
34 |                 bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text),
35 |                         int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]
36 |                 if visual:
37 |                     pt1 = (int(bbox[0]), int(bbox[1]))
38 |                     pt2 = (int(bbox[2]), int(bbox[3]))
39 |                     cv2.rectangle(im, pt1, pt2, color_bar[trackid], 3)
40 |             if visual:
41 |                 cv2.imshow('img', im)
42 |                 cv2.waitKey(1)
43 | 
44 | print('done!')
45 | 
--------------------------------------------------------------------------------
/SiamAPN++/training_dataset/yt_bb/checknum.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import glob
3 | 
4 | col_names = ['youtube_id', 'timestamp_ms', 'class_id', 'class_name',
5 |              'object_id', 'object_presence', 'xmin', 'xmax', 'ymin', 'ymax']
6 | 
7 | sets = ['yt_bb_detection_validation', 'yt_bb_detection_train']
8 | 
9 | for subset in sets:
10 |     df = pd.read_csv('./' + subset + '.csv', header=None, index_col=False)  # DataFrame.from_csv was removed from pandas
11 |     df.columns = col_names
12 |     vids = sorted(df['youtube_id'].unique())
13 |     n_vids = len(vids)
14 |     print('Total videos in {}.csv is {:d}'.format(subset,
n_vids)) 15 | 16 | frame_download = glob.glob('./{}/*/*.jpg'.format(subset)) 17 | frame_download = [frame.split('/')[-1] for frame in frame_download] 18 | frame_download = [frame[:frame.find('_')] for frame in frame_download] 19 | frame_download = [frame[:frame.find('_')] for frame in frame_download] 20 | frame_download = [frame[:frame.find('_')] for frame in frame_download] 21 | frame_download = sorted(set(frame_download)) 22 | # print(frame_download) 23 | print('Total downloaded in {} is {:d}'.format(subset, len(frame_download))) 24 | 25 | 26 | print('done') 27 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/yt_bb/gen_json.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from __future__ import unicode_literals 5 | import json 6 | from os.path import join, exists 7 | import pandas as pd 8 | 9 | # The data sets to be downloaded 10 | d_sets = ['yt_bb_detection_validation', 'yt_bb_detection_train'] 11 | 12 | # Column names for detection CSV files 13 | col_names = ['youtube_id', 'timestamp_ms','class_id','class_name', 14 | 'object_id','object_presence','xmin','xmax','ymin','ymax'] 15 | 16 | instanc_size = 511 17 | crop_path = './crop{:d}'.format(instanc_size) 18 | 19 | 20 | def parse_and_sched(dl_dir='.'): 21 | # For each of the two datasets 22 | js = {} 23 | for d_set in d_sets: 24 | 25 | # Make the directory for this dataset 26 | d_set_dir = dl_dir+'/'+d_set+'/' 27 | 28 | # Parse csv data using pandas 29 | print (d_set+': Parsing annotations into clip data...') 30 | df = pd.DataFrame.from_csv(d_set+'.csv', header=None, index_col=False) 31 | df.columns = col_names 32 | 33 | # Get list of unique video files 34 | vids = df['youtube_id'].unique() 35 | 36 | for vid in vids: 37 | data = df[df['youtube_id']==vid] 38 | for index, row in data.iterrows(): 39 | youtube_id, timestamp_ms, class_id, class_name, \ 40 | object_id, object_presence, x1, x2, y1, y2 = row 41 | 42 | if object_presence == 'absent': 43 | continue 44 | 45 | if x1 < 0 or x2 < 0 or y1 < 0 or y2 < 0 or y2 < y1 or x2 < x1: 46 | continue 47 | 48 | bbox = [x1, y1, x2, y2] 49 | frame = '%06d' % (int(timestamp_ms) / 1000) 50 | obj = '%02d' % (int(object_id)) 51 | video = join(d_set_dir + str(class_id), youtube_id) 52 | 53 | if not exists(join(crop_path, video, '{}.{}.x.jpg'.format(frame, obj))): 54 | continue 55 | 56 | if video not in js: 57 | js[video] = {} 58 | if obj not in js[video]: 59 | js[video][obj] = {} 60 | js[video][obj][frame] = bbox 61 | 62 | if 'yt_bb_detection_train' == d_set: 63 | json.dump(js, open('train.json', 'w'), indent=4, sort_keys=True) 64 | else: 65 | json.dump(js, open('val.json', 'w'), indent=4, sort_keys=True) 66 | js = {} 67 | print(d_set+': All videos downloaded' ) 68 | 69 | 70 | if __name__ == '__main__': 71 | parse_and_sched() 72 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/yt_bb/readme.md: -------------------------------------------------------------------------------- 1 | # Preprocessing Youtube-bb(YouTube-BoundingBoxes Dataset) 2 | 3 | ### Download raw label 4 | 5 | ````shell 6 | wget https://research.google.com/youtube-bb/yt_bb_detection_train.csv.gz 7 | wget https://research.google.com/youtube-bb/yt_bb_detection_validation.csv.gz 8 | 9 | gzip -d ./yt_bb_detection_train.csv.gz 10 | gzip -d ./yt_bb_detection_validation.csv.gz 11 | ```` 12 | 13 | ### Download raw image by 
`youtube-bb-utility`(spend long time, 400GB) 14 | 15 | ````shell 16 | git clone https://github.com/mehdi-shiba/youtube-bb-utility.git 17 | cd youtube-bb-utility 18 | pip install -r requirements.txt 19 | # python download_detection.py [VIDEO_DIR] [NUM_THREADS] 20 | python download_detection.py ../ 12 21 | cd .. 22 | ```` 23 | 24 | ### Crop & Generate data info (1 DAY) 25 | 26 | ````shell 27 | python par_crop.py 28 | python gen_json.py 29 | ```` 30 | -------------------------------------------------------------------------------- /SiamAPN++/training_dataset/yt_bb/visual.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import pandas as pd 3 | import numpy as np 4 | import cv2 5 | 6 | visual = True 7 | 8 | col_names = ['youtube_id', 'timestamp_ms', 'class_id', 'class_name', 9 | 'object_id', 'object_presence', 'xmin', 'xmax', 'ymin', 'ymax'] 10 | 11 | df = pd.DataFrame.from_csv('yt_bb_detection_validation.csv', header=None, index_col=False) 12 | df.columns = col_names 13 | frame_num = len(df['youtube_id']) 14 | 15 | img_path = glob.glob('/mnt/qwang/youtubebb/frames/val*/*/*.jpg') 16 | d = {key.split('/')[-1]: value for (value, key) in enumerate(img_path)} 17 | 18 | for n in range(frame_num): 19 | if df['object_presence'][n]: 20 | frame_name = df['youtube_id'][n] + '_' + str(df['timestamp_ms'][n]) + '_' + \ 21 | str(df['class_id'][n]) + '_' + str(df['object_id'][n]) + '.jpg' 22 | bbox = np.array([df['xmin'][n],df['ymin'][n],df['xmax'][n],df['ymax'][n]]) 23 | if frame_name in d.keys(): 24 | frame_path = img_path[d[frame_name]] 25 | if visual: 26 | im = cv2.imread(frame_path) 27 | h, w, _ = im.shape 28 | pt1 = (int(bbox[0]*w), int(bbox[1]*h)) 29 | pt2 = (int(bbox[2]*w), int(bbox[3]*h)) 30 | cv2.rectangle(im, pt1, pt2, (0, 255, 0), 2) 31 | cv2.imshow('img', im) 32 | cv2.waitKey(100) 33 | else: 34 | print('no image: {}'.format(frame_name)) 35 | pass 36 | else: 37 | pass 38 | 39 | print('done') 40 | 41 | -------------------------------------------------------------------------------- /SiamAPN/experiments/config.yaml: -------------------------------------------------------------------------------- 1 | META_ARC: "SiamAPN_alexnet" 2 | 3 | BACKBONE: 4 | TYPE: "alexnet" 5 | PRETRAINED: 'alexnet-bn.pth' 6 | TRAIN_LAYERS: ['layer3','layer4','layer5'] 7 | TRAIN_EPOCH: 10 8 | LAYERS_LR: 0.1 9 | 10 | TRACK: 11 | TYPE: 'SiamAPNtracker' 12 | EXEMPLAR_SIZE: 127 13 | INSTANCE_SIZE: 287 14 | CONTEXT_AMOUNT: 0.5 15 | STRIDE: 8 16 | PENALTY_K: 0.08 17 | LR: 0.302 18 | w1: 1.18 19 | w2: 1.0 20 | w3: 1.0 21 | 22 | TRAIN: 23 | EPOCH: 50 24 | START_EPOCH: 0 25 | BATCH_SIZE: 124 26 | NUM_GPU: 2 27 | BASE_LR: 0.005 28 | RESUME: '' 29 | WEIGHT_DECAY : 0.0001 30 | PRETRAINED: '' 31 | OUTPUT_SIZE: 21 32 | NUM_WORKERS: 8 33 | LOC_WEIGHT: 1.0 34 | CLS_WEIGHT: 1.0 35 | SHAPE_WEIGHT: 1.0 36 | w1: 1.2 37 | w2: 1.0 38 | w3: 1.0 39 | w4: 1.0 40 | w5: 1.0 41 | POS_NUM : 16 42 | TOTAL_NUM : 64 43 | NEG_NUM : 16 44 | LARGER: 1.0 45 | range : 1.0 46 | LR: 47 | TYPE: 'log' 48 | KWARGS: 49 | start_lr: 0.01 50 | end_lr: 0.0005 51 | 52 | LR_WARMUP: 53 | TYPE: 'step' 54 | EPOCH: 5 55 | KWARGS: 56 | start_lr: 0.005 57 | end_lr: 0.01 58 | step: 1 59 | 60 | DATASET: 61 | NAMES: 62 | - 'VID' 63 | - 'COCO' 64 | - 'GOT' 65 | - 'YOUTUBEBB' 66 | 67 | 68 | TEMPLATE: 69 | SHIFT: 4 70 | SCALE: 0.05 71 | BLUR: 0.0 72 | FLIP: 0.0 73 | COLOR: 1.0 74 | 75 | SEARCH: 76 | SHIFT: 64 77 | SCALE: 0.18 78 | BLUR: 0.2 79 | FLIP: 0.0 80 | COLOR: 1.0 81 | 82 | NEG: 0.05 83 | GRAY: 0.0 84 | 
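Note: the YAML above drives both training (`tools/train_apn++.py` style scripts) and tracking. In pysot-style code it is merged into the yacs `cfg` node defined in `pysot/core/config_apn.py`; as a minimal standalone sketch (using PyYAML directly rather than the repo's own config machinery, and assuming the file sits at `SiamAPN/experiments/config.yaml`), the values can be loaded and inspected like this:

```python
# Minimal sketch: read the experiment YAML and pull a few hyper-parameters.
# Illustration only; the repo itself merges this file into its yacs-based
# `cfg` object instead of loading it ad hoc.
import yaml

with open('SiamAPN/experiments/config.yaml') as f:
    cfg = yaml.safe_load(f)

print(cfg['META_ARC'])                           # SiamAPN_alexnet
print(cfg['TRACK']['INSTANCE_SIZE'])             # 287, the search-region size
print(cfg['TRAIN']['LR']['KWARGS']['start_lr'])  # 0.01, decaying log-style to 0.0005
```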
-------------------------------------------------------------------------------- /SiamAPN/pretrained_models/alexnet-bn.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pretrained_models/alexnet-bn.pth -------------------------------------------------------------------------------- /SiamAPN/pysot/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/__init__.py -------------------------------------------------------------------------------- /SiamAPN/pysot/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__init__.py -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/config.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/config.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/config.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/config.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/config_adapn.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/config_adapn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/config_adapn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/config_adapn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/config_apn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/config_apn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/core/__pycache__/config_apn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/core/__pycache__/config_apn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__init__.py -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/anchortarget.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/anchortarget.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/anchortarget.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/anchortarget.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/anchortarget_adapn.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/anchortarget_adapn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/anchortarget_adapn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/anchortarget_adapn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/anchortarget_apn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/anchortarget_apn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/anchortarget_apn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/anchortarget_apn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/augmentation.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/augmentation.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/dataset.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/dataset.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/dataset_adapn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/dataset_adapn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/datasets/__pycache__/dataset_apn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/datasets/__pycache__/dataset_apn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__init__.py -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/loss.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/loss.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/loss_adapn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/loss_adapn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/loss_adapn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/loss_adapn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/loss_apn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/loss_apn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/loss_apn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/loss_apn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/loss_car.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/loss_car.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/model_builder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/model_builder.cpython-38.pyc 
-------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/model_builder_adapn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/model_builder_adapn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/model_builder_adapn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/model_builder_adapn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/model_builder_apn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/model_builder_apn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/model_builder_apn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/model_builder_apn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/newalexnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/newalexnet.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/newbackbone.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/newbackbone.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/utile.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/utile.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/utile_adapn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/utile_adapn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/utile_adapn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/utile_adapn.cpython-38.pyc 
-------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/utile_apn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/utile_apn.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/__pycache__/utile_apn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/__pycache__/utile_apn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/alexnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/alexnet.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/alexnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/alexnet.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/mobile_v2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/mobile_v2.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/mobile_v2.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/mobile_v2.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/newalexnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/newalexnet.cpython-37.pyc 
-------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/newalexnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/newalexnet.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/resnet_atrous.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/resnet_atrous.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/__pycache__/resnet_atrous.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/models/backbone/__pycache__/resnet_atrous.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/models/backbone/alexnet.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class AlexNet(nn.Module): 5 | configs = [3, 96, 256, 384, 384, 256] 6 | 7 | def __init__(self, width_mult=1): 8 | configs = list(map(lambda x: 3 if x == 3 else 9 | int(x*width_mult), AlexNet.configs)) 10 | super(AlexNet, self).__init__() 11 | self.layer1 = nn.Sequential( 12 | nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2), 13 | nn.BatchNorm2d(configs[1]), 14 | nn.MaxPool2d(kernel_size=3, stride=2), 15 | nn.ReLU(inplace=True), 16 | ) 17 | self.layer2 = nn.Sequential( 18 | nn.Conv2d(configs[1], configs[2], kernel_size=5), 19 | nn.BatchNorm2d(configs[2]), 20 | nn.MaxPool2d(kernel_size=3, stride=2), 21 | nn.ReLU(inplace=True), 22 | ) 23 | self.layer3 = nn.Sequential( 24 | nn.Conv2d(configs[2], configs[3], kernel_size=3), 25 | nn.BatchNorm2d(configs[3]), 26 | nn.ReLU(inplace=True), 27 | ) 28 | self.layer4 = nn.Sequential( 29 | nn.Conv2d(configs[3], configs[4], kernel_size=3), 30 | nn.BatchNorm2d(configs[4]), 31 | nn.ReLU(inplace=True), 32 | ) 33 | 34 | self.layer5 = nn.Sequential( 35 | nn.Conv2d(configs[4], configs[5], kernel_size=3), 36 | nn.BatchNorm2d(configs[5]), 37 | ) 38 | self.feature_size = configs[5] 39 | for param in self.layer1.parameters(): 40 | param.requires_grad = False 41 | for param in self.layer2.parameters(): 42 | param.requires_grad = False 43 | 44 | def forward(self, x): 45 | x = self.layer1(x) 46 | x = self.layer2(x) 47 | x = self.layer3(x) 48 | x1 = self.layer4(x) 49 | x = self.layer5(x1) 50 | return x1,x 51 | 52 | -------------------------------------------------------------------------------- /SiamAPN/pysot/models/init_weight.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def init_weights(model): 5 | for m in model.modules(): 6 | if isinstance(m, nn.Conv2d): 7 | nn.init.kaiming_normal_(m.weight.data, 8 | mode='fan_out', 9 | nonlinearity='relu') 10 | elif isinstance(m, nn.BatchNorm2d): 11 | m.weight.data.fill_(1) 12 | m.bias.data.zero_() 13 | -------------------------------------------------------------------------------- /SiamAPN/pysot/models/loss_apn.py: 
--------------------------------------------------------------------------------
 1 | # Copyright (c) SenseTime. All Rights Reserved.
 2 | 
 3 | from __future__ import absolute_import
 4 | from __future__ import division
 5 | from __future__ import print_function
 6 | from __future__ import unicode_literals
 7 | from torch import nn
 8 | 
 9 | import torch
10 | import torch.nn.functional as F
11 | 
12 | 
13 | def get_cls_loss(pred, label, select):
14 |     if len(select.size()) == 0 or \
15 |             select.size() == torch.Size([0]):
16 |         return 0
17 |     pred = torch.index_select(pred, 0, select)
18 |     label = torch.index_select(label, 0, select)
19 |     label = label.long()
20 |     return F.nll_loss(pred, label)
21 | 
22 | 
23 | def select_cross_entropy_loss(pred, label):
24 |     pred = pred.view(-1, 2)
25 |     label = label.view(-1)
26 |     pos = label.data.eq(1).nonzero(as_tuple=False).squeeze().cuda()
27 |     neg = label.data.eq(0).nonzero(as_tuple=False).squeeze().cuda()
28 |     loss_pos = get_cls_loss(pred, label, pos)
29 |     loss_neg = get_cls_loss(pred, label, neg)
30 |     return loss_pos * 0.5 + loss_neg * 0.5
31 | 
32 | 
33 | def l1loss(pre, label, weight):
34 |     # weighted mean absolute error
35 |     loss = (torch.abs(pre - label) * weight).sum() / weight.sum()
36 |     return loss
37 | 
38 | 
39 | def weight_l1_loss(pred_loc, label_loc, loss_weight):
40 |     b, _, sh, sw = pred_loc.size()
41 |     pred_loc = pred_loc.view(b, 4, -1, sh, sw)
42 |     diff = (pred_loc - label_loc).abs()
43 |     diff = diff.sum(dim=1).view(b, -1, sh, sw)
44 |     loss = diff * loss_weight
45 |     return loss.sum().div(b)
46 | 
47 | 
48 | class IOULoss(nn.Module):
49 |     def forward(self, pred, target, weight=None):
50 |         pred_left = pred[:, :, 0]
51 |         pred_top = pred[:, :, 1]
52 |         pred_right = pred[:, :, 2]
53 |         pred_bottom = pred[:, :, 3]
54 | 
55 |         target_left = target[:, :, 0]
56 |         target_top = target[:, :, 1]
57 |         target_right = target[:, :, 2]
58 |         target_bottom = target[:, :, 3]
59 | 
60 |         target_area = (target_right - target_left) * \
61 |                       (target_bottom - target_top)
62 |         pred_area = (pred_right - pred_left) * \
63 |                     (pred_bottom - pred_top)
64 | 
65 |         w_intersect = torch.min(pred_right, target_right) - torch.max(pred_left, target_left)
66 |         w_intersect = w_intersect.clamp(min=0)
67 |         h_intersect = torch.min(pred_bottom, target_bottom) - torch.max(pred_top, target_top)
68 |         h_intersect = h_intersect.clamp(min=0)
69 |         area_intersect = w_intersect * h_intersect
70 |         area_union = target_area + pred_area - area_intersect
71 |         ious = (area_intersect / (area_union + 1e-6)).clamp(min=0) + 1e-6
72 | 
73 |         # negative log-IoU, averaged over the positively weighted locations
74 |         losses = -torch.log(ious)
75 |         weight = weight.view(losses.size())
76 |         if weight.sum() > 0:
77 |             return (losses * weight).sum() / (weight.sum() + 1e-6)
78 |         else:
79 |             return (losses * weight).sum()
--------------------------------------------------------------------------------
/SiamAPN/pysot/tracker/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__init__.py
--------------------------------------------------------------------------------
/SiamAPN/pysot/tracker/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/SiamAPN/pysot/tracker/__pycache__/__init__.cpython-38.pyc:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/__pycache__/adsiamapn_tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/adsiamapn_tracker.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/__pycache__/adsiamapn_tracker.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/adsiamapn_tracker.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/__pycache__/base_tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/base_tracker.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/__pycache__/base_tracker.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/base_tracker.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/__pycache__/dsiamrpn_tracker.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/dsiamrpn_tracker.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/__pycache__/siamapn_tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/siamapn_tracker.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/__pycache__/siamapn_tracker.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/tracker/__pycache__/siamapn_tracker.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/tracker/base_tracker.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 
 2 | 
 3 | from __future__ import absolute_import
 4 | from __future__ import division
 5 | from __future__ import print_function
 6 | from __future__ import unicode_literals
 7 | 
 8 | import cv2
 9 | import numpy as np
10 | import torch
11 | 
12 | from pysot.core.config_apn import cfg
13 | 
14 | 
15 | class BaseTracker(object):
16 |     """ Base tracker for single object tracking
17 |     """
18 |     def init(self, img, bbox):
19 |         """
20 |         args:
21 |             img(np.ndarray): BGR image
22 |             bbox(list): [x, y, width, height]
23 |                 x, y need to be 0-based
24 |         """
25 |         raise NotImplementedError
26 | 
27 |     def track(self, img):
28 |         """
29 |         args:
30 |             img(np.ndarray): BGR image
31 |         return:
32 |             bbox(list): [x, y, width, height]
33 |         """
34 |         raise NotImplementedError
35 | 
36 | 
37 | class SiameseTracker(BaseTracker):
38 |     def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):
39 |         """
40 |         args:
41 |             im: BGR image
42 |             pos: center position
43 |             model_sz: exemplar size
44 |             original_sz: original (context) size
45 |             avg_chans: channel average used for padding
46 |         """
47 |         if isinstance(pos, float):
48 |             pos = [pos, pos]
49 |         sz = original_sz
50 |         im_sz = im.shape
51 |         c = (original_sz + 1) / 2
52 |         # context_xmin = round(pos[0] - c)  # py2 and py3 round
53 |         context_xmin = np.floor(pos[0] - c + 0.5)
54 |         context_xmax = context_xmin + sz - 1
55 |         # context_ymin = round(pos[1] - c)
56 |         context_ymin = np.floor(pos[1] - c + 0.5)
57 |         context_ymax = context_ymin + sz - 1
58 |         left_pad = int(max(0., -context_xmin))
59 |         top_pad = int(max(0., -context_ymin))
60 |         right_pad = int(max(0., context_xmax - im_sz[1] + 1))
61 |         bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
62 | 
63 |         context_xmin = context_xmin + left_pad
64 |         context_xmax = context_xmax + left_pad
65 |         context_ymin = context_ymin + top_pad
66 |         context_ymax = context_ymax + top_pad
67 | 
68 |         r, c, k = im.shape
69 |         if any([top_pad, bottom_pad, left_pad, right_pad]):
70 |             size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)
71 |             te_im = np.zeros(size, np.uint8)
72 |             te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
73 |             if top_pad:
74 |                 te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
75 |             if bottom_pad:
76 |                 te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
77 |             if left_pad:
78 |                 te_im[:, 0:left_pad, :] = avg_chans
79 |             if right_pad:
80 |                 te_im[:, c + left_pad:, :] = avg_chans
81 |             im_patch = te_im[int(context_ymin):int(context_ymax + 1),
82 |                              int(context_xmin):int(context_xmax + 1), :]
83 |         else:
84 |             im_patch = im[int(context_ymin):int(context_ymax + 1),
85 |                           int(context_xmin):int(context_xmax + 1), :]
86 | 
87 |         if not np.array_equal(model_sz, original_sz):
88 |             im_patch = cv2.resize(im_patch, (model_sz, model_sz))
89 |         im_patch = im_patch.transpose(2, 0, 1)
90 |         im_patch = im_patch[np.newaxis, :, :, :]
91 |         im_patch = im_patch.astype(np.float32)
92 |         im_patch = torch.from_numpy(im_patch)
93 |         if cfg.CUDA:
94 |             im_patch = im_patch.cuda()
95 |         return im_patch
96 | 
--------------------------------------------------------------------------------
/SiamAPN/pysot/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__init__.py
--------------------------------------------------------------------------------
/SiamAPN/pysot/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/average_meter.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/average_meter.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/bbox.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/bbox.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/bbox.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/bbox.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/distributed.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/distributed.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/location_grid.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/location_grid.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/log_helper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/log_helper.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/lr_scheduler.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/lr_scheduler.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/lr_scheduler_adapn.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/lr_scheduler_adapn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/lr_scheduler_apn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/lr_scheduler_apn.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/misc.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/misc.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/model_load.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/model_load.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/model_load.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/model_load.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/__pycache__/xcorr.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/pysot/utils/__pycache__/xcorr.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/average_meter.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 
2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | 9 | class Meter(object): 10 | def __init__(self, name, val, avg): 11 | self.name = name 12 | self.val = val 13 | self.avg = avg 14 | 15 | def __repr__(self): 16 | return "{name}: {val:.6f} ({avg:.6f})".format( 17 | name=self.name, val=self.val, avg=self.avg 18 | ) 19 | 20 | def __format__(self, *tuples, **kwargs): 21 | return self.__repr__() 22 | 23 | 24 | class AverageMeter: 25 | """Computes and stores the average and current value""" 26 | def __init__(self, num=100): 27 | self.num = num 28 | self.reset() 29 | 30 | def reset(self): 31 | self.val = {} 32 | self.sum = {} 33 | self.count = {} 34 | self.history = {} 35 | 36 | def update(self, batch=1, **kwargs): 37 | val = {} 38 | for k in kwargs: 39 | val[k] = kwargs[k] / float(batch) 40 | self.val.update(val) 41 | for k in kwargs: 42 | if k not in self.sum: 43 | self.sum[k] = 0 44 | self.count[k] = 0 45 | self.history[k] = [] 46 | self.sum[k] += kwargs[k] 47 | self.count[k] += batch 48 | for _ in range(batch): 49 | self.history[k].append(val[k]) 50 | 51 | if self.num <= 0: 52 | # < 0, average all 53 | self.history[k] = [] 54 | 55 | # == 0: no average 56 | if self.num == 0: 57 | self.sum[k] = self.val[k] 58 | self.count[k] = 1 59 | 60 | elif len(self.history[k]) > self.num: 61 | pop_num = len(self.history[k]) - self.num 62 | for _ in range(pop_num): 63 | self.sum[k] -= self.history[k][0] 64 | del self.history[k][0] 65 | self.count[k] -= 1 66 | 67 | def __repr__(self): 68 | s = '' 69 | for k in self.sum: 70 | s += self.format_str(k) 71 | return s 72 | 73 | def format_str(self, attr): 74 | return "{name}: {val:.6f} ({avg:.6f}) ".format( 75 | name=attr, 76 | val=float(self.val[attr]), 77 | avg=float(self.sum[attr]) / self.count[attr]) 78 | 79 | def __getattr__(self, attr): 80 | if attr in self.__dict__: 81 | return super(AverageMeter, self).__getattr__(attr) 82 | if attr not in self.sum: 83 | print("invalid key '{}'".format(attr)) 84 | return Meter(attr, 0, 0) 85 | return Meter(attr, self.val[attr], self.avg(attr)) 86 | 87 | def avg(self, attr): 88 | return float(self.sum[attr]) / self.count[attr] 89 | 90 | 91 | if __name__ == '__main__': 92 | avg1 = AverageMeter(10) 93 | avg2 = AverageMeter(0) 94 | avg3 = AverageMeter(-1) 95 | 96 | for i in range(20): 97 | avg1.update(s=i) 98 | avg2.update(s=i) 99 | avg3.update(s=i) 100 | 101 | print('iter {}'.format(i)) 102 | print(avg1.s) 103 | print(avg2.s) 104 | print(avg3.s) 105 | -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/location_grid.py: -------------------------------------------------------------------------------- 1 | import torch 2 | def compute_locations(features,stride): 3 | h, w = features.size()[-2:] 4 | locations_per_level = compute_locations_per_level( 5 | h, w, stride, 6 | features.device 7 | ) 8 | return locations_per_level 9 | 10 | 11 | def compute_locations_per_level(h, w, stride, device): 12 | shifts_x = torch.arange( 13 | 0, w * stride, step=stride, 14 | dtype=torch.float32, device=device 15 | ) 16 | shifts_y = torch.arange( 17 | 0, h * stride, step=stride, 18 | dtype=torch.float32, device=device 19 | ) 20 | shift_y, shift_x = torch.meshgrid((shifts_y, shifts_x)) 21 | shift_x = shift_x.reshape(-1) 22 | shift_y = shift_y.reshape(-1) 23 | # locations = torch.stack((shift_x, shift_y), dim=1) + stride + 3*stride # (size_z-1)/2*size_z 28 24 | # locations = 
torch.stack((shift_x, shift_y), dim=1) + stride 25 | locations = torch.stack((shift_x, shift_y), dim=1) + 32 #alex:48 // 32 26 | return locations 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/misc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import os 9 | import numpy as np 10 | import torch 11 | 12 | from colorama import Fore, Style 13 | 14 | 15 | __all__ = ['commit', 'describe'] 16 | 17 | 18 | def _exec(cmd): 19 | f = os.popen(cmd, 'r', 1) 20 | return f.read().strip() 21 | 22 | 23 | def _bold(s): 24 | return "\033[1m%s\033[0m" % s 25 | 26 | 27 | def _color(s): 28 | # return f'{Fore.RED}{s}{Style.RESET_ALL}' 29 | return "{}{}{}".format(Fore.RED,s,Style.RESET_ALL) 30 | 31 | 32 | def _describe(model, lines=None, spaces=0): 33 | head = " " * spaces 34 | for name, p in model.named_parameters(): 35 | if '.' in name: 36 | continue 37 | if p.requires_grad: 38 | name = _color(name) 39 | line = "{head}- {name}".format(head=head, name=name) 40 | lines.append(line) 41 | 42 | for name, m in model.named_children(): 43 | space_num = len(name) + spaces + 1 44 | if m.training: 45 | name = _color(name) 46 | line = "{head}.{name} ({type})".format( 47 | head=head, 48 | name=name, 49 | type=m.__class__.__name__) 50 | lines.append(line) 51 | _describe(m, lines, space_num) 52 | 53 | 54 | def commit(): 55 | root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) 56 | cmd = "cd {}; git log | head -n1 | awk '{{print $2}}'".format(root) 57 | commit = _exec(cmd) 58 | cmd = "cd {}; git log --oneline | head -n1".format(root) 59 | commit_log = _exec(cmd) 60 | return "commit : {}\n log : {}".format(commit, commit_log) 61 | 62 | 63 | def describe(net, name=None): 64 | num = 0 65 | lines = [] 66 | if name is not None: 67 | lines.append(name) 68 | num = len(name) 69 | _describe(net, lines, num) 70 | return "\n".join(lines) 71 | 72 | 73 | def bbox_clip(x, min_value, max_value): 74 | new_x = max(min_value, min(x, max_value)) 75 | return new_x 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /SiamAPN/pysot/utils/model_load.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 
 2 | 
 3 | from __future__ import absolute_import
 4 | from __future__ import division
 5 | from __future__ import print_function
 6 | from __future__ import unicode_literals
 7 | 
 8 | import logging
 9 | 
10 | import torch
11 | 
12 | 
13 | logger = logging.getLogger('global')
14 | 
15 | 
16 | def check_keys(model, pretrained_state_dict):
17 |     ckpt_keys = set(pretrained_state_dict.keys())
18 |     model_keys = set(model.state_dict().keys())
19 |     used_pretrained_keys = model_keys & ckpt_keys
20 |     unused_pretrained_keys = ckpt_keys - model_keys
21 |     missing_keys = model_keys - ckpt_keys
22 |     # filter 'num_batches_tracked'
23 |     missing_keys = [x for x in missing_keys
24 |                     if not x.endswith('num_batches_tracked')]
25 |     if len(missing_keys) > 0:
26 |         logger.info('[Warning] missing keys: {}'.format(missing_keys))
27 |         logger.info('missing keys:{}'.format(len(missing_keys)))
28 |     if len(unused_pretrained_keys) > 0:
29 |         logger.info('[Warning] unused_pretrained_keys: {}'.format(
30 |             unused_pretrained_keys))
31 |         logger.info('unused checkpoint keys:{}'.format(
32 |             len(unused_pretrained_keys)))
33 |     logger.info('used keys:{}'.format(len(used_pretrained_keys)))
34 |     assert len(used_pretrained_keys) > 0, \
35 |         'load NONE from pretrained checkpoint'
36 |     return True
37 | 
38 | 
39 | def remove_prefix(state_dict, prefix):
40 |     ''' Old style models are stored with all parameter names
41 |     sharing the common prefix 'module.' '''
42 |     logger.info('remove prefix \'{}\''.format(prefix))
43 |     f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
44 |     return {f(key): value for key, value in state_dict.items()}
45 | 
46 | 
47 | def load_pretrain(model, pretrained_path):
48 |     logger.info('load pretrained model from {}'.format(pretrained_path))
49 |     device = torch.cuda.current_device()
50 |     pretrained_dict = torch.load(pretrained_path,
51 |         map_location=lambda storage, loc: storage.cuda(device))
52 |     if "state_dict" in pretrained_dict.keys():
53 |         pretrained_dict = remove_prefix(pretrained_dict['state_dict'],
54 |                                         'module.')
55 |     else:
56 |         pretrained_dict = remove_prefix(pretrained_dict, 'module.')
57 | 
58 |     try:
59 |         check_keys(model, pretrained_dict)
60 |     except AssertionError:
61 |         # no key matched: the checkpoint was probably saved from the bare
62 |         # backbone, so retry with a 'features.' prefix on every key
63 |         logger.info('[Warning]: using pretrain as features, '
64 |                     'adding "features." as prefix')
65 |         new_dict = {}
66 |         for k, v in pretrained_dict.items():
67 |             k = 'features.' + k
68 |             new_dict[k] = v
69 |         pretrained_dict = new_dict
70 |         check_keys(model, pretrained_dict)
71 |     model.load_state_dict(pretrained_dict, strict=False)
72 |     return model
73 | 
74 | 
75 | def restore_from(model, optimizer, ckpt_path):
76 |     device = torch.cuda.current_device()
77 |     ckpt = torch.load(ckpt_path,
78 |         map_location=lambda storage, loc: storage.cuda(device))
79 |     epoch = ckpt['epoch']
80 | 
81 |     ckpt_model_dict = remove_prefix(ckpt['state_dict'], 'module.')
82 |     check_keys(model, ckpt_model_dict)
83 |     model.load_state_dict(ckpt_model_dict, strict=False)
84 | 
85 |     check_keys(optimizer, ckpt['optimizer'])
86 |     optimizer.load_state_dict(ckpt['optimizer'])
87 |     return model, optimizer, epoch
--------------------------------------------------------------------------------
/SiamAPN/pysot/utils/xcorr.py:
--------------------------------------------------------------------------------
 1 | # Copyright (c) SenseTime. All Rights Reserved.
2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch 9 | import torch.nn.functional as F 10 | 11 | 12 | def xcorr_slow(x, kernel): 13 | """for loop to calculate cross correlation, slow version 14 | """ 15 | batch = x.size()[0] 16 | out = [] 17 | for i in range(batch): 18 | px = x[i] 19 | pk = kernel[i] 20 | px = px.view(1, px.size()[0], px.size()[1], px.size()[2]) 21 | pk = pk.view(-1, px.size()[1], pk.size()[1], pk.size()[2]) 22 | po = F.conv2d(px, pk) 23 | out.append(po) 24 | out = torch.cat(out, 0) 25 | return out 26 | 27 | 28 | def xcorr_fast(x, kernel): 29 | """group conv2d to calculate cross correlation, fast version 30 | """ 31 | batch = kernel.size()[0] 32 | pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3]) 33 | px = x.view(1, -1, x.size()[2], x.size()[3]) 34 | po = F.conv2d(px, pk, groups=batch) 35 | po = po.view(batch, -1, po.size()[2], po.size()[3]) 36 | return po 37 | 38 | 39 | def xcorr_depthwise(x, kernel): 40 | """depthwise cross correlation 41 | """ 42 | batch = kernel.size(0) 43 | channel = kernel.size(1) 44 | x = x.view(1, batch*channel, x.size(2), x.size(3)) 45 | kernel = kernel.view(batch*channel, 1, kernel.size(2), kernel.size(3)) 46 | out = F.conv2d(x, kernel, groups=batch*channel) 47 | out = out.view(batch, channel, out.size(2), out.size(3)) 48 | return out 49 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/__init__.py -------------------------------------------------------------------------------- /SiamAPN/toolkit/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .uav10fps import UAV10Dataset 2 | from .uav20l import UAV20Dataset 3 | from .visdrone1 import VISDRONED2018Dataset 4 | from .v4r import V4RDataset 5 | class DatasetFactory(object): 6 | @staticmethod 7 | def create_dataset(**kwargs): 8 | 9 | 10 | assert 'name' in kwargs, "should provide dataset name" 11 | name = kwargs['name'] 12 | 13 | if 'UAV123_10fps' in name: 14 | dataset = UAV10Dataset(**kwargs) 15 | elif 'UAV20l' in name: 16 | dataset = UAV20Dataset(**kwargs) 17 | elif 'VISDRONED2018' in name: 18 | dataset = VISDRONED2018Dataset(**kwargs) 19 | elif 'V4RFlight112' in name: 20 | dataset = V4RDataset(**kwargs) 21 | else: 22 | raise Exception("unknown dataset {}".format(kwargs['name'])) 23 | return dataset 24 | 25 | --------------------------------------------------------------------------------
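A minimal usage sketch for the factory above (not a file from the repository; it assumes the SiamAPN folder is on `sys.path`, as the scripts in `tools/` arrange, and that the `Video` class in `video.py` exposes a `.name` attribute):

````python
from toolkit.datasets import DatasetFactory

# 'name' must contain one of the substrings the factory checks for;
# all keyword arguments are forwarded to the dataset constructor.
dataset = DatasetFactory.create_dataset(name='UAV123_10fps',
                                        dataset_root='./test_dataset/UAV123_10fps',
                                        load_img=False)
for video in dataset:      # Dataset.__iter__ yields videos in sorted-name order
    print(video.name)
````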
/SiamAPN/toolkit/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/dataset.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/dataset.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/dataset.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/dtb.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/dtb.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/got10k.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/got10k.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/lasot.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/lasot.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/otb.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/otb.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/uav.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/uav.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/uav10fps.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/uav10fps.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/uav10fps.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/uav10fps.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/uav20l.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/uav20l.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/uav20l.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/uav20l.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/uavdt.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/uavdt.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/v4r.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/v4r.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/v4r.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/v4r.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/video.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/video.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/video.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/video.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/visdrone.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/visdrone.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/visdrone1.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/visdrone1.cpython-37.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/__pycache__/visdrone1.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/datasets/__pycache__/visdrone1.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/dataset.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | 3 | class Dataset(object): 4 | def __init__(self, name, dataset_root): 5 | self.name = name 6 | self.dataset_root = dataset_root 7 | self.videos = None 8 | 9 | def __getitem__(self, idx): 10 | if isinstance(idx, str): 11 | return self.videos[idx] 12 | elif isinstance(idx, int): 13 | return self.videos[sorted(list(self.videos.keys()))[idx]] 14 | 15 | def __len__(self): 16 | return len(self.videos) 17 | 18 | def __iter__(self): 19 | keys = sorted(list(self.videos.keys())) 20 | for key in keys: 21 | yield self.videos[key] 22 | 23 | def set_tracker(self, path, tracker_names): 24 | """ 25 | Args: 26 | path: path to tracker results, 27 | tracker_names: list of tracker names 28 | """ 29 | self.tracker_path = path 30 | self.tracker_names = tracker_names 31 | # for video in tqdm(self.videos.values(), 32 | # desc='loading tracker result', ncols=100): 33 | # video.load_tracker(path, tracker_names) 34 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/uav10fps.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | 5 | from PIL import Image 6 | from tqdm import tqdm 7 | from glob import glob 8 | 9 | from .dataset import Dataset 10 | from .video import Video 11 | 12 | def ca(): 13 | path='./test_dataset/UAV123_10fps' 14 | 15 | name_list=os.listdir(path+'/data_seq') 16 | name_list.sort() 17 | a=123 18 | b=[] 19 | for i in range(a): 20 | b.append(name_list[i]) 21 | c=[] 22 | 23 | for jj in range(a): 24 | imgs=path+'/data_seq/'+str(name_list[jj]) 25 | txt=path+'/anno/'+str(name_list[jj])+'.txt' 26 | bbox=[] 27 | f = open(txt) # returns a file object 28 | file= f.readlines() 29 | li=os.listdir(imgs) 30 | li.sort() 31 | for ii in range(len(file)): 32 | li[ii]=name_list[jj]+'/'+li[ii] 33 | 34 | line = file[ii].strip('\n').split(',') 35 | 36 | try: 37 | line[0]=int(line[0]) 38 | except ValueError: 39 | line[0]=float(line[0]) 40 | try: 41 | line[1]=int(line[1]) 42 | except ValueError: 43 | line[1]=float(line[1]) 44 | try: 45 | line[2]=int(line[2]) 46 | except ValueError: 47 | line[2]=float(line[2]) 48 | try: 49 | line[3]=int(line[3]) 50 | except ValueError: 51 | line[3]=float(line[3]) 52 | bbox.append(line) 53 | 54 | if len(bbox)!=len(li): 55 | print(jj) 56 | f.close() 57 | c.append({'attr':[],'gt_rect':bbox,'img_names':li,'init_rect':bbox[0],'video_dir':name_list[jj]}) 58 | 59 | 
d=dict(zip(b,c)) 60 | 61 | return d 62 | 63 | class UAVVideo(Video): 64 | """ 65 | Args: 66 | name: video name 67 | root: dataset root 68 | video_dir: video directory 69 | init_rect: init rectangle 70 | img_names: image names 71 | gt_rect: groundtruth rectangle 72 | attr: attribute of video 73 | """ 74 | def __init__(self, name, root, video_dir, init_rect, img_names, 75 | gt_rect, attr, load_img=False): 76 | super(UAVVideo, self).__init__(name, root, video_dir, 77 | init_rect, img_names, gt_rect, attr, load_img) 78 | 79 | 80 | class UAV10Dataset(Dataset): 81 | """ 82 | Args: 83 | name: dataset name, should contain 'UAV123_10fps' 84 | dataset_root: dataset root 85 | load_img: whether to load all images 86 | """ 87 | def __init__(self, name, dataset_root, load_img=False): 88 | super(UAV10Dataset, self).__init__(name, dataset_root) 89 | meta_data = ca() 90 | 91 | # load videos 92 | pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100) 93 | self.videos = {} 94 | for video in pbar: 95 | pbar.set_postfix_str(video) 96 | self.videos[video] = UAVVideo(video, 97 | dataset_root+'/data_seq', 98 | meta_data[video]['video_dir'], 99 | meta_data[video]['init_rect'], 100 | meta_data[video]['img_names'], 101 | meta_data[video]['gt_rect'], 102 | meta_data[video]['attr']) 103 | 104 | # set attr 105 | attr = [] 106 | for x in self.videos.values(): 107 | attr += x.attr 108 | attr = set(attr) 109 | self.attr = {} 110 | self.attr['ALL'] = list(self.videos.keys()) 111 | for x in attr: 112 | self.attr[x] = [] 113 | for k, v in self.videos.items(): 114 | for attr_ in v.attr: 115 | self.attr[attr_].append(k) 116 | 117 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/uav20l.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | 5 | from PIL import Image 6 | from tqdm import tqdm 7 | from glob import glob 8 | 9 | from .dataset import Dataset 10 | from .video import Video 11 | 12 | 13 | def loaddata(): 14 | 15 | path='./test_dataset/UAV123_20L' 16 | 17 | name_list=os.listdir(path+'/data_seq') 18 | name_list.sort() 19 | 20 | b=[] 21 | for i in range(len(name_list)): 22 | b.append(name_list[i]) 23 | c=[] 24 | 25 | for jj in range(len(name_list)): 26 | imgs=path+'/data_seq/'+str(name_list[jj]) 27 | txt=path+'/anno/'+str(name_list[jj])+'.txt' 28 | bbox=[] 29 | f = open(txt) # returns a file object 30 | file= f.readlines() 31 | li=os.listdir(imgs) 32 | li.sort() 33 | for ii in range(len(file)): 34 | li[ii]=name_list[jj]+'/'+li[ii] 35 | 36 | line = file[ii].strip('\n').split(',') 37 | 38 | try: 39 | line[0]=int(line[0]) 40 | except ValueError: 41 | line[0]=float(line[0]) 42 | try: 43 | line[1]=int(line[1]) 44 | except ValueError: 45 | line[1]=float(line[1]) 46 | try: 47 | line[2]=int(line[2]) 48 | except ValueError: 49 | line[2]=float(line[2]) 50 | try: 51 | line[3]=int(line[3]) 52 | except ValueError: 53 | line[3]=float(line[3]) 54 | bbox.append(line) 55 | 56 | if len(bbox)!=len(li): 57 | print(jj) 58 | f.close() 59 | c.append({'attr':[],'gt_rect':bbox,'img_names':li,'init_rect':bbox[0],'video_dir':name_list[jj]}) 60 | 61 | d=dict(zip(b,c)) 62 | 63 | return d 64 | 65 | class UAVVideo(Video): 66 | """ 67 | Args: 68 | name: video name 69 | root: dataset root 70 | video_dir: video directory 71 | init_rect: init rectangle 72 | img_names: image names 73 | gt_rect: groundtruth rectangle 74 | attr: attribute of video 75 | """ 76 | def __init__(self, name, root, video_dir, init_rect, img_names, 77 | gt_rect, attr, load_img=False): 78 | 
super(UAVVideo, self).__init__(name, root, video_dir, 79 | init_rect, img_names, gt_rect, attr, load_img) 80 | 81 | 82 | class UAV20Dataset(Dataset): 83 | """ 84 | Args: 85 | name: dataset name, should contain 'UAV20l' 86 | dataset_root: dataset root 87 | load_img: whether to load all images 88 | """ 89 | def __init__(self, name, dataset_root, load_img=False): 90 | super(UAV20Dataset, self).__init__(name, dataset_root) 91 | meta_data = loaddata() 92 | 93 | # load videos 94 | pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100) 95 | self.videos = {} 96 | for video in pbar: 97 | pbar.set_postfix_str(video) 98 | self.videos[video] = UAVVideo(video, 99 | dataset_root+'/data_seq', 100 | meta_data[video]['video_dir'], 101 | meta_data[video]['init_rect'], 102 | meta_data[video]['img_names'], 103 | meta_data[video]['gt_rect'], 104 | meta_data[video]['attr']) 105 | 106 | # set attr 107 | attr = [] 108 | for x in self.videos.values(): 109 | attr += x.attr 110 | attr = set(attr) 111 | self.attr = {} 112 | self.attr['ALL'] = list(self.videos.keys()) 113 | for x in attr: 114 | self.attr[x] = [] 115 | for k, v in self.videos.items(): 116 | for attr_ in v.attr: 117 | self.attr[attr_].append(k) 118 | 119 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/datasets/visdrone1.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | 5 | from PIL import Image 6 | from tqdm import tqdm 7 | from glob import glob 8 | 9 | from .dataset import Dataset 10 | from .video import Video 11 | 12 | 13 | class UVADTVideo(Video): 14 | """ 15 | Args: 16 | name: video name 17 | root: dataset root 18 | video_dir: video directory 19 | init_rect: init rectangle 20 | img_names: image names 21 | gt_rect: groundtruth rectangle 22 | attr: attribute of video 23 | """ 24 | def __init__(self, name, root, video_dir, init_rect, img_names, 25 | gt_rect, attr, load_img=False): 26 | super(UVADTVideo, self).__init__(name, root, video_dir, 27 | init_rect, img_names, gt_rect, attr, load_img) 28 | 29 | def loaddata(): 30 | 31 | 32 | path='./test_dataset/VisDrone2018-SOT-test/' 33 | 34 | name_list=os.listdir(path+'/sequences') 35 | name_list.sort() 36 | 37 | b=[] 38 | for i in range(len(name_list)): 39 | b.append(name_list[i]) 40 | c=[] 41 | 42 | for jj in range(len(name_list)): 43 | imgs=path+'/sequences/'+str(name_list[jj]) 44 | txt=path+'/annotations/'+str(name_list[jj])+'.txt' 45 | bbox=[] 46 | f = open(txt) # returns a file object 47 | file= f.readlines() 48 | li=os.listdir(imgs) 49 | li.sort() 50 | for ii in range(len(file)): 51 | li[ii]=name_list[jj]+'/'+li[ii] 52 | 53 | line = file[ii].strip('\n').split(',') 54 | 55 | try: 56 | line[0]=int(line[0]) 57 | except ValueError: 58 | line[0]=float(line[0]) 59 | try: 60 | line[1]=int(line[1]) 61 | except ValueError: 62 | line[1]=float(line[1]) 63 | try: 64 | line[2]=int(line[2]) 65 | except ValueError: 66 | line[2]=float(line[2]) 67 | try: 68 | line[3]=int(line[3]) 69 | except ValueError: 70 | line[3]=float(line[3]) 71 | bbox.append(line) 72 | 73 | if len(bbox)!=len(li): 74 | print(jj) 75 | f.close() 76 | c.append({'attr':[],'gt_rect':bbox,'img_names':li,'init_rect':bbox[0],'video_dir':name_list[jj]}) 77 | 78 | d=dict(zip(b,c)) 79 | 80 | return d 81 | class VISDRONED2018Dataset(Dataset): 82 | """ 83 | Args: 84 | name: dataset name, should contain 'VISDRONED2018' 85 | dataset_root: dataset root 86 | load_img: whether to load all images 87 | """ 88 | def __init__(self, name, dataset_root, load_img=False): 
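        # (descriptive note) ca()/loaddata() in these dataset files all return the
        # same structure, which the constructors unpack into Video objects:
        # {video_name: {'attr': [], 'gt_rect': [[x, y, w, h], ...],
        #               'img_names': [...], 'init_rect': [x, y, w, h],
        #               'video_dir': video_name}}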
89 | super(VISDRONED2018Dataset, self).__init__(name, dataset_root) 90 | 91 | meta_data=loaddata() 92 | # load videos 93 | pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100) 94 | self.videos = {} 95 | for video in pbar: 96 | pbar.set_postfix_str(video) 97 | self.videos[video] = UVADTVideo(video, 98 | dataset_root+'/sequences', 99 | meta_data[video]['video_dir'], 100 | meta_data[video]['init_rect'], 101 | meta_data[video]['img_names'], 102 | meta_data[video]['gt_rect'], 103 | meta_data[video]['attr'], 104 | load_img) 105 | 106 | # set attr 107 | attr = [] 108 | for x in self.videos.values(): 109 | attr += x.attr 110 | attr = set(attr) 111 | self.attr = {} 112 | self.attr['ALL'] = list(self.videos.keys()) 113 | for x in attr: 114 | self.attr[x] = [] 115 | for k, v in self.videos.items(): 116 | for attr_ in v.attr: 117 | self.attr[attr_].append(k) 118 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .ope_benchmark import OPEBenchmark 2 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/evaluation/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/evaluation/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/evaluation/__pycache__/ope_benchmark.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/evaluation/__pycache__/ope_benchmark.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/utils/__pycache__/statistics.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/utils/__pycache__/statistics.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/utils/statistics.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author fangyi.zhang@vipl.ict.ac.cn 3 | """ 4 | import numpy as np 5 | 6 | def overlap_ratio(rect1, rect2): 7 | '''Compute overlap ratio between two rects 8 | Args 9 | rect:2d array of N x [x,y,w,h] 10 | Return: 11 | iou 12 | ''' 13 | # if rect1.ndim==1: 14 | # rect1 = rect1[np.newaxis, :] 15 | # if rect2.ndim==1: 16 | # rect2 = rect2[np.newaxis, :] 17 | left = np.maximum(rect1[:,0], rect2[:,0]) 18 | right = np.minimum(rect1[:,0]+rect1[:,2], rect2[:,0]+rect2[:,2]) 19 | top = np.maximum(rect1[:,1], rect2[:,1]) 20 | bottom = np.minimum(rect1[:,1]+rect1[:,3], rect2[:,1]+rect2[:,3]) 21 | 22 | intersect = np.maximum(0,right - left) * np.maximum(0,bottom - top) 23 | union = rect1[:,2]*rect1[:,3] + rect2[:,2]*rect2[:,3] - intersect 24 | iou = intersect / union 25 | iou = np.maximum(np.minimum(1, iou), 0) 26 | return iou 27 | 28 | def success_overlap(gt_bb, result_bb, n_frame): 29 | thresholds_overlap = np.arange(0, 1.05, 0.05) 30 | success = np.zeros(len(thresholds_overlap)) 31 | iou = np.ones(len(gt_bb)) * (-1) 32 | # mask = 
np.sum(gt_bb > 0, axis=1) == 4 #TODO check all dataset 33 | mask = np.sum(gt_bb[:, 2:] > 0, axis=1) == 2 34 | iou[mask] = overlap_ratio(gt_bb[mask], result_bb[mask]) 35 | for i in range(len(thresholds_overlap)): 36 | success[i] = np.sum(iou > thresholds_overlap[i]) / float(n_frame) 37 | return success 38 | 39 | def success_error(gt_center, result_center, thresholds, n_frame): 40 | # n_frame = len(gt_center) 41 | success = np.zeros(len(thresholds)) 42 | dist = np.ones(len(gt_center)) * (-1) 43 | mask = np.sum(gt_center > 0, axis=1) == 2 44 | dist[mask] = np.sqrt(np.sum( 45 | np.power(gt_center[mask] - result_center[mask], 2), axis=1)) 46 | for i in range(len(thresholds)): 47 | success[i] = np.sum(dist <= thresholds[i]) / float(n_frame) 48 | return success 49 | 50 | 51 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .draw_success_precision import draw_success_precision 2 | -------------------------------------------------------------------------------- /SiamAPN/toolkit/visualization/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/visualization/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/visualization/__pycache__/draw_success_precision.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/visualization/__pycache__/draw_success_precision.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/visualization/__pycache__/draw_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vision4robotics/SiamAPN/e1bbfcc81377de04fe0bfa11ab694f8a0d9f5380/SiamAPN/toolkit/visualization/__pycache__/draw_utils.cpython-38.pyc -------------------------------------------------------------------------------- /SiamAPN/toolkit/visualization/draw_utils.py: -------------------------------------------------------------------------------- 1 | 2 | COLOR = ((1, 0, 0), 3 | (0, 1, 0), 4 | (1, 0, 1), 5 | (1, 1, 0), 6 | (0 , 162/255, 232/255), 7 | (0.5, 0.5, 0.5), 8 | (0, 0, 1), 9 | (0, 1, 1), 10 | (136/255, 0 , 21/255), 11 | (255/255, 127/255, 39/255), 12 | (0, 0, 0)) 13 | 14 | LINE_STYLE = ['-', '--', ':', '-', '--', ':', '-', '--', ':', '-'] 15 | 16 | MARKER_STYLE = ['o', 'v', '<', '*', 'D', 'x', '.', 'x', '<', '.'] 17 | -------------------------------------------------------------------------------- /SiamAPN/tools/demo_apn.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import os 7 | import sys 8 | sys.path.append('../') 9 | 10 | import argparse 11 | import cv2 12 | import torch 13 | from glob import glob 14 | 15 | from pysot.core.config_apn import cfg 16 | from pysot.models.model_builder_apn import ModelBuilderAPN 17 | from pysot.tracker.siamapn_tracker import SiamAPNTracker 18 | from 
pysot.utils.model_load import load_pretrain 19 | 20 | torch.set_num_threads(1) 21 | 22 | parser = argparse.ArgumentParser(description='SiamAPN demo') 23 | parser.add_argument('--config', type=str, default='../experiments/siamapn/config.yaml', help='config file') 24 | parser.add_argument('--snapshot', type=str, default='./snapshot/general_model.pth', help='model name') 25 | parser.add_argument('--video_name', default='../test_dataset/sequence_name', type=str, help='videos or image files') 26 | args = parser.parse_args() 27 | 28 | 29 | def get_frames(video_name): 30 | if not video_name: 31 | cap = cv2.VideoCapture(0) 32 | 33 | # warmup 34 | for i in range(5): 35 | cap.read() 36 | while True: 37 | ret, frame = cap.read() 38 | if ret: 39 | yield frame 40 | else: 41 | break 42 | elif video_name.endswith('avi') or \ 43 | video_name.endswith('mp4'): 44 | cap = cv2.VideoCapture(video_name) 45 | while True: 46 | ret, frame = cap.read() 47 | if ret: 48 | yield frame 49 | else: 50 | break 51 | else: 52 | images = sorted(glob(os.path.join(video_name, 'img', '*.jp*'))) 53 | for img in images: 54 | frame = cv2.imread(img) 55 | yield frame 56 | 57 | 58 | def main(): 59 | # load config 60 | cfg.merge_from_file(args.config) 61 | cfg.CUDA = torch.cuda.is_available() 62 | device = torch.device('cuda' if cfg.CUDA else 'cpu') 63 | 64 | # create model 65 | model = ModelBuilderAPN() 66 | 67 | # load model 68 | model = load_pretrain(model, args.snapshot).eval().to(device) 69 | 70 | # build tracker 71 | tracker = SiamAPNTracker(model, cfg.TRACK) 72 | 73 | 74 | first_frame = True 75 | if args.video_name: 76 | video_name = args.video_name.split('/')[-1].split('.')[0] 77 | else: 78 | video_name = 'webcam' 79 | cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN) 80 | for frame in get_frames(args.video_name): 81 | if first_frame: 82 | try: 83 | init_rect = cv2.selectROI(video_name, frame, False, False) 84 | except Exception: 85 | exit() 86 | tracker.init(frame, init_rect) 87 | first_frame = False 88 | else: 89 | outputs = tracker.track(frame) 90 | bbox = list(map(int, outputs['bbox'])) 91 | cv2.rectangle(frame, (bbox[0], bbox[1]), 92 | (bbox[0]+bbox[2], bbox[1]+bbox[3]), 93 | (0, 255, 0), 3) 94 | cv2.imshow(video_name, frame) 95 | cv2.waitKey(40) 96 | 97 | 98 | if __name__ == '__main__': 99 | main() 100 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/gen_json.py: -------------------------------------------------------------------------------- 1 | from pycocotools.coco import COCO 2 | from os.path import join 3 | import json 4 | 5 | 6 | dataDir = '.' 7 | for dataType in ['val2017', 'train2017']: 8 | dataset = dict() 9 | annFile = '{}/annotations/instances_{}.json'.format(dataDir,dataType) 10 | coco = COCO(annFile) 11 | n_imgs = len(coco.imgs) 12 | for n, img_id in enumerate(coco.imgs): 13 | print('subset: {} image id: {:04d} / {:04d}'.format(dataType, n, n_imgs)) 14 | img = coco.loadImgs(img_id)[0] 15 | annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None) 16 | anns = coco.loadAnns(annIds) 17 | video_crop_base_path = join(dataType, img['file_name'].split('/')[-1].split('.')[0]) 18 | 19 | if len(anns) > 0: 20 | dataset[video_crop_base_path] = dict() 21 | 22 | for trackid, ann in enumerate(anns): 23 | rect = ann['bbox'] 24 | c = ann['category_id'] 25 | bbox = [rect[0], rect[1], rect[0]+rect[2], rect[1]+rect[3]] 26 | if rect[2] <= 0 or rect[3] <= 0: # leads to nan error in cls.
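            # (descriptive note) a box with zero width or height would produce an
            # empty crop and an invalid regression target, propagating NaNs into
            # the classification loss, hence the guard above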
27 | continue 28 | dataset[video_crop_base_path]['{:02d}'.format(trackid)] = {'000000': bbox} 29 | 30 | print('save json (dataset), please wait 20 seconds~') 31 | json.dump(dataset, open('{}.json'.format(dataType), 'w'), indent=4, sort_keys=True) 32 | print('done!') 33 | 34 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/pycocotools/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | # install pycocotools locally 3 | python setup.py build_ext --inplace 4 | rm -rf build 5 | 6 | install: 7 | # install pycocotools to the Python site-packages 8 | python setup.py build_ext install 9 | rm -rf build 10 | clean: 11 | rm _mask.c _mask.cpython-36m-x86_64-linux-gnu.so 12 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/pycocotools/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'tylin' 2 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/pycocotools/common/gason.h: -------------------------------------------------------------------------------- 1 | // https://github.com/vivkin/gason - pulled January 10, 2016 2 | #pragma once 3 | 4 | #include <stdint.h> 5 | #include <stddef.h> 6 | #include <assert.h> 7 | 8 | enum JsonTag { 9 | JSON_NUMBER = 0, 10 | JSON_STRING, 11 | JSON_ARRAY, 12 | JSON_OBJECT, 13 | JSON_TRUE, 14 | JSON_FALSE, 15 | JSON_NULL = 0xF 16 | }; 17 | 18 | struct JsonNode; 19 | 20 | #define JSON_VALUE_PAYLOAD_MASK 0x00007FFFFFFFFFFFULL 21 | #define JSON_VALUE_NAN_MASK 0x7FF8000000000000ULL 22 | #define JSON_VALUE_TAG_MASK 0xF 23 | #define JSON_VALUE_TAG_SHIFT 47 24 | 25 | union JsonValue { 26 | uint64_t ival; 27 | double fval; 28 | 29 | JsonValue(double x) 30 | : fval(x) { 31 | } 32 | JsonValue(JsonTag tag = JSON_NULL, void *payload = nullptr) { 33 | assert((uintptr_t)payload <= JSON_VALUE_PAYLOAD_MASK); 34 | ival = JSON_VALUE_NAN_MASK | ((uint64_t)tag << JSON_VALUE_TAG_SHIFT) | (uintptr_t)payload; 35 | } 36 | bool isDouble() const { 37 | return (int64_t)ival <= (int64_t)JSON_VALUE_NAN_MASK; 38 | } 39 | JsonTag getTag() const { 40 | return isDouble() ? 
JSON_NUMBER : JsonTag((ival >> JSON_VALUE_TAG_SHIFT) & JSON_VALUE_TAG_MASK); 41 | } 42 | uint64_t getPayload() const { 43 | assert(!isDouble()); 44 | return ival & JSON_VALUE_PAYLOAD_MASK; 45 | } 46 | double toNumber() const { 47 | assert(getTag() == JSON_NUMBER); 48 | return fval; 49 | } 50 | char *toString() const { 51 | assert(getTag() == JSON_STRING); 52 | return (char *)getPayload(); 53 | } 54 | JsonNode *toNode() const { 55 | assert(getTag() == JSON_ARRAY || getTag() == JSON_OBJECT); 56 | return (JsonNode *)getPayload(); 57 | } 58 | }; 59 | 60 | struct JsonNode { 61 | JsonValue value; 62 | JsonNode *next; 63 | char *key; 64 | }; 65 | 66 | struct JsonIterator { 67 | JsonNode *p; 68 | 69 | void operator++() { 70 | p = p->next; 71 | } 72 | bool operator!=(const JsonIterator &x) const { 73 | return p != x.p; 74 | } 75 | JsonNode *operator*() const { 76 | return p; 77 | } 78 | JsonNode *operator->() const { 79 | return p; 80 | } 81 | }; 82 | 83 | inline JsonIterator begin(JsonValue o) { 84 | return JsonIterator{o.toNode()}; 85 | } 86 | inline JsonIterator end(JsonValue) { 87 | return JsonIterator{nullptr}; 88 | } 89 | 90 | #define JSON_ERRNO_MAP(XX) \ 91 | XX(OK, "ok") \ 92 | XX(BAD_NUMBER, "bad number") \ 93 | XX(BAD_STRING, "bad string") \ 94 | XX(BAD_IDENTIFIER, "bad identifier") \ 95 | XX(STACK_OVERFLOW, "stack overflow") \ 96 | XX(STACK_UNDERFLOW, "stack underflow") \ 97 | XX(MISMATCH_BRACKET, "mismatch bracket") \ 98 | XX(UNEXPECTED_CHARACTER, "unexpected character") \ 99 | XX(UNQUOTED_KEY, "unquoted key") \ 100 | XX(BREAKING_BAD, "breaking bad") \ 101 | XX(ALLOCATION_FAILURE, "allocation failure") 102 | 103 | enum JsonErrno { 104 | #define XX(no, str) JSON_##no, 105 | JSON_ERRNO_MAP(XX) 106 | #undef XX 107 | }; 108 | 109 | const char *jsonStrError(int err); 110 | 111 | class JsonAllocator { 112 | struct Zone { 113 | Zone *next; 114 | size_t used; 115 | } *head = nullptr; 116 | 117 | public: 118 | JsonAllocator() = default; 119 | JsonAllocator(const JsonAllocator &) = delete; 120 | JsonAllocator &operator=(const JsonAllocator &) = delete; 121 | JsonAllocator(JsonAllocator &&x) : head(x.head) { 122 | x.head = nullptr; 123 | } 124 | JsonAllocator &operator=(JsonAllocator &&x) { 125 | head = x.head; 126 | x.head = nullptr; 127 | return *this; 128 | } 129 | ~JsonAllocator() { 130 | deallocate(); 131 | } 132 | void *allocate(size_t size); 133 | void deallocate(); 134 | }; 135 | 136 | int jsonParse(char *str, char **endptr, JsonValue *value, JsonAllocator &allocator); 137 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/pycocotools/common/maskApi.h: -------------------------------------------------------------------------------- 1 | /************************************************************************** 2 | * Microsoft COCO Toolbox. version 2.0 3 | * Data, paper, and tutorials available at: http://mscoco.org/ 4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 5 | * Licensed under the Simplified BSD License [see coco/license.txt] 6 | **************************************************************************/ 7 | #pragma once 8 | 9 | typedef unsigned int uint; 10 | typedef unsigned long siz; 11 | typedef unsigned char byte; 12 | typedef double* BB; 13 | typedef struct { siz h, w, m; uint *cnts; } RLE; 14 | 15 | /* Initialize/destroy RLE. */ 16 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ); 17 | void rleFree( RLE *R ); 18 | 19 | /* Initialize/destroy RLE array. 
*/ 20 | void rlesInit( RLE **R, siz n ); 21 | void rlesFree( RLE **R, siz n ); 22 | 23 | /* Encode binary masks using RLE. */ 24 | void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n ); 25 | 26 | /* Decode binary masks encoded via RLE. */ 27 | void rleDecode( const RLE *R, byte *mask, siz n ); 28 | 29 | /* Compute union or intersection of encoded masks. */ 30 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect ); 31 | 32 | /* Compute area of encoded masks. */ 33 | void rleArea( const RLE *R, siz n, uint *a ); 34 | 35 | /* Compute intersection over union between masks. */ 36 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o ); 37 | 38 | /* Compute non-maximum suppression between bounding masks */ 39 | void rleNms( RLE *dt, siz n, uint *keep, double thr ); 40 | 41 | /* Compute intersection over union between bounding boxes. */ 42 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ); 43 | 44 | /* Compute non-maximum suppression between bounding boxes */ 45 | void bbNms( BB dt, siz n, uint *keep, double thr ); 46 | 47 | /* Get bounding boxes surrounding encoded masks. */ 48 | void rleToBbox( const RLE *R, BB bb, siz n ); 49 | 50 | /* Convert bounding boxes to encoded masks. */ 51 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n ); 52 | 53 | /* Convert polygon to encoded mask. */ 54 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w ); 55 | 56 | /* Get compressed string representation of encoded mask. */ 57 | char* rleToString( const RLE *R ); 58 | 59 | /* Convert from compressed string representation of encoded mask. */ 60 | void rleFrString( RLE *R, char *s, siz h, siz w ); 61 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/pycocotools/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from Cython.Build import cythonize 3 | from distutils.extension import Extension 4 | import numpy as np 5 | 6 | # To compile and install locally run "python setup.py build_ext --inplace" 7 | # To install library to Python site-packages run "python setup.py build_ext install" 8 | 9 | ext_modules = [ 10 | Extension( 11 | '_mask', 12 | sources=['common/maskApi.c', '_mask.pyx'], 13 | include_dirs = [np.get_include(), 'common'], 14 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'], 15 | ) 16 | ] 17 | 18 | setup(name='pycocotools', 19 | packages=['pycocotools'], 20 | package_dir = {'pycocotools': '.'}, 21 | version='2.0', 22 | ext_modules= 23 | cythonize(ext_modules) 24 | ) 25 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/readme.md: -------------------------------------------------------------------------------- 1 | # Preprocessing COCO 2 | 3 | ### Download raw images and annotations 4 | 5 | ````shell 6 | wget http://images.cocodataset.org/zips/train2017.zip 7 | wget http://images.cocodataset.org/zips/val2017.zip 8 | wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip 9 | 10 | unzip ./train2017.zip 11 | unzip ./val2017.zip 12 | unzip ./annotations_trainval2017.zip 13 | cd pycocotools && make && cd .. 
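# (note) the make step just builds the Cython mask extension in place,
# i.e. it runs: python setup.py build_ext --inplace && rm -rf build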
14 | ```` 15 | 16 | ### Crop & Generate data info (10 min) 17 | 18 | ````shell 19 | #python par_crop.py [crop_size] [num_threads] 20 | python par_crop.py 511 12 21 | python gen_json.py 22 | ```` 23 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/coco/visual.py: -------------------------------------------------------------------------------- 1 | from pycocotools.coco import COCO 2 | import cv2 3 | import numpy as np 4 | 5 | color_bar = np.random.randint(0, 255, (90, 3)) 6 | 7 | visual = True 8 | 9 | dataDir = '.' 10 | dataType = 'val2017' 11 | annFile = '{}/annotations/instances_{}.json'.format(dataDir,dataType) 12 | coco = COCO(annFile) 13 | 14 | for img_id in coco.imgs: 15 | img = coco.loadImgs(img_id)[0] 16 | annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None) 17 | anns = coco.loadAnns(annIds) 18 | im = cv2.imread('{}/{}/{}'.format(dataDir, dataType, img['file_name'])) 19 | for ann in anns: 20 | rect = ann['bbox'] 21 | c = ann['category_id'] 22 | if visual: 23 | pt1 = (int(rect[0]), int(rect[1])) 24 | pt2 = (int(rect[0]+rect[2]), int(rect[1]+rect[3])) 25 | cv2.rectangle(im, pt1, pt2, color_bar[c-1], 3) 26 | cv2.imshow('img', im) 27 | cv2.waitKey(200) 28 | print('done') 29 | 30 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/got10k/gen_json.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from __future__ import unicode_literals 5 | import json 6 | from os.path import join, exists 7 | import os 8 | import pandas as pd 9 | 10 | dataset_path = 'data' 11 | train_sets = ['GOT-10k_Train_split_01','GOT-10k_Train_split_02','GOT-10k_Train_split_03','GOT-10k_Train_split_04', 12 | 'GOT-10k_Train_split_05','GOT-10k_Train_split_06','GOT-10k_Train_split_07','GOT-10k_Train_split_08', 13 | 'GOT-10k_Train_split_09','GOT-10k_Train_split_10','GOT-10k_Train_split_11','GOT-10k_Train_split_12', 14 | 'GOT-10k_Train_split_13','GOT-10k_Train_split_14','GOT-10k_Train_split_15','GOT-10k_Train_split_16', 15 | 'GOT-10k_Train_split_17','GOT-10k_Train_split_18','GOT-10k_Train_split_19'] 16 | val_set = ['val'] 17 | d_sets = {'videos_val':val_set,'videos_train':train_sets} 18 | # videos_val = ['MOT17-02-DPM'] 19 | # videos_train = ['MOT17-04-DPM','MOT17-05-DPM','MOT17-09-DPM','MOT17-11-DPM','MOT17-13-DPM'] 20 | # d_sets = {'videos_val':videos_val,'videos_train':videos_train} 21 | 22 | def parse_and_sched(dl_dir='.'): 23 | # For each of the two datasets 24 | js = {} 25 | for d_set in d_sets: 26 | for dataset in d_sets[d_set]: 27 | videos = os.listdir(os.path.join(dataset_path,dataset)) 28 | for video in videos: 29 | if video == 'list.txt': 30 | continue 31 | video = dataset+'/'+video 32 | gt_path = join(dataset_path, video, 'groundtruth.txt') 33 | f = open(gt_path, 'r') 34 | groundtruth = f.readlines() 35 | f.close() 36 | for idx, gt_line in enumerate(groundtruth): 37 | gt_image = gt_line.strip().split(',') 38 | frame = '%06d' % (int(idx)) 39 | obj = '%02d' % (int(0)) 40 | bbox = [int(float(gt_image[0])), int(float(gt_image[1])), 41 | int(float(gt_image[0])) + int(float(gt_image[2])), 42 | int(float(gt_image[1])) + int(float(gt_image[3]))] # xmin,ymin,xmax,ymax 43 | 44 | if video not in js: 45 | js[video] = {} 46 | if obj not in js[video]: 47 | js[video][obj] = {} 48 | js[video][obj][frame] = bbox 49 | if 'videos_val' == d_set: 50 | json.dump(js, open('val.json', 'w'), indent=4, sort_keys=True) 51 | 
else: 52 | json.dump(js, open('train.json', 'w'), indent=4, sort_keys=True) 53 | js = {} 54 | 55 | print(d_set+': All videos downloaded') 56 | 57 | 58 | if __name__ == '__main__': 59 | parse_and_sched() 60 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/got10k/readme.md: -------------------------------------------------------------------------------- 1 | # Preprocessing GOT-10K 2 | A Large High-Diversity Benchmark for Generic Object Tracking in the Wild 3 | 4 | ### Prepare dataset 5 | 6 | After downloading the dataset, unzip it into the *train_dataset/got10k* directory: 7 | 8 | ````shell 9 | mkdir data 10 | unzip full_data/train_data/*.zip -d ./data 11 | ```` 12 | 13 | ### Crop & Generate data info 14 | 15 | ````shell 16 | #python par_crop.py [crop_size] [num_threads] 17 | python par_crop.py 511 12 18 | python gen_json.py 19 | ```` 20 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/vid/gen_json.py: -------------------------------------------------------------------------------- 1 | from os.path import join 2 | from os import listdir 3 | import json 4 | import numpy as np 5 | 6 | print('load json (raw vid info), please wait 20 seconds~') 7 | vid = json.load(open('vid.json', 'r')) 8 | 9 | 10 | def check_size(frame_sz, bbox): 11 | min_ratio = 0.1 12 | max_ratio = 0.75 13 | # only accept objects >10% and <75% of the total frame 14 | area_ratio = np.sqrt((bbox[2]-bbox[0])*(bbox[3]-bbox[1])/float(np.prod(frame_sz))) 15 | ok = (area_ratio > min_ratio) and (area_ratio < max_ratio) 16 | return ok 17 | 18 | 19 | def check_borders(frame_sz, bbox): 20 | dist_from_border = 0.05 * (bbox[2] - bbox[0] + bbox[3] - bbox[1])/2 21 | ok = (bbox[0] > dist_from_border) and (bbox[1] > dist_from_border) and \ 22 | ((frame_sz[0] - bbox[2]) > dist_from_border) and \ 23 | ((frame_sz[1] - bbox[3]) > dist_from_border) 24 | return ok 25 | 26 | 27 | snippets = dict() 28 | n_snippets = 0 29 | n_videos = 0 30 | for subset in vid: 31 | for video in subset: 32 | n_videos += 1 33 | frames = video['frame'] 34 | id_set = [] 35 | id_frames = [[] for _ in range(60)] # at most 60 objects; independent lists avoid shared-list aliasing 36 | for f, frame in enumerate(frames): 37 | objs = frame['objs'] 38 | frame_sz = frame['frame_sz'] 39 | for obj in objs: 40 | trackid = obj['trackid'] 41 | occluded = obj['occ'] 42 | bbox = obj['bbox'] 43 | # if occluded: 44 | # continue 45 | # 46 | # if not(check_size(frame_sz, bbox) and check_borders(frame_sz, bbox)): 47 | # continue 48 | # 49 | # if obj['c'] in ['n01674464', 'n01726692', 'n04468005', 'n02062744']: 50 | # continue 51 | 52 | if trackid not in id_set: 53 | id_set.append(trackid) 54 | id_frames[trackid] = [] 55 | id_frames[trackid].append(f) 56 | if len(id_set) > 0: 57 | snippets[video['base_path']] = dict() 58 | for selected in id_set: 59 | frame_ids = sorted(id_frames[selected]) 60 | sequences = np.split(frame_ids, np.array(np.where(np.diff(frame_ids) > 1)[0]) + 1) 61 | sequences = [s for s in sequences if len(s) > 1] # remove isolated frame. 
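            # (descriptive note) np.diff(frame_ids) > 1 marks gaps in the sorted
            # frame indices, and splitting one position past each gap yields
            # maximal runs of consecutive frames, so every snippet below is one
            # unbroken appearance of the selected object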
62 | for seq in sequences: 63 | snippet = dict() 64 | for frame_id in seq: 65 | frame = frames[frame_id] 66 | for obj in frame['objs']: 67 | if obj['trackid'] == selected: 68 | o = obj 69 | continue 70 | snippet[frame['img_path'].split('.')[0]] = o['bbox'] 71 | snippets[video['base_path']]['{:02d}'.format(selected)] = snippet 72 | n_snippets += 1 73 | print('video: {:d} snippets_num: {:d}'.format(n_videos, n_snippets)) 74 | 75 | train = {k:v for (k,v) in snippets.items() if 'train' in k} 76 | val = {k:v for (k,v) in snippets.items() if 'val' in k} 77 | 78 | json.dump(train, open('train.json', 'w'), indent=4, sort_keys=True) 79 | json.dump(val, open('val.json', 'w'), indent=4, sort_keys=True) 80 | print('done!') 81 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/vid/parse_vid.py: -------------------------------------------------------------------------------- 1 | from os.path import join 2 | from os import listdir 3 | import json 4 | import glob 5 | import xml.etree.ElementTree as ET 6 | 7 | VID_base_path = './ILSVRC2015' 8 | ann_base_path = join(VID_base_path, 'Annotations/VID/train/') 9 | img_base_path = join(VID_base_path, 'Data/VID/train/') 10 | sub_sets = sorted({'a', 'b', 'c', 'd', 'e'}) 11 | 12 | vid = [] 13 | for sub_set in sub_sets: 14 | sub_set_base_path = join(ann_base_path, sub_set) 15 | videos = sorted(listdir(sub_set_base_path)) 16 | s = [] 17 | for vi, video in enumerate(videos): 18 | print('subset: {} video id: {:04d} / {:04d}'.format(sub_set, vi, len(videos))) 19 | v = dict() 20 | v['base_path'] = join(sub_set, video) 21 | v['frame'] = [] 22 | video_base_path = join(sub_set_base_path, video) 23 | xmls = sorted(glob.glob(join(video_base_path, '*.xml'))) 24 | for xml in xmls: 25 | f = dict() 26 | xmltree = ET.parse(xml) 27 | size = xmltree.findall('size')[0] 28 | frame_sz = [int(it.text) for it in size] 29 | objects = xmltree.findall('object') 30 | objs = [] 31 | for object_iter in objects: 32 | trackid = int(object_iter.find('trackid').text) 33 | name = (object_iter.find('name')).text 34 | bndbox = object_iter.find('bndbox') 35 | occluded = int(object_iter.find('occluded').text) 36 | o = dict() 37 | o['c'] = name 38 | o['bbox'] = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text), 39 | int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)] 40 | o['trackid'] = trackid 41 | o['occ'] = occluded 42 | objs.append(o) 43 | f['frame_sz'] = frame_sz 44 | f['img_path'] = xml.split('/')[-1].replace('xml', 'JPEG') 45 | f['objs'] = objs 46 | v['frame'].append(f) 47 | s.append(v) 48 | vid.append(s) 49 | print('save json (raw vid info), please wait 1 min~') 50 | json.dump(vid, open('vid.json', 'w'), indent=4, sort_keys=True) 51 | print('done!') 52 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/vid/readme.md: -------------------------------------------------------------------------------- 1 | # Preprocessing VID(Object detection from video) 2 | Large Scale Visual Recognition Challenge 2015 (ILSVRC2015) 3 | 4 | ### Download dataset (86GB) 5 | 6 | ````shell 7 | wget http://bvisionweb1.cs.unc.edu/ilsvrc2015/ILSVRC2015_VID.tar.gz 8 | tar -xzvf ./ILSVRC2015_VID.tar.gz 9 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0000 ILSVRC2015/Annotations/VID/train/a 10 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0001 ILSVRC2015/Annotations/VID/train/b 11 | ln -sfb 
$PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0002 ILSVRC2015/Annotations/VID/train/c 12 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/train/ILSVRC2015_VID_train_0003 ILSVRC2015/Annotations/VID/train/d 13 | ln -sfb $PWD/ILSVRC2015/Annotations/VID/val ILSVRC2015/Annotations/VID/train/e 14 | 15 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0000 ILSVRC2015/Data/VID/train/a 16 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0001 ILSVRC2015/Data/VID/train/b 17 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0002 ILSVRC2015/Data/VID/train/c 18 | ln -sfb $PWD/ILSVRC2015/Data/VID/train/ILSVRC2015_VID_train_0003 ILSVRC2015/Data/VID/train/d 19 | ln -sfb $PWD/ILSVRC2015/Data/VID/val ILSVRC2015/Data/VID/train/e 20 | ```` 21 | 22 | ### Crop & Generate data info (20 min) 23 | 24 | ````shell 25 | python parse_vid.py 26 | 27 | #python par_crop.py [crop_size] [num_threads] 28 | python par_crop.py 511 12 29 | python gen_json.py 30 | ```` 31 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/vid/visual.py: -------------------------------------------------------------------------------- 1 | from os.path import join 2 | from os import listdir 3 | import cv2 4 | import numpy as np 5 | import glob 6 | import xml.etree.ElementTree as ET 7 | 8 | visual = False 9 | color_bar = np.random.randint(0, 255, (90, 3)) 10 | 11 | VID_base_path = './ILSVRC2015' 12 | ann_base_path = join(VID_base_path, 'Annotations/VID/train/') 13 | img_base_path = join(VID_base_path, 'Data/VID/train/') 14 | sub_sets = sorted({'a', 'b', 'c', 'd', 'e'}) 15 | for sub_set in sub_sets: 16 | sub_set_base_path = join(ann_base_path, sub_set) 17 | videos = sorted(listdir(sub_set_base_path)) 18 | for vi, video in enumerate(videos): 19 | print('subset: {} video id: {:04d} / {:04d}'.format(sub_set, vi, len(videos))) 20 | 21 | video_base_path = join(sub_set_base_path, video) 22 | xmls = sorted(glob.glob(join(video_base_path, '*.xml'))) 23 | for xml in xmls: 24 | f = dict() 25 | xmltree = ET.parse(xml) 26 | size = xmltree.findall('size')[0] 27 | frame_sz = [int(it.text) for it in size] 28 | objects = xmltree.findall('object') 29 | if visual: 30 | im = cv2.imread(xml.replace('xml', 'JPEG').replace('Annotations', 'Data')) 31 | for object_iter in objects: 32 | trackid = int(object_iter.find('trackid').text) 33 | bndbox = object_iter.find('bndbox') 34 | bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text), 35 | int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)] 36 | if visual: 37 | pt1 = (int(bbox[0]), int(bbox[1])) 38 | pt2 = (int(bbox[2]), int(bbox[3])) 39 | cv2.rectangle(im, pt1, pt2, color_bar[trackid], 3) 40 | if visual: 41 | cv2.imshow('img', im) 42 | cv2.waitKey(1) 43 | 44 | print('done!') 45 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/yt_bb/checknum.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import glob 3 | 4 | col_names = ['youtube_id', 'timestamp_ms', 'class_id', 'class_name', 5 | 'object_id', 'object_presence', 'xmin', 'xmax', 'ymin', 'ymax'] 6 | 7 | sets = ['yt_bb_detection_validation', 'yt_bb_detection_train'] 8 | 9 | for subset in sets: 10 | df = pd.read_csv('./'+ subset +'.csv', header=None, index_col=False) # was pd.DataFrame.from_csv, removed in pandas 1.0 11 | df.columns = col_names 12 | vids = sorted(df['youtube_id'].unique()) 13 | n_vids = len(vids) 14 | print('Total videos in {}.csv is {:d}'.format(subset, 
n_vids)) 15 | 16 | frame_download = glob.glob('./{}/*/*.jpg'.format(subset)) 17 | frame_download = [frame.split('/')[-1] for frame in frame_download] 18 | frame_download = [frame[:frame.find('_')] for frame in frame_download] 19 | frame_download = [frame[:frame.find('_')] for frame in frame_download] 20 | frame_download = [frame[:frame.find('_')] for frame in frame_download] 21 | frame_download = sorted(set(frame_download)) 22 | # print(frame_download) 23 | print('Total downloaded in {} is {:d}'.format(subset, len(frame_download))) 24 | 25 | 26 | print('done') 27 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/yt_bb/gen_json.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from __future__ import unicode_literals 5 | import json 6 | from os.path import join, exists 7 | import pandas as pd 8 | 9 | # The data sets to be downloaded 10 | d_sets = ['yt_bb_detection_validation', 'yt_bb_detection_train'] 11 | 12 | # Column names for detection CSV files 13 | col_names = ['youtube_id', 'timestamp_ms','class_id','class_name', 14 | 'object_id','object_presence','xmin','xmax','ymin','ymax'] 15 | 16 | instanc_size = 511 17 | crop_path = './crop{:d}'.format(instanc_size) 18 | 19 | 20 | def parse_and_sched(dl_dir='.'): 21 | # For each of the two datasets 22 | js = {} 23 | for d_set in d_sets: 24 | 25 | # Make the directory for this dataset 26 | d_set_dir = dl_dir+'/'+d_set+'/' 27 | 28 | # Parse csv data using pandas 29 | print(d_set+': Parsing annotations into clip data...') 30 | df = pd.read_csv(d_set+'.csv', header=None, index_col=False) # was pd.DataFrame.from_csv, removed in pandas 1.0 31 | df.columns = col_names 32 | 33 | # Get list of unique video files 34 | vids = df['youtube_id'].unique() 35 | 36 | for vid in vids: 37 | data = df[df['youtube_id']==vid] 38 | for index, row in data.iterrows(): 39 | youtube_id, timestamp_ms, class_id, class_name, \ 40 | object_id, object_presence, x1, x2, y1, y2 = row 41 | 42 | if object_presence == 'absent': 43 | continue 44 | 45 | if x1 < 0 or x2 < 0 or y1 < 0 or y2 < 0 or y2 < y1 or x2 < x1: 46 | continue 47 | 48 | bbox = [x1, y1, x2, y2] 49 | frame = '%06d' % (int(timestamp_ms) // 1000) 50 | obj = '%02d' % (int(object_id)) 51 | video = join(d_set_dir + str(class_id), youtube_id) 52 | 53 | if not exists(join(crop_path, video, '{}.{}.x.jpg'.format(frame, obj))): 54 | continue 55 | 56 | if video not in js: 57 | js[video] = {} 58 | if obj not in js[video]: 59 | js[video][obj] = {} 60 | js[video][obj][frame] = bbox 61 | 62 | if 'yt_bb_detection_train' == d_set: 63 | json.dump(js, open('train.json', 'w'), indent=4, sort_keys=True) 64 | else: 65 | json.dump(js, open('val.json', 'w'), indent=4, sort_keys=True) 66 | js = {} 67 | print(d_set+': All videos downloaded') 68 | 69 | 70 | if __name__ == '__main__': 71 | parse_and_sched() 72 | -------------------------------------------------------------------------------- /SiamAPN/training_dataset/yt_bb/readme.md: -------------------------------------------------------------------------------- 1 | # Preprocessing Youtube-bb (YouTube-BoundingBoxes Dataset) 2 | 3 | ### Download raw labels 4 | 5 | ````shell 6 | wget https://research.google.com/youtube-bb/yt_bb_detection_train.csv.gz 7 | wget https://research.google.com/youtube-bb/yt_bb_detection_validation.csv.gz 8 | 9 | gzip -d ./yt_bb_detection_train.csv.gz 10 | gzip -d ./yt_bb_detection_validation.csv.gz 11 | ```` 12 | 13 | ### Download raw images with 
-------------------------------------------------------------------------------- /SiamAPN/training_dataset/yt_bb/readme.md: --------------------------------------------------------------------------------

# Preprocessing YouTube-BB (YouTube-BoundingBoxes Dataset)

### Download raw labels

````shell
wget https://research.google.com/youtube-bb/yt_bb_detection_train.csv.gz
wget https://research.google.com/youtube-bb/yt_bb_detection_validation.csv.gz

gzip -d ./yt_bb_detection_train.csv.gz
gzip -d ./yt_bb_detection_validation.csv.gz
````

### Download raw images with `youtube-bb-utility` (takes a long time, ~400 GB)

````shell
git clone https://github.com/mehdi-shiba/youtube-bb-utility.git
cd youtube-bb-utility
pip install -r requirements.txt
# python download_detection.py [VIDEO_DIR] [NUM_THREADS]
python download_detection.py ../ 12
cd ..
````

### Crop & Generate data info (about 1 day)

````shell
python par_crop.py
python gen_json.py
````

-------------------------------------------------------------------------------- /SiamAPN/training_dataset/yt_bb/visual.py: --------------------------------------------------------------------------------

import glob

import cv2
import numpy as np
import pandas as pd

visual = True

col_names = ['youtube_id', 'timestamp_ms', 'class_id', 'class_name',
             'object_id', 'object_presence', 'xmin', 'xmax', 'ymin', 'ymax']

# pd.DataFrame.from_csv was removed in pandas 1.0; read_csv is the replacement
df = pd.read_csv('yt_bb_detection_validation.csv', header=None, index_col=False)
df.columns = col_names
frame_num = len(df['youtube_id'])

# Index the downloaded frames by file name
img_path = glob.glob('/mnt/qwang/youtubebb/frames/val*/*/*.jpg')
d = {key.split('/')[-1]: value for (value, key) in enumerate(img_path)}

for n in range(frame_num):
    # object_presence is a string ('present'/'absent'), so test it explicitly;
    # any non-empty string would otherwise be truthy
    if df['object_presence'][n] == 'present':
        frame_name = df['youtube_id'][n] + '_' + str(df['timestamp_ms'][n]) + '_' + \
                     str(df['class_id'][n]) + '_' + str(df['object_id'][n]) + '.jpg'
        bbox = np.array([df['xmin'][n], df['ymin'][n], df['xmax'][n], df['ymax'][n]])
        if frame_name in d:
            frame_path = img_path[d[frame_name]]
            if visual:
                im = cv2.imread(frame_path)
                h, w, _ = im.shape
                # YT-BB boxes are normalized, so scale by the image size before drawing
                pt1 = (int(bbox[0] * w), int(bbox[1] * h))
                pt2 = (int(bbox[2] * w), int(bbox[3] * h))
                cv2.rectangle(im, pt1, pt2, (0, 255, 0), 2)
                cv2.imshow('img', im)
                cv2.waitKey(100)
        else:
            print('no image: {}'.format(frame_name))

print('done')

-------------------------------------------------------------------------------- /UAVTrack112/README.md: --------------------------------------------------------------------------------

# UAVTrack112

## Sequence number

This benchmark was collected by a DJI Mavic Air 2 and contains 112 sequences. The structure of the files is as follows:

```
UAVTrack112

--data_seq \ the sequences of images
 ...

--anno \ the ground truth of each frame
 ...

--att \ the 13 attributes of each sequence
 ...

```

**Note:** The ground truth adopts the common format (left-top x-coordinate, left-top y-coordinate, width, height) to represent the bounding box, as in the loading sketch below.
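A minimal loading sketch; the sequence name `seq001` and the comma delimiter are assumptions about the on-disk layout, not guarantees of this README:

```python
import numpy as np

# One row per frame: (left-top x, left-top y, width, height)
gt = np.loadtxt('UAVTrack112/anno/seq001.txt', delimiter=',')

# Convert to corner format (x1, y1, x2, y2), e.g. for drawing or IoU
corners = np.column_stack([gt[:, 0], gt[:, 1],
                           gt[:, 0] + gt[:, 2], gt[:, 1] + gt[:, 3]])

# The matching attribute file holds 13 binary flags per sequence,
# in the order given in the note below
att = np.loadtxt('UAVTrack112/att/seq001.txt', delimiter=',')
```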
**Note:** The attributes are ordered as Fast Motion, Low Resolution, Long-term Tracking, Aspect Ratio Change, Scale Variation, Partial Occlusion, Full Occlusion, Camera Motion, Out-of-View, Illumination Variation, Low Illumination, Viewpoint Change, Similar Object.


Fast Motion: the motion of the ground truth bounding box is larger than 20 pixels between two consecutive frames.

Low Resolution: at least one ground truth bounding box covers fewer than 400 pixels.

Long-term Tracking: the sequence contains more than 1000 frames.

Aspect Ratio Change: the ratio of the ground truth aspect ratio in the first frame to that in at least one subsequent frame is outside the range [0.5, 2].

Scale Variation: the ratio of the initial bounding box size to that of at least one subsequent bounding box is outside the range [0.5, 2].

Partial Occlusion: the target is partially occluded.

Full Occlusion: the target is fully occluded.

Camera Motion: abrupt motion of the camera.

Out-of-View: some portion of the target leaves the view.

Illumination Variation: the illumination of the target changes significantly.

Low Illumination: the illumination of the surroundings is insufficient.

Viewpoint Change: the viewpoint affects the target's appearance significantly.

Similar Object: there are objects of similar appearance near the target.


**Note:** UAVTrack112_l is an aerial tracking benchmark for long-term tracking that includes 45 sequences. The annotation files are [available](https://pan.baidu.com/s/1YhDd_WwASe2Fc-1lyIfTPA) (code: z3f5).

-------------------------------------------------------------------------------- /requirement.txt: --------------------------------------------------------------------------------

numpy
opencv-python
pyyaml
yacs
tqdm
colorama
matplotlib
cython
tensorboardX

--------------------------------------------------------------------------------
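The dependencies listed in `requirement.txt` can be installed in one step. A minimal setup sketch; note that the file does not list PyTorch itself, which the trackers presumably require, so a build matching your CUDA version has to be installed separately:

````shell
pip install -r requirement.txt
````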