├── README.md ├── SEE_Net_HOT2020_results.zip ├── SEE_Net_HOT2022_results.zip ├── SEE_Net_HOT2023_results.zip ├── SEE_Net_IMEC25_results.zip ├── code ├── SEE-Net-NIR-25bands │ ├── README.md │ ├── build │ │ └── temp.linux-x86_64-3.7 │ │ │ └── toolkit │ │ │ └── utils │ │ │ ├── region.o │ │ │ └── src │ │ │ └── region.o │ ├── experiments │ │ ├── __pycache__ │ │ │ └── tmp.cpython-37.pyc │ │ └── siamban_r50_l234 │ │ │ ├── config.yaml │ │ │ └── snapshot │ │ │ └── download │ ├── install.sh │ ├── results.zip │ ├── setup.py │ ├── siamban │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-37.pyc │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── config.cpython-37.pyc │ │ │ │ └── xcorr.cpython-37.pyc │ │ │ ├── config.py │ │ │ └── xcorr.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── augmentation.cpython-37.pyc │ │ │ │ ├── dataset.cpython-37.pyc │ │ │ │ └── point_target.cpython-37.pyc │ │ │ ├── augmentation.py │ │ │ ├── dataset.py │ │ │ └── point_target.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── iou_loss.cpython-37.pyc │ │ │ │ ├── loss.cpython-37.pyc │ │ │ │ └── model_builder.cpython-37.pyc │ │ │ ├── backbone │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ │ ├── alexnet.cpython-37.pyc │ │ │ │ │ ├── gpu_mem_track.cpython-37.pyc │ │ │ │ │ ├── mobile_v2.cpython-37.pyc │ │ │ │ │ └── resnet_atrous.cpython-37.pyc │ │ │ │ ├── alexnet.py │ │ │ │ ├── gpu_mem_track.py │ │ │ │ ├── mobile_v2.py │ │ │ │ ├── modelsize_estimate.py │ │ │ │ └── resnet_atrous.py │ │ │ ├── head │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ │ └── ban.cpython-37.pyc │ │ │ │ └── ban.py │ │ │ ├── init_weight.py │ │ │ ├── iou_loss.py │ │ │ ├── loss.py │ │ │ ├── model_builder.py │ │ │ └── neck │ │ │ │ ├── __init__.py │ │ │ │ ├── 
__pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ └── neck.cpython-37.pyc │ │ │ │ └── neck.py │ │ ├── tracker │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── base_tracker.cpython-37.pyc │ │ │ │ ├── siamban_tracker.cpython-37.pyc │ │ │ │ └── tracker_builder.cpython-37.pyc │ │ │ ├── base_tracker.py │ │ │ ├── siamban_tracker.py │ │ │ └── tracker_builder.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── average_meter.cpython-37.pyc │ │ │ ├── bbox.cpython-37.pyc │ │ │ ├── distributed.cpython-37.pyc │ │ │ ├── imagePreDeal.cpython-37.pyc │ │ │ ├── log_helper.cpython-37.pyc │ │ │ ├── lr_scheduler.cpython-37.pyc │ │ │ ├── misc.cpython-37.pyc │ │ │ ├── model_load.cpython-37.pyc │ │ │ └── point.cpython-37.pyc │ │ │ ├── average_meter.py │ │ │ ├── bbox.py │ │ │ ├── distributed.py │ │ │ ├── imagePreDeal.py │ │ │ ├── log_helper.py │ │ │ ├── lr_scheduler.py │ │ │ ├── misc.py │ │ │ ├── model_load.py │ │ │ └── point.py │ ├── test.sh │ ├── testing_dataset │ │ └── README.md │ ├── toolkit │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── dataset.py │ │ │ ├── got10k.py │ │ │ ├── lasot.py │ │ │ ├── nfs.py │ │ │ ├── otb.py │ │ │ ├── trackingnet.py │ │ │ ├── uav.py │ │ │ ├── video.py │ │ │ └── vot.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── ar_benchmark.py │ │ │ ├── eao_benchmark.py │ │ │ ├── f1_benchmark.py │ │ │ └── ope_benchmark.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── c_region.pxd │ │ │ ├── misc.py │ │ │ ├── region.c │ │ │ ├── region.cpython-37m-x86_64-linux-gnu.so │ │ │ ├── region.pyx │ │ │ ├── src │ │ │ │ ├── buffer.h │ │ │ │ ├── region.c │ │ │ │ └── region.h │ │ │ └── statistics.py │ │ └── visualization │ │ │ ├── __init__.py │ │ │ ├── draw_eao.py │ │ │ ├── draw_f1.py │ │ │ ├── draw_success_precision.py │ │ │ └── draw_utils.py │ ├── tools │ │ ├── demo.py │ │ ├── eval.py │ │ ├── hp_search.py │ │ ├── test.py │ │ ├── test_epochs.py │ │ ├── 
train.py │ │ └── tune.py │ └── vot_siamban │ │ ├── __init__.py │ │ ├── tracker_SiamBAN.m │ │ ├── vot.py │ │ └── vot_siamban.py └── SEE-Net-VIS-16bands │ ├── README.md │ ├── build │ └── temp.linux-x86_64-3.7 │ │ └── toolkit │ │ └── utils │ │ ├── region.o │ │ └── src │ │ └── region.o │ ├── experiments │ └── siamban_r50_l234 │ │ ├── config.yaml │ │ └── snapshot │ │ └── download │ ├── install.sh │ ├── results.zip │ ├── setup.py │ ├── siamban │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-37.pyc │ ├── core │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── config.cpython-37.pyc │ │ │ └── xcorr.cpython-37.pyc │ │ ├── config.py │ │ └── xcorr.py │ ├── datasets │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── augmentation.cpython-37.pyc │ │ │ ├── dataset.cpython-37.pyc │ │ │ └── point_target.cpython-37.pyc │ │ ├── augmentation.py │ │ ├── dataset.py │ │ └── point_target.py │ ├── models │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── iou_loss.cpython-37.pyc │ │ │ ├── loss.cpython-37.pyc │ │ │ └── model_builder.cpython-37.pyc │ │ ├── backbone │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── alexnet.cpython-37.pyc │ │ │ │ ├── gpu_mem_track.cpython-37.pyc │ │ │ │ ├── mobile_v2.cpython-37.pyc │ │ │ │ └── resnet_atrous.cpython-37.pyc │ │ │ ├── alexnet.py │ │ │ ├── gpu_mem_track.py │ │ │ ├── mobile_v2.py │ │ │ ├── modelsize_estimate.py │ │ │ └── resnet_atrous.py │ │ ├── head │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ └── ban.cpython-37.pyc │ │ │ └── ban.py │ │ ├── init_weight.py │ │ ├── iou_loss.py │ │ ├── loss.py │ │ ├── model_builder.py │ │ └── neck │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ └── neck.cpython-37.pyc │ │ │ └── neck.py │ ├── tracker │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── 
base_tracker.cpython-37.pyc │ │ │ ├── siamban_tracker.cpython-37.pyc │ │ │ └── tracker_builder.cpython-37.pyc │ │ ├── base_tracker.py │ │ ├── siamban_tracker.py │ │ └── tracker_builder.py │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── average_meter.cpython-37.pyc │ │ ├── bbox.cpython-37.pyc │ │ ├── distributed.cpython-37.pyc │ │ ├── log_helper.cpython-37.pyc │ │ ├── lr_scheduler.cpython-37.pyc │ │ ├── misc.cpython-37.pyc │ │ ├── model_load.cpython-37.pyc │ │ └── point.cpython-37.pyc │ │ ├── average_meter.py │ │ ├── bbox.py │ │ ├── distributed.py │ │ ├── log_helper.py │ │ ├── lr_scheduler.py │ │ ├── misc.py │ │ ├── model_load.py │ │ └── point.py │ ├── test.sh │ ├── testing_dataset │ └── README.md │ ├── toolkit │ ├── __init__.py │ ├── datasets │ │ ├── __init__.py │ │ ├── dataset.py │ │ ├── got10k.py │ │ ├── lasot.py │ │ ├── nfs.py │ │ ├── otb.py │ │ ├── trackingnet.py │ │ ├── uav.py │ │ ├── video.py │ │ └── vot.py │ ├── evaluation │ │ ├── __init__.py │ │ ├── ar_benchmark.py │ │ ├── eao_benchmark.py │ │ ├── f1_benchmark.py │ │ └── ope_benchmark.py │ ├── utils │ │ ├── __init__.py │ │ ├── c_region.pxd │ │ ├── misc.py │ │ ├── region.c │ │ ├── region.cpython-37m-x86_64-linux-gnu.so │ │ ├── region.pyx │ │ ├── src │ │ │ ├── buffer.h │ │ │ ├── region.c │ │ │ └── region.h │ │ └── statistics.py │ └── visualization │ │ ├── __init__.py │ │ ├── draw_eao.py │ │ ├── draw_f1.py │ │ ├── draw_success_precision.py │ │ └── draw_utils.py │ ├── tools │ ├── demo.py │ ├── eval.py │ ├── hp_search.py │ ├── test.py │ ├── test_epochs.py │ ├── train.py │ └── tune.py │ └── vot_siamban │ ├── __init__.py │ ├── tracker_SiamBAN.m │ ├── vot.py │ └── vot_siamban.py └── plot-tools ├── plotAUC_DP_curve ├── BAE-Net-0.6062-0.8778.mat ├── CNHT-0.1713-0.3351.mat ├── DP_AUC │ ├── AUC_HS_tracker.eps │ └── DP_HS_tracker.eps ├── DeepHKCF-0.3033-0.5415.mat ├── MFI-0.6009-0.8925.mat ├── MHT-0.5860-0.8818.mat ├── SEE-Net-0.6657-0.9327.mat ├── SST-Net-0.6230-0.9161.mat 
├── plotTracking_AUC_DP.m └── readme.txt ├── plotAttribute_Curve ├── AttributeRes │ ├── BC_AUC.eps │ ├── DEF_AUC.eps │ ├── FM_AUC.eps │ ├── IPR_AUC.eps │ ├── IV_AUC.eps │ ├── LR_AUC.eps │ ├── MB_AUC.eps │ ├── OCC_AUC.eps │ ├── OPR_AUC.eps │ ├── OV_AUC.eps │ └── SV_AUC.eps ├── BAE-Net-0.6062-0.8778.mat ├── CNHT-0.1713-0.3351.mat ├── DROL-0.6262-0.9001.mat ├── DeepHKCF-0.3033-0.5415.mat ├── Li_res_attribute_comp.m ├── MFI-0.6009-0.8925.mat ├── MHT-0.5860-0.8818.mat ├── SEE-Net-0.6657-0.9327.mat ├── SST-Net-0.6230-0.9161.mat ├── computeArea.m ├── computeDistance.m ├── computeIntersectionArea.m ├── computeMetric.m ├── computePascalScore.m ├── compute_performance_measures.m ├── compute_relaibitlity.m ├── drawPlot_new.m ├── load_video_info.m └── readme.txt ├── plotRect ├── comp_demo_whisper_TIP.py ├── detection_res │ ├── BAE-Net │ │ ├── ball.txt │ │ ├── basketball.txt │ │ ├── board.txt │ │ ├── book.txt │ │ ├── bus.txt │ │ ├── bus2.txt │ │ ├── campus.txt │ │ ├── car.txt │ │ ├── car2.txt │ │ ├── car3.txt │ │ ├── card.txt │ │ ├── coin.txt │ │ ├── coke.txt │ │ ├── drive.txt │ │ ├── excavator.txt │ │ ├── face.txt │ │ ├── face2.txt │ │ ├── forest.txt │ │ ├── forest2.txt │ │ ├── fruit.txt │ │ ├── hand.txt │ │ ├── kangaroo.txt │ │ ├── paper.txt │ │ ├── pedestrain.txt │ │ ├── pedestrian2.txt │ │ ├── player.txt │ │ ├── playground.txt │ │ ├── rider1.txt │ │ ├── rider2.txt │ │ ├── rubik.txt │ │ ├── student.txt │ │ ├── toy1.txt │ │ ├── toy2.txt │ │ ├── trucker.txt │ │ └── worker.txt │ ├── DROL │ │ ├── ball.txt │ │ ├── basketball.txt │ │ ├── board.txt │ │ ├── book.txt │ │ ├── bus.txt │ │ ├── bus2.txt │ │ ├── campus.txt │ │ ├── car.txt │ │ ├── car2.txt │ │ ├── car3.txt │ │ ├── card.txt │ │ ├── coin.txt │ │ ├── coke.txt │ │ ├── drive.txt │ │ ├── excavator.txt │ │ ├── face.txt │ │ ├── face2.txt │ │ ├── forest.txt │ │ ├── forest2.txt │ │ ├── fruit.txt │ │ ├── hand.txt │ │ ├── kangaroo.txt │ │ ├── paper.txt │ │ ├── pedestrain.txt │ │ ├── pedestrian2.txt │ │ ├── player.txt │ │ ├── 
playground.txt │ │ ├── rider1.txt │ │ ├── rider2.txt │ │ ├── rubik.txt │ │ ├── student.txt │ │ ├── toy1.txt │ │ ├── toy2.txt │ │ ├── trucker.txt │ │ └── worker.txt │ ├── GT │ │ ├── ball_gt.txt │ │ ├── basketball_gt.txt │ │ ├── board_gt.txt │ │ ├── book_gt.txt │ │ ├── bus2_gt.txt │ │ ├── bus_gt.txt │ │ ├── campus_gt.txt │ │ ├── car2_gt.txt │ │ ├── car3_gt.txt │ │ ├── car_gt.txt │ │ ├── card_gt.txt │ │ ├── coin_gt.txt │ │ ├── coke_gt.txt │ │ ├── drive_gt.txt │ │ ├── excavator_gt.txt │ │ ├── face2_gt.txt │ │ ├── face_gt.txt │ │ ├── forest2_gt.txt │ │ ├── forest_gt.txt │ │ ├── fruit_gt.txt │ │ ├── hand_gt.txt │ │ ├── kangaroo_gt.txt │ │ ├── paper_gt.txt │ │ ├── pedestrain_gt.txt │ │ ├── pedestrian2_gt.txt │ │ ├── player_gt.txt │ │ ├── playground_gt.txt │ │ ├── rider1_gt.txt │ │ ├── rider2_gt.txt │ │ ├── rubik_gt.txt │ │ ├── student_gt.txt │ │ ├── toy1_gt.txt │ │ ├── toy2_gt.txt │ │ ├── trucker_gt.txt │ │ └── worker_gt.txt │ ├── MFI │ │ ├── ball.txt │ │ ├── basketball.txt │ │ ├── board.txt │ │ ├── book.txt │ │ ├── bus.txt │ │ ├── bus2.txt │ │ ├── campus.txt │ │ ├── car.txt │ │ ├── car2.txt │ │ ├── car3.txt │ │ ├── card.txt │ │ ├── coin.txt │ │ ├── coke.txt │ │ ├── drive.txt │ │ ├── excavator.txt │ │ ├── face.txt │ │ ├── face2.txt │ │ ├── forest.txt │ │ ├── forest2.txt │ │ ├── fruit.txt │ │ ├── hand.txt │ │ ├── kangaroo.txt │ │ ├── paper.txt │ │ ├── pedestrain.txt │ │ ├── pedestrian2.txt │ │ ├── player.txt │ │ ├── playground.txt │ │ ├── rider1.txt │ │ ├── rider2.txt │ │ ├── rubik.txt │ │ ├── student.txt │ │ ├── toy1.txt │ │ ├── toy2.txt │ │ ├── trucker.txt │ │ └── worker.txt │ ├── MHT │ │ ├── ball.txt │ │ ├── basketball.txt │ │ ├── board.txt │ │ ├── book.txt │ │ ├── bus.txt │ │ ├── bus2.txt │ │ ├── campus.txt │ │ ├── car.txt │ │ ├── car2.txt │ │ ├── car3.txt │ │ ├── card.txt │ │ ├── coin.txt │ │ ├── coke.txt │ │ ├── drive.txt │ │ ├── excavator.txt │ │ ├── face.txt │ │ ├── face2.txt │ │ ├── forest.txt │ │ ├── forest2.txt │ │ ├── fruit.txt │ │ ├── hand.txt │ │ ├── 
kangaroo.txt │ │ ├── paper.txt │ │ ├── pedestrain.txt │ │ ├── pedestrian2.txt │ │ ├── player.txt │ │ ├── playground.txt │ │ ├── rider1.txt │ │ ├── rider2.txt │ │ ├── rubik.txt │ │ ├── student.txt │ │ ├── toy1.txt │ │ ├── toy2.txt │ │ ├── trucker.txt │ │ └── worker.txt │ ├── SEE-Net │ │ ├── ball.txt │ │ ├── basketball.txt │ │ ├── board.txt │ │ ├── book.txt │ │ ├── bus.txt │ │ ├── bus2.txt │ │ ├── campus.txt │ │ ├── car.txt │ │ ├── car2.txt │ │ ├── car3.txt │ │ ├── card.txt │ │ ├── coin.txt │ │ ├── coke.txt │ │ ├── drive.txt │ │ ├── excavator.txt │ │ ├── face.txt │ │ ├── face2.txt │ │ ├── forest.txt │ │ ├── forest2.txt │ │ ├── fruit.txt │ │ ├── hand.txt │ │ ├── kangaroo.txt │ │ ├── paper.txt │ │ ├── pedestrain.txt │ │ ├── pedestrian2.txt │ │ ├── player.txt │ │ ├── playground.txt │ │ ├── rider1.txt │ │ ├── rider2.txt │ │ ├── rubik.txt │ │ ├── student.txt │ │ ├── toy1.txt │ │ ├── toy2.txt │ │ ├── trucker.txt │ │ └── worker.txt │ └── SST-Net │ │ ├── ball.txt │ │ ├── basketball.txt │ │ ├── board.txt │ │ ├── book.txt │ │ ├── bus.txt │ │ ├── bus2.txt │ │ ├── campus.txt │ │ ├── car.txt │ │ ├── car2.txt │ │ ├── car3.txt │ │ ├── card.txt │ │ ├── coin.txt │ │ ├── coke.txt │ │ ├── drive.txt │ │ ├── excavator.txt │ │ ├── face.txt │ │ ├── face2.txt │ │ ├── forest.txt │ │ ├── forest2.txt │ │ ├── fruit.txt │ │ ├── hand.txt │ │ ├── kangaroo.txt │ │ ├── paper.txt │ │ ├── pedestrain.txt │ │ ├── pedestrian2.txt │ │ ├── player.txt │ │ ├── playground.txt │ │ ├── rider1.txt │ │ ├── rider2.txt │ │ ├── rubik.txt │ │ ├── student.txt │ │ ├── toy1.txt │ │ ├── toy2.txt │ │ ├── trucker.txt │ │ └── worker.txt ├── draw_visual_bar │ ├── Qualitive-Legend-right.eps │ ├── Qualitive-Legend.eps │ ├── plotTracking_legend_bar.m │ └── plotTracking_legend_bar_right.m └── readme.txt ├── readme.txt ├── test-AUC-DP-on-H_F-videos └── testSingleAUCAndDP │ ├── Li_res.m │ ├── computeArea.m │ ├── computeDistance.m │ ├── computeIntersectionArea.m │ ├── computeMetric.m │ ├── computePascalScore.m │ ├── 
compute_performance_measures.m │ ├── compute_relaibitlity.m │ ├── drawPlot.m │ ├── gt_falsecolor_website │ ├── ball_gt.txt │ ├── basketball_gt.txt │ ├── board_gt.txt │ ├── book_gt.txt │ ├── bus2_gt.txt │ ├── bus_gt.txt │ ├── campus_gt.txt │ ├── car2_gt.txt │ ├── car3_gt.txt │ ├── car_gt.txt │ ├── card_gt.txt │ ├── coin_gt.txt │ ├── coke_gt.txt │ ├── drive_gt.txt │ ├── excavator_gt.txt │ ├── face2_gt.txt │ ├── face_gt.txt │ ├── forest2_gt.txt │ ├── forest_gt.txt │ ├── fruit_gt.txt │ ├── hand_gt.txt │ ├── kangaroo_gt.txt │ ├── paper_gt.txt │ ├── pedestrain_gt.txt │ ├── pedestrian2_gt.txt │ ├── player_gt.txt │ ├── playground_gt.txt │ ├── rider1_gt.txt │ ├── rider2_gt.txt │ ├── rubik_gt.txt │ ├── student_gt.txt │ ├── toy1_gt.txt │ ├── toy2_gt.txt │ ├── trucker_gt.txt │ └── worker_gt.txt │ ├── load_video_info.m │ ├── mergeMulImageTo3DImahe.m │ └── readme.txt └── test-AUC-DP-on-color-videos └── testSingleAUCAndDP ├── Li_res.m ├── computeArea.m ├── computeDistance.m ├── computeIntersectionArea.m ├── computeMetric.m ├── computePascalScore.m ├── compute_performance_measures.m ├── compute_relaibitlity.m ├── drawPlot.m ├── gt_color_website ├── ball_gt.txt ├── basketball_gt.txt ├── board_gt.txt ├── book_gt.txt ├── bus2_gt.txt ├── bus_gt.txt ├── campus_gt.txt ├── car2_gt.txt ├── car3_gt.txt ├── car_gt.txt ├── card_gt.txt ├── coin_gt.txt ├── coke_gt.txt ├── drive_gt.txt ├── excavator_gt.txt ├── face2_gt.txt ├── face_gt.txt ├── forest2_gt.txt ├── forest_gt.txt ├── fruit_gt.txt ├── hand_gt.txt ├── kangaroo_gt.txt ├── paper_gt.txt ├── pedestrain_gt.txt ├── pedestrian2_gt.txt ├── player_gt.txt ├── playground_gt.txt ├── rider1_gt.txt ├── rider2_gt.txt ├── rubik_gt.txt ├── student_gt.txt ├── toy1_gt.txt ├── toy2_gt.txt ├── trucker_gt.txt └── worker_gt.txt ├── load_video_info.m ├── mergeMulImageTo3DImahe.m └── readme.txt /README.md: -------------------------------------------------------------------------------- 1 | # SEE-Net 2 | Learning a Deep Ensemble Network with Band Importance 
for Hyperspectral Object Tracking 3 | 4 | 1. The tools for evaluating the compared trackers can be found in the folder "plot-tools". 5 | 2. The source code can be found in the "code" folder for processing 16-bands data and 25-bands hyperspectral images. 6 | 7 | ## 1. Dataset 8 | Download training and testing datasets in https://www.hsitracking.com/. 9 | ```python 10 | 1. The format of training dataset: 11 | rootDir |- 12 | videoName1 13 | |- HSI 14 | |- 0001.png 15 | |- 0002.png 16 | ... 17 | |- XXXX.png 18 | |- groundturth_rect.txt 19 | videoName2 20 | |- HSI 21 | |- 0001.png 22 | |- 0002.png 23 | ... 24 | |- XXXX.png 25 | |- groundturth_rect.txt 26 | ... 27 | videoNameN 28 | |- HSI 29 | |- 0001.png 30 | |- 0002.png 31 | ... 32 | |- XXXX.png 33 | |- groundturth_rect.txt 34 | ``` 35 | ```python 36 | 2. The format of testing dataset: 37 | rootDir |- 38 | test_HSI 39 | |- videoName1 40 | |- groundturth_rect.txt 41 | |- HSI 42 | |- 0001.png 43 | |- 0002.png 44 | |- ... 45 | |- XXXX.png 46 | |- videoName2 47 | |- groundturth_rect.txt 48 | |- HSI 49 | |- 0001.png 50 | |- 0002.png 51 | |- ... 52 | |- XXXX.png 53 | ... 54 | |- videoNameM 55 | |- groundturth_rect.txt 56 | |- HSI 57 | |- 0001.png 58 | |- 0002.png 59 | |- ... 60 | |- XXXX.png 61 | ``` 62 | 63 | ## 2. 
Results 64 | More results can be found in: 65 | ```python 66 | https://pan.baidu.com/s/1BcePsITWMrP59nUcU_eJcg 67 | Access code: 1234 68 | ``` 69 | 70 | ## Citation 71 | If these codes are helpful for you, please cite this paper: 72 | ```python 73 | @ARTICLE{10128966, 74 | author={Li, Zhuanfeng and Xiong, Fengchao and Zhou, Jun and Lu, Jianfeng and Qian, Yuntao}, 75 | journal={IEEE Transactions on Image Processing}, 76 | title={Learning a Deep Ensemble Network With Band Importance for Hyperspectral Object Tracking}, 77 | year={2023}, 78 | volume={32}, 79 | number={}, 80 | pages={2901-2914}, 81 | doi={10.1109/TIP.2023.3263109}} 82 | ``` 83 | 84 | ## Contact 85 | lizhuanfeng@njust.edu.cn 86 | -------------------------------------------------------------------------------- /SEE_Net_HOT2020_results.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/SEE_Net_HOT2020_results.zip -------------------------------------------------------------------------------- /SEE_Net_HOT2022_results.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/SEE_Net_HOT2022_results.zip -------------------------------------------------------------------------------- /SEE_Net_HOT2023_results.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/SEE_Net_HOT2023_results.zip -------------------------------------------------------------------------------- /SEE_Net_IMEC25_results.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/SEE_Net_IMEC25_results.zip 
-------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/README.md: -------------------------------------------------------------------------------- 1 | # Quick Start 2 | ## 1. Add SEE-Net to your PYTHONPATH 3 | export PYTHONPATH=$PWD:$PYTHONPATH 4 | 5 | ## 2. Requirements 6 | Please install the environment following https://github.com/hqucv/siamban. 7 | 8 | ## 3. Dataset 9 | Please generate the cropped template patch and search region, following https://github.com/hqucv/siamban. 10 | 11 | ## 4. Train 12 | (a) Download pretrained model in https://pan.baidu.com/s/1xUNW1wnyN7_Fo7Gcl1GaKQ Access code: 1234 13 | 14 | (b) Change the path of training data in siamese/dataset/dataset.py 15 | 16 | (c) Run: 17 | ```python 18 | cd experiments/siamban_r50_l234 19 | CUDA_VISIBLE_DEVICES=0,1,2 20 | python -m torch.distributed.launch \ 21 | --nproc_per_node=3 \ 22 | --master_port=2333 \ 23 | ../../tools/train.py --cfg config.yaml 24 | ``` 25 | 26 | ## 5. 
Test 27 | Download testing model in https://pan.baidu.com/s/1xUNW1wnyN7_Fo7Gcl1GaKQ 28 | 29 | Access code: 1234 30 | ```python 31 | python tools/demo.py --config experiments/siamban_r50_l234/config.yaml --snapshot experiments/siamban_r50_l234/snapshot/checkpoint_e30.pth --video_path test_path 32 | ``` 33 | 34 | ## Contact 35 | lizhuanfeng@njust.edu.cn 36 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/build/temp.linux-x86_64-3.7/toolkit/utils/region.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/build/temp.linux-x86_64-3.7/toolkit/utils/region.o -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/build/temp.linux-x86_64-3.7/toolkit/utils/src/region.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/build/temp.linux-x86_64-3.7/toolkit/utils/src/region.o -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/experiments/__pycache__/tmp.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/experiments/__pycache__/tmp.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/experiments/siamban_r50_l234/config.yaml: -------------------------------------------------------------------------------- 1 | META_ARC: "siamban_r50_l234" 2 | 3 | BACKBONE: 4 | TYPE: "resnet50" 5 | KWARGS: 6 | used_layers: [2, 3, 4] 7 | PRETRAINED: 'pretrained_models/resnet50.model' 8 | TRAIN_LAYERS: ['layer2', 'layer3', 
'layer4'] 9 | TRAIN_EPOCH: 10 10 | LAYERS_LR: 0.1 11 | 12 | ADJUST: 13 | ADJUST: True 14 | TYPE: "AdjustAllLayer" 15 | KWARGS: 16 | in_channels: [512, 1024, 2048] 17 | out_channels: [256, 256, 256] 18 | 19 | BAN: 20 | BAN: True 21 | TYPE: 'MultiBAN' 22 | KWARGS: 23 | in_channels: [256, 256, 256] 24 | cls_out_channels: 2 25 | weighted: True 26 | 27 | POINT: 28 | STRIDE: 8 29 | 30 | TRACK: 31 | TYPE: 'SiamBANTracker' 32 | WINDOW_INFLUENCE: 0.205 33 | PENALTY_K: 0.08513642556896711 34 | LR: 0.206 35 | EXEMPLAR_SIZE: 127 36 | INSTANCE_SIZE: 255 37 | BASE_SIZE: 8 38 | CONTEXT_AMOUNT: 0.5 39 | 40 | TRAIN: 41 | EPOCH: 30 42 | START_EPOCH: 0 43 | BATCH_SIZE: 28 44 | BASE_LR: 0.005 45 | CLS_WEIGHT: 1.0 46 | LOC_WEIGHT: 1.0 47 | RESUME: '' 48 | 49 | LR: 50 | TYPE: 'log' 51 | KWARGS: 52 | start_lr: 0.005 53 | end_lr: 0.00005 54 | LR_WARMUP: 55 | TYPE: 'step' 56 | EPOCH: 5 57 | KWARGS: 58 | start_lr: 0.001 59 | end_lr: 0.005 60 | step: 1 61 | 62 | DATASET: 63 | NAMES: 64 | - 'VID' 65 | - 'YOUTUBEBB' 66 | - 'COCO' 67 | - 'DET' 68 | - 'GOT10K' 69 | - 'LASOT' 70 | 71 | VIDEOS_PER_EPOCH: 1000000 72 | 73 | TEMPLATE: 74 | SHIFT: 4 75 | SCALE: 0.05 76 | BLUR: 0.0 77 | FLIP: 0.0 78 | COLOR: 1.0 79 | 80 | SEARCH: 81 | SHIFT: 64 82 | SCALE: 0.18 83 | BLUR: 0.2 84 | FLIP: 0.0 85 | COLOR: 1.0 86 | 87 | NEG: 0.2 88 | GRAY: 0.0 89 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/experiments/siamban_r50_l234/snapshot/download: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/experiments/siamban_r50_l234/snapshot/download -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -lt 2 ]; then 4 | echo "ARGS ERROR!" 
5 | echo " bash install.sh /path/to/your/conda env_name" 6 | exit 1 7 | fi 8 | 9 | set -e 10 | 11 | conda_path=$1 12 | env_name=$2 13 | 14 | source $conda_path/etc/profile.d/conda.sh 15 | 16 | echo "****** create environment " $env_name "*****" 17 | # create environment 18 | conda create -y --name $env_name python=3.7 19 | conda activate $env_name 20 | 21 | echo "***** install numpy pytorch opencv *****" 22 | # numpy 23 | conda install -y numpy 24 | # pytorch 25 | conda install pytorch=1.3.1 torchvision cudatoolkit=10.1 -c pytorch 26 | # opencv 27 | pip install opencv-python 28 | 29 | echo "***** install other libs *****" 30 | # libs 31 | pip install pyyaml yacs tqdm colorama matplotlib cython tensorboard future mpi4py optuna 32 | 33 | 34 | echo "***** build extensions *****" 35 | python setup.py build_ext --inplace 36 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/results.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/results.zip -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Build import cythonize 4 | 5 | ext_modules = [ 6 | Extension( 7 | name='toolkit.utils.region', 8 | sources=[ 9 | 'toolkit/utils/region.pyx', 10 | 'toolkit/utils/src/region.c', 11 | ], 12 | include_dirs=[ 13 | 'toolkit/utils/src' 14 | ] 15 | ) 16 | ] 17 | 18 | setup( 19 | name='toolkit', 20 | packages=['toolkit'], 21 | ext_modules=cythonize(ext_modules) 22 | ) 23 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/core/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/core/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/core/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/core/__pycache__/config.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/core/__pycache__/config.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/core/__pycache__/xcorr.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/core/__pycache__/xcorr.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/core/xcorr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch 9 | import torch.nn.functional as F 10 | 11 | 12 | def xcorr_slow(x, kernel): 13 | """for loop to calculate cross correlation, slow version 14 | """ 15 | batch = x.size()[0] 16 | out = [] 17 | for i in range(batch): 18 | px = x[i] 19 | pk = kernel[i] 20 | px = px.view(1, -1, px.size()[1], px.size()[2]) 21 | pk = pk.view(1, -1, pk.size()[1], pk.size()[2]) 22 | po = F.conv2d(px, pk) 23 | out.append(po) 24 | out = torch.cat(out, 0) 25 | return out 26 | 27 | 28 | def xcorr_fast(x, kernel): 29 | """group conv2d to calculate cross correlation, fast version 30 | """ 31 | batch = kernel.size()[0] 32 | pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3]) 33 | px = x.view(1, -1, x.size()[2], x.size()[3]) 34 | po = F.conv2d(px, pk, groups=batch) 35 | po = po.view(batch, -1, po.size()[2], po.size()[3]) 36 | return po 37 | 38 | 39 | def xcorr_depthwise(x, kernel): 40 | """depthwise cross correlation 41 | """ 42 | batch = kernel.size(0) 43 | channel = kernel.size(1) 44 | x = x.view(1, batch*channel, x.size(2), x.size(3)) 45 | kernel = kernel.view(batch*channel, 1, kernel.size(2), kernel.size(3)) 46 | out = F.conv2d(x, kernel, groups=batch*channel) 47 | out = out.view(batch, channel, out.size(2), out.size(3)) 48 | return out 49 | -------------------------------------------------------------------------------- 
/code/SEE-Net-NIR-25bands/siamban/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/datasets/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/augmentation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/augmentation.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/dataset.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/point_target.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/datasets/__pycache__/point_target.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/datasets/point_target.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import numpy as np 7 | 8 | from siamban.core.config import cfg 9 | from siamban.utils.bbox import corner2center 10 | from siamban.utils.point import Point 11 | 12 | 13 | class PointTarget: 14 | def __init__(self,): 15 | self.points = Point(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE, cfg.TRAIN.SEARCH_SIZE//2) 16 | 17 | def __call__(self, target, size, neg=False): 18 | 19 | # -1 ignore 0 negative 1 positive 20 | cls = -1 * np.ones((size, size), dtype=np.int64) 21 | delta = np.zeros((4, size, size), dtype=np.float32) 22 | 23 | def select(position, keep_num=16): 24 | num = position[0].shape[0] 25 | if num <= keep_num: 26 | return position, num 27 | slt = np.arange(num) 28 | np.random.shuffle(slt) 29 | slt = slt[:keep_num] 30 | return tuple(p[slt] for p in position), keep_num 31 | 32 | tcx, tcy, tw, th = corner2center(target) 33 | points = self.points.points 34 | 35 | if neg: 36 | neg = np.where(np.square(tcx - points[0]) / np.square(tw / 4) + 37 | np.square(tcy - points[1]) / np.square(th / 4) < 1) 38 | neg, neg_num = select(neg, cfg.TRAIN.NEG_NUM) 39 | cls[neg] = 0 40 | 41 | return cls, delta 42 | 43 | delta[0] = points[0] - target[0] 44 | delta[1] = points[1] - target[1] 45 | delta[2] = target[2] - points[0] 46 | delta[3] = target[3] - points[1] 47 | 48 | # ellipse label 49 | pos = np.where(np.square(tcx - points[0]) / np.square(tw / 4) + 50 | np.square(tcy - points[1]) / np.square(th / 4) < 1) 51 | neg = np.where(np.square(tcx - points[0]) / np.square(tw / 2) + 52 | np.square(tcy - points[1]) / np.square(th / 2) > 1) 53 | 54 | # sampling 55 | pos, pos_num = select(pos, 
cfg.TRAIN.POS_NUM) 56 | neg, neg_num = select(neg, cfg.TRAIN.TOTAL_NUM - cfg.TRAIN.POS_NUM) 57 | 58 | cls[pos] = 1 59 | cls[neg] = 0 60 | 61 | return cls, delta 62 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/__pycache__/iou_loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/__pycache__/iou_loss.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/__pycache__/model_builder.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/__pycache__/model_builder.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | from siamban.models.backbone.alexnet import alexnetlegacy, alexnet 9 | from siamban.models.backbone.mobile_v2 import mobilenetv2 10 | from siamban.models.backbone.resnet_atrous import resnet18, resnet34, resnet50 11 | 12 | BACKBONES = { 13 | 'alexnetlegacy': alexnetlegacy, 14 | 'mobilenetv2': mobilenetv2, 15 | 'resnet18': resnet18, 16 | 'resnet34': resnet34, 17 | 'resnet50': resnet50, 18 | 'alexnet': alexnet, 19 | } 20 | 21 | 22 | def get_backbone(name, **kwargs): 23 | return BACKBONES[name](**kwargs) 24 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/alexnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/alexnet.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/gpu_mem_track.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/gpu_mem_track.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/mobile_v2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/mobile_v2.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/resnet_atrous.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/backbone/__pycache__/resnet_atrous.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/backbone/modelsize_estimate.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | 5 | 6 | def modelsize(model, input, type_size=4): 7 | para = sum([np.prod(list(p.size())) for p in model.parameters()]) 8 | # print('Model {} : Number of params: {}'.format(model._get_name(), para)) 9 | print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000)) 10 | 11 | input_ = input.clone() 12 | input_.requires_grad_(requires_grad=False) 13 | 14 | mods = 
list(model.modules()) 15 | out_sizes = [] 16 | 17 | for i in range(1, len(mods)): 18 | m = mods[i] 19 | if isinstance(m, nn.ReLU): 20 | if m.inplace: 21 | continue 22 | out = m(input_) 23 | out_sizes.append(np.array(out.size())) 24 | input_ = out 25 | 26 | total_nums = 0 27 | for i in range(len(out_sizes)): 28 | s = out_sizes[i] 29 | nums = np.prod(np.array(s)) 30 | total_nums += nums 31 | 32 | # print('Model {} : Number of intermedite variables without backward: {}'.format(model._get_name(), total_nums)) 33 | # print('Model {} : Number of intermedite variables with backward: {}'.format(model._get_name(), total_nums*2)) 34 | print('Model {} : intermedite variables: {:3f} M (without backward)' 35 | .format(model._get_name(), total_nums * type_size / 1000 / 1000)) 36 | print('Model {} : intermedite variables: {:3f} M (with backward)' 37 | .format(model._get_name(), total_nums * type_size*2 / 1000 / 1000)) 38 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/head/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | from siamban.models.head.ban import UPChannelBAN, DepthwiseBAN, MultiBAN 7 | 8 | 9 | BANS = { 10 | 'UPChannelBAN': UPChannelBAN, 11 | 'DepthwiseBAN': DepthwiseBAN, 12 | 'MultiBAN': MultiBAN 13 | } 14 | 15 | 16 | def get_ban_head(name, **kwargs): 17 | return BANS[name](**kwargs) 18 | 19 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/head/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/head/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/head/__pycache__/ban.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/head/__pycache__/ban.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/init_weight.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def init_weights(model): 5 | for m in model.modules(): 6 | if isinstance(m, nn.Conv2d): 7 | nn.init.kaiming_normal_(m.weight.data, 8 | mode='fan_out', 9 | nonlinearity='relu') 10 | elif isinstance(m, nn.BatchNorm2d): 11 | m.weight.data.fill_(1) 12 | m.bias.data.zero_() 13 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/iou_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | 5 | class IOULoss(nn.Module): 6 | def __init__(self, loc_loss_type): 7 | super(IOULoss, self).__init__() 8 | self.loc_loss_type = loc_loss_type 9 | 10 | def forward(self, pred, target, weight=None): 11 | pred_left = pred[:, 0] 12 | pred_top = pred[:, 1] 13 | pred_right = pred[:, 2] 14 | pred_bottom = pred[:, 3] 15 | 16 | target_left = target[:, 0] 17 | target_top = target[:, 1] 18 | target_right = target[:, 2] 19 | target_bottom = target[:, 3] 20 | 21 | pred_area = (pred_left + pred_right) * (pred_top + pred_bottom) 22 | target_area = (target_left + target_right) * (target_top + target_bottom) 23 | 24 | w_intersect = 
torch.min(pred_left, target_left) + torch.min(pred_right, target_right) 25 | g_w_intersect = torch.max(pred_left, target_left) + torch.max(pred_right, target_right) 26 | h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top) 27 | g_h_intersect = torch.max(pred_bottom, target_bottom) + torch.max(pred_top, target_top) 28 | ac_uion = g_w_intersect * g_h_intersect + 1e-7 29 | area_intersect = w_intersect * h_intersect 30 | area_union = target_area + pred_area - area_intersect 31 | ious = (area_intersect + 1.0) / (area_union + 1.0) 32 | gious = ious - (ac_uion - area_union) / ac_uion 33 | 34 | if self.loc_loss_type == 'iou': 35 | losses = -torch.log(ious) 36 | elif self.loc_loss_type == 'linear_iou': 37 | losses = 1 - ious 38 | elif self.loc_loss_type == 'giou': 39 | losses = 1 - gious 40 | else: 41 | raise NotImplementedError 42 | 43 | if weight is not None and weight.sum() > 0: 44 | return (losses * weight).sum() / weight.sum() 45 | else: 46 | assert losses.numel() != 0 47 | return losses.mean() 48 | 49 | 50 | linear_iou = IOULoss(loc_loss_type='linear_iou') 51 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 
2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | import numpy as np 12 | 13 | from siamban.core.config import cfg 14 | from siamban.models.iou_loss import linear_iou 15 | 16 | 17 | def get_cls_loss(pred, label, select): 18 | if len(select.size()) == 0 or \ 19 | select.size() == torch.Size([0]): 20 | return 0 21 | # print ('--333333--pred.size() = ',pred.size()) 22 | # print ('--333333--label.size() = ',label.size()) 23 | pred = torch.index_select(pred, 0, select) 24 | label = torch.index_select(label, 0, select) 25 | # print ('--3--pred.size() = ',pred.size()) # [268, 2] 26 | # print ('--3--label.size() = ',label.size()) # [268] 27 | return F.nll_loss(pred, label) 28 | 29 | 30 | def select_cross_entropy_loss(pred, label): 31 | # print ('--1--pred.size() = ',pred.size()) # [28,25,25,2] 32 | # print ('--1--label.size() = ',label.size()) # [28,25,25] 33 | pred = pred.view(-1, 2) 34 | label = label.view(-1) 35 | # print ('--2--pred.size() = ',pred.size())# [17500, 2] 36 | # print ('--2--label.size() = ',label.size()) # [17500] 37 | pos = label.data.eq(1).nonzero().squeeze().cuda() # [268] 38 | neg = label.data.eq(0).nonzero().squeeze().cuda() # [1166] 39 | # print ('pos.size() = ',pos.size()) 40 | # print ('neg.size() = ',neg.size()) 41 | loss_pos = get_cls_loss(pred, label, pos) 42 | loss_neg = get_cls_loss(pred, label, neg) 43 | return loss_pos * 0.5 + loss_neg * 0.5 44 | 45 | 46 | def weight_l1_loss(pred_loc, label_loc, loss_weight): 47 | if cfg.BAN.BAN: 48 | diff = (pred_loc - label_loc).abs() 49 | diff = diff.sum(dim=1) 50 | else: 51 | diff = None 52 | loss = diff * loss_weight 53 | return loss.sum().div(pred_loc.size()[0]) 54 | 55 | 56 | def select_iou_loss(pred_loc, label_loc, label_cls): 57 | label_cls = label_cls.reshape(-1) 58 | pos = 
label_cls.data.eq(1).nonzero().squeeze().cuda() 59 | 60 | pred_loc = pred_loc.permute(0, 2, 3, 1).reshape(-1, 4) 61 | pred_loc = torch.index_select(pred_loc, 0, pos) 62 | 63 | label_loc = label_loc.permute(0, 2, 3, 1).reshape(-1, 4) 64 | label_loc = torch.index_select(label_loc, 0, pos) 65 | 66 | return linear_iou(pred_loc, label_loc) 67 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/neck/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | 12 | from siamban.models.neck.neck import AdjustLayer, AdjustAllLayer 13 | 14 | NECKS = { 15 | 'AdjustLayer': AdjustLayer, 16 | 'AdjustAllLayer': AdjustAllLayer 17 | } 18 | 19 | def get_neck(name, **kwargs): 20 | return NECKS[name](**kwargs) 21 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/neck/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/neck/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/neck/__pycache__/neck.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/models/neck/__pycache__/neck.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/models/neck/neck.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch.nn as nn 9 | 10 | 11 | class AdjustLayer(nn.Module): 12 | def __init__(self, in_channels, out_channels): 13 | super(AdjustLayer, self).__init__() 14 | self.downsample = nn.Sequential( 15 | nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False), 16 | nn.BatchNorm2d(out_channels), 17 | ) 18 | 19 | def forward(self, x): 20 | x = self.downsample(x) 21 | if x.size(3) < 20: 22 | l = 4 23 | r = l + 7 24 | x = x[:, :, l:r, l:r] 25 | return x 26 | 27 | 28 | class AdjustAllLayer(nn.Module): 29 | def __init__(self, in_channels, out_channels): 30 | super(AdjustAllLayer, self).__init__() 31 | self.num = len(out_channels) 32 | if self.num == 1: 33 | self.downsample = AdjustLayer(in_channels[0], out_channels[0]) 34 | else: 35 | for i in range(self.num): 36 | self.add_module('downsample'+str(i+2), 37 | AdjustLayer(in_channels[i], out_channels[i])) 38 | 39 | def forward(self, features): 40 | if self.num == 1: 41 | return self.downsample(features) 42 | else: 43 | out = [] 44 | for i in range(self.num): 45 | adj_layer = getattr(self, 'downsample'+str(i+2)) 46 | out.append(adj_layer(features[i])) 47 | return out 48 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/tracker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/tracker/__init__.py 
-------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/base_tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/base_tracker.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/siamban_tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/siamban_tracker.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/tracker_builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/tracker/__pycache__/tracker_builder.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/tracker/tracker_builder.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import 
print_function 4 | from __future__ import unicode_literals 5 | 6 | from siamban.core.config import cfg 7 | from siamban.tracker.siamban_tracker import SiamBANTracker 8 | 9 | TRACKS = { 10 | 'SiamBANTracker': SiamBANTracker 11 | } 12 | 13 | 14 | def build_tracker(model): 15 | return TRACKS[cfg.TRACK.TYPE](model) 16 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/average_meter.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/average_meter.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/bbox.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/bbox.cpython-37.pyc -------------------------------------------------------------------------------- 
/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/distributed.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/distributed.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/imagePreDeal.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/imagePreDeal.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/log_helper.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/log_helper.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/lr_scheduler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/lr_scheduler.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/misc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/misc.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/model_load.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/model_load.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/point.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/siamban/utils/__pycache__/point.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/misc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import os 9 | 10 | from colorama import Fore, Style 11 | 12 | 13 | __all__ = ['commit', 'describe'] 14 | 15 | 16 | def _exec(cmd): 17 | f = os.popen(cmd, 'r', 1) 18 | return f.read().strip() 19 | 20 | 21 | def _bold(s): 22 | return "\033[1m%s\033[0m" % s 23 | 24 | 25 | def _color(s): 26 | return f'{Fore.RED}{s}{Style.RESET_ALL}' 27 | 28 | 29 | def _describe(model, lines=None, spaces=0): 30 | head = " " * spaces 31 | for name, p in model.named_parameters(): 32 | if '.' 
in name: 33 | continue 34 | if p.requires_grad: 35 | name = _color(name) 36 | line = "{head}- {name}".format(head=head, name=name) 37 | lines.append(line) 38 | 39 | for name, m in model.named_children(): 40 | space_num = len(name) + spaces + 1 41 | if m.training: 42 | name = _color(name) 43 | line = "{head}.{name} ({type})".format( 44 | head=head, 45 | name=name, 46 | type=m.__class__.__name__) 47 | lines.append(line) 48 | _describe(m, lines, space_num) 49 | 50 | 51 | def commit(): 52 | root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) 53 | cmd = "cd {}; git log | head -n1 | awk '{{print $2}}'".format(root) 54 | commit = _exec(cmd) 55 | cmd = "cd {}; git log --oneline | head -n1".format(root) 56 | commit_log = _exec(cmd) 57 | return "commit : {}\n log : {}".format(commit, commit_log) 58 | 59 | 60 | def describe(net, name=None): 61 | num = 0 62 | lines = [] 63 | if name is not None: 64 | lines.append(name) 65 | num = len(name) 66 | _describe(net, lines, num) 67 | return "\n".join(lines) 68 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/siamban/utils/point.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import numpy as np 7 | 8 | 9 | class Point: 10 | """ 11 | This class generate points. 
12 | """ 13 | def __init__(self, stride, size, image_center): 14 | self.stride = stride 15 | self.size = size 16 | self.image_center = image_center 17 | 18 | self.points = self.generate_points(self.stride, self.size, self.image_center) 19 | 20 | def generate_points(self, stride, size, im_c): 21 | ori = im_c - size // 2 * stride 22 | x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)], 23 | [ori + stride * dy for dy in np.arange(0, size)]) 24 | points = np.zeros((2, size, size), dtype=np.float32) 25 | points[0, :, :], points[1, :, :] = x.astype(np.float32), y.astype(np.float32) 26 | 27 | return points 28 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/test.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=7 nohup python -u tools/demo.py --config experiments/siamban_r50_l234/config.yaml --snapshot experiments/siamban_r50_l234/snapshot/SEE-Net-NIR-Model.pth --video_path /data/lizf/HOT/IMEC25Dataset/test/test_HSI/ > nohup.test.NIR.log 2>&1 & 2 | 3 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/testing_dataset/README.md: -------------------------------------------------------------------------------- 1 | # Testing dataset directory 2 | # putting your testing dataset here 3 | - [x] [VOT2018](http://www.votchallenge.net/vot2018/dataset.html) 4 | - [x] [VOT2019](https://www.votchallenge.net/vot2019/dataset.html) 5 | - [x] [OTB100(OTB2015)](http://cvlab.hanyang.ac.kr/tracker_benchmark/datasets.html) 6 | - [x] [UAV123](https://ivul.kaust.edu.sa/Pages/Dataset-UAV123.aspx) 7 | - [x] [NFS](http://ci2cv.net/nfs/index.html) 8 | - [x] [LaSOT](https://cis.temple.edu/lasot/) 9 | 10 | ## Download Dataset 11 | Download [json files](https://drive.google.com/drive/folders/10cfXjwQQBQeu48XMf2xc_W1LucpistPI). 12 | 13 | 1. 
Put CVRP13.json, OTB100.json, OTB50.json in OTB100 dataset directory (you need to copy Jogging to Jogging-1 and Jogging-2, and copy Skating2 to Skating2-1 and Skating2-2 or using softlink) 14 | 15 | The directory should have the below format 16 | 17 | | -- OTB100/ 18 | 19 | ​ | -- Basketball 20 | 21 | ​ | ...... 22 | 23 | ​ | -- Woman 24 | 25 | ​ | -- OTB100.json 26 | 27 | ​ | -- OTB50.json 28 | 29 | ​ | -- CVPR13.json 30 | 31 | 2. Put all other jsons in the dataset directory like in step 1 32 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/toolkit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-NIR-25bands/toolkit/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/toolkit/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .vot import VOTDataset, VOTLTDataset 2 | from .otb import OTBDataset 3 | from .uav import UAVDataset 4 | from .lasot import LaSOTDataset 5 | from .nfs import NFSDataset 6 | from .trackingnet import TrackingNetDataset 7 | from .got10k import GOT10kDataset 8 | 9 | class DatasetFactory(object): 10 | @staticmethod 11 | def create_dataset(**kwargs): 12 | """ 13 | Args: 14 | name: dataset name 'OTB2015', 'LaSOT', 'UAV123', 'NFS240', 'NFS30', 15 | 'VOT2018', 'VOT2016', 'VOT2018-LT' 16 | dataset_root: dataset root 17 | load_img: wether to load image 18 | Return: 19 | dataset 20 | """ 21 | assert 'name' in kwargs, "should provide dataset name" 22 | name = kwargs['name'] 23 | if 'OTB' in name: 24 | dataset = OTBDataset(**kwargs) 25 | elif 'LaSOT' == name: 26 | dataset = LaSOTDataset(**kwargs) 27 | elif 'UAV' in name: 28 | dataset = UAVDataset(**kwargs) 29 | elif 'NFS' in name: 30 | dataset = NFSDataset(**kwargs) 31 | elif 
'VOT2018' == name or 'VOT2016' == name or 'VOT2019' == name: 32 | dataset = VOTDataset(**kwargs) 33 | elif 'VOT2018-LT' == name: 34 | dataset = VOTLTDataset(**kwargs) 35 | elif 'TrackingNet' == name: 36 | dataset = TrackingNetDataset(**kwargs) 37 | elif 'GOT-10k' == name: 38 | dataset = GOT10kDataset(**kwargs) 39 | else: 40 | raise Exception("unknow dataset {}".format(kwargs['name'])) 41 | return dataset 42 | 43 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/toolkit/datasets/dataset.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | 3 | class Dataset(object): 4 | def __init__(self, name, dataset_root): 5 | self.name = name 6 | self.dataset_root = dataset_root 7 | self.videos = None 8 | 9 | def __getitem__(self, idx): 10 | if isinstance(idx, str): 11 | return self.videos[idx] 12 | elif isinstance(idx, int): 13 | return self.videos[sorted(list(self.videos.keys()))[idx]] 14 | 15 | def __len__(self): 16 | return len(self.videos) 17 | 18 | def __iter__(self): 19 | keys = sorted(list(self.videos.keys())) 20 | for key in keys: 21 | yield self.videos[key] 22 | 23 | def set_tracker(self, path, tracker_names): 24 | """ 25 | Args: 26 | path: path to tracker results, 27 | tracker_names: list of tracker name 28 | """ 29 | self.tracker_path = path 30 | self.tracker_names = tracker_names 31 | # for video in tqdm(self.videos.values(), 32 | # desc='loading tacker result', ncols=100): 33 | # video.load_tracker(path, tracker_names) 34 | -------------------------------------------------------------------------------- /code/SEE-Net-NIR-25bands/toolkit/datasets/got10k.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import os 4 | 5 | from tqdm import tqdm 6 | 7 | from .dataset import Dataset 8 | from .video import Video 9 | 10 | class GOT10kVideo(Video): 11 | """ 12 | Args: 13 | name: video name 14 | 
class GOT10kVideo(Video):
    """A single GOT-10k sequence.

    Args:
        name: video name
        root: dataset root
        video_dir: video directory
        init_rect: init rectangle
        img_names: image names
        gt_rect: groundtruth rectangle
        attr: attribute of video
        load_img: whether to eagerly load the frames
    """
    def __init__(self, name, root, video_dir, init_rect, img_names,
                 gt_rect, attr, load_img=False):
        super(GOT10kVideo, self).__init__(name, root, video_dir,
                                          init_rect, img_names, gt_rect, attr, load_img)


class GOT10kDataset(Dataset):
    """GOT-10k benchmark dataset.

    Args:
        name: dataset name, should be 'GOT-10k'
            (fixed: docstring previously said "NFS30 or NFS240", copy-pasted
            from nfs.py)
        dataset_root: dataset root dir; must contain a '<name>.json' metadata
            file describing every sequence
    """
    def __init__(self, name, dataset_root, load_img=False):
        super(GOT10kDataset, self).__init__(name, dataset_root)
        with open(os.path.join(dataset_root, name + '.json'), 'r') as f:
            meta_data = json.load(f)

        # Build one GOT10kVideo per metadata entry, showing progress.
        pbar = tqdm(meta_data.keys(), desc='loading ' + name, ncols=100)
        self.videos = {}
        for video in pbar:
            pbar.set_postfix_str(video)
            self.videos[video] = GOT10kVideo(video,
                                             dataset_root,
                                             meta_data[video]['video_dir'],
                                             meta_data[video]['init_rect'],
                                             meta_data[video]['img_names'],
                                             meta_data[video]['gt_rect'],
                                             None)
        # No per-video attribute annotations here; expose only the 'ALL' group.
        self.attr = {}
        self.attr['ALL'] = list(self.videos.keys())
class NFSDataset(Dataset):
    """Need-for-Speed benchmark dataset.

    Args:
        name: dataset name, should be "NFS30" or "NFS240"
        dataset_root: dataset root dir; must contain '<name>.json' metadata
    """
    def __init__(self, name, dataset_root, load_img=False):
        super(NFSDataset, self).__init__(name, dataset_root)
        meta_path = os.path.join(dataset_root, name + '.json')
        with open(meta_path, 'r') as f:
            meta_data = json.load(f)

        # Build one NFSVideo per metadata entry, showing progress as we go.
        self.videos = {}
        progress = tqdm(meta_data.keys(), desc='loading ' + name, ncols=100)
        for video_name in progress:
            progress.set_postfix_str(video_name)
            info = meta_data[video_name]
            self.videos[video_name] = NFSVideo(video_name,
                                               dataset_root,
                                               info['video_dir'],
                                               info['init_rect'],
                                               info['img_names'],
                                               info['gt_rect'],
                                               None)

        # NFS carries no per-video attribute annotations; only 'ALL'.
        self.attr = {}
        self.attr['ALL'] = list(self.videos.keys())
class TrackingNetDataset(Dataset):
    """TrackingNet benchmark dataset.

    Args:
        name: dataset name, should be 'TrackingNet'
            (fixed: docstring previously said "NFS30 or NFS240", copy-pasted
            from nfs.py)
        dataset_root: dataset root dir; must contain '<name>.json' metadata
    """
    def __init__(self, name, dataset_root, load_img=False):
        super(TrackingNetDataset, self).__init__(name, dataset_root)
        with open(os.path.join(dataset_root, name + '.json'), 'r') as f:
            meta_data = json.load(f)

        # Build one TrackingNetVideo per metadata entry, showing progress.
        pbar = tqdm(meta_data.keys(), desc='loading ' + name, ncols=100)
        self.videos = {}
        for video in pbar:
            pbar.set_postfix_str(video)
            self.videos[video] = TrackingNetVideo(video,
                                                  dataset_root,
                                                  meta_data[video]['video_dir'],
                                                  meta_data[video]['init_rect'],
                                                  meta_data[video]['img_names'],
                                                  meta_data[video]['gt_rect'],
                                                  None)
        # No per-video attribute annotations; expose only the 'ALL' group.
        self.attr = {}
        self.attr['ALL'] = list(self.videos.keys())
class UAVDataset(Dataset):
    """UAV benchmark dataset.

    Args:
        name: dataset name, should be 'UAV123', 'UAV20L'
        dataset_root: dataset root
        load_img: whether to load all imgs
    """
    def __init__(self, name, dataset_root, load_img=False):
        super(UAVDataset, self).__init__(name, dataset_root)
        with open(os.path.join(dataset_root, name + '.json'), 'r') as f:
            meta_data = json.load(f)

        # Build one UAVVideo per metadata entry, showing progress as we go.
        self.videos = {}
        progress = tqdm(meta_data.keys(), desc='loading ' + name, ncols=100)
        for video_name in progress:
            progress.set_postfix_str(video_name)
            info = meta_data[video_name]
            self.videos[video_name] = UAVVideo(video_name,
                                               dataset_root,
                                               info['video_dir'],
                                               info['init_rect'],
                                               info['img_names'],
                                               info['gt_rect'],
                                               info['attr'])

        # Group video names under every attribute they carry, plus 'ALL'.
        attr_union = []
        for v in self.videos.values():
            attr_union += v.attr
        attr_union = set(attr_union)
        self.attr = {}
        self.attr['ALL'] = list(self.videos.keys())
        for a in attr_union:
            self.attr[a] = []
        for video_name, v in self.videos.items():
            for a in v.attr:
                self.attr[a].append(video_name)
def determine_thresholds(confidence, resolution=100):
    """Choose confidence thresholds spanning the observed confidence values.

    The first and last thresholds are -inf/+inf so the resulting operating
    points always include "accept everything" and "accept nothing"; the
    remaining ``resolution - 2`` thresholds are sampled evenly from the
    sorted, NaN-free confidences.

    (Docstring fixed: previously had "reolution", "Restures" and a
    duplicated "or numpy array".)

    Args:
        confidence: list or numpy array of confidence scores
        resolution: number of thresholds to choose

    Returns:
        thresholds: numpy array of shape (resolution,)
    """
    if isinstance(confidence, list):
        confidence = np.array(confidence)
    confidence = confidence.flatten()
    # Boolean-mask indexing copies, so the in-place sort below cannot
    # mutate the caller's array.
    confidence = confidence[~np.isnan(confidence)]
    confidence.sort()

    assert len(confidence) > resolution and resolution > 2

    thresholds = np.ones(resolution)
    thresholds[0] = -np.inf
    thresholds[-1] = np.inf
    # Skip `delta` samples at each end so the most extreme scores do not
    # become thresholds themselves.
    delta = np.floor(len(confidence) / (resolution - 2))
    idxs = np.linspace(delta, len(confidence) - delta, resolution - 2,
                       dtype=np.int32)
    thresholds[1:-1] = confidence[idxs]
    return thresholds
def draw_eao(result):
    """Draw a polar (radar) plot of per-attribute EAO for several trackers.

    Args:
        result: dict mapping tracker_name -> dict of per-attribute EAO
            values; presumably 7 attributes per tracker, matching the
            `attrs` labels below -- TODO confirm against the caller.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='polar')
    # 8 angles: 7 attributes plus a repeat of the first to close the loop.
    angles = np.linspace(0, 2*np.pi, 8, endpoint=True)

    attr2value = []
    for i, (tracker_name, ret) in enumerate(result.items()):
        value = list(ret.values())
        attr2value.append(value)
        # NOTE: `value` is appended to attr2value BEFORE this append, but
        # since lists are referenced (not copied) the wrap-around element
        # also lands in attr2value's row -- each row ends up length 8.
        value.append(value[0])
    attr2value = np.array(attr2value)
    # Per-attribute max/min across trackers, used for normalisation and
    # for the axis labels.
    max_value = np.max(attr2value, axis=0)
    min_value = np.min(attr2value, axis=0)
    for i, (tracker_name, ret) in enumerate(result.items()):
        value = list(ret.values())
        value.append(value[0])
        value = np.array(value)
        # Normalise each attribute by the best tracker's score so the
        # leader touches the outer ring.
        value *= (1 / max_value)
        plt.plot(angles, value, linestyle='-', color=COLOR[i], marker=MARKER_STYLE[i],
                label=tracker_name, linewidth=1.5, markersize=6)

    attrs = ["Overall", "Camera motion",
             "Illumination change","Motion Change",
             "Size change","Occlusion",
             "Unassigned"]
    # Label each spoke with the attribute name and its (min,max) range.
    attr_value = []
    for attr, maxv, minv in zip(attrs, max_value, min_value):
        attr_value.append(attr + "\n({:.3f},{:.3f})".format(minv, maxv))
    ax.set_thetagrids(angles[:-1] * 180/np.pi, attr_value)
    ax.spines['polar'].set_visible(False)
    ax.legend(loc='upper center', bbox_to_anchor=(0.5,-0.07), frameon=False, ncol=5)
    # NOTE(review): `b=` was removed in matplotlib 3.5 (renamed `visible`);
    # this call breaks on newer matplotlib -- confirm pinned version.
    ax.grid(b=False)
    ax.set_ylim(0, 1.18)
    ax.set_yticks([])
    plt.show()
def draw_f1(result, bold_name=None):
    """Draw a Precision-vs-Recall plot with iso-F1 contours (VOT2018-LT).

    Args:
        result: dict mapping tracker_name -> {'precision': {...},
            'recall': {...}} where the inner dicts map video name to a
            per-threshold curve.
        bold_name: tracker whose legend entry is rendered as "Ours".
    """
    # Draw iso-F1 contour lines: f1 = 2pr/(p+r), solved for p given r,
    # mirrored across the diagonal.
    fig, ax = plt.subplots()
    for f1 in np.arange(0.1, 1, 0.1):
        recall = np.arange(f1, 1+0.01, 0.01)
        precision = f1 * recall / (2 * recall - f1)
        ax.plot(recall, precision, color=[0,1,0], linestyle='-', linewidth=0.5)
        ax.plot(precision, recall, color=[0,1,0], linestyle='-', linewidth=0.5)
    # Positional form keeps compatibility across matplotlib versions
    # (the keyword was renamed from `b` to `visible` in 3.5).
    ax.grid(True)
    ax.set_aspect(1)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.axis([0, 1, 0, 1])
    plt.title(r'\textbf{VOT2018-LT Precision vs Recall}')

    # Average the per-video curves and locate each tracker's best-F1 point.
    all_precision = {}
    all_recall = {}
    best_f1 = {}
    best_idx = {}
    for tracker_name, ret in result.items():
        precision = np.mean(list(ret['precision'].values()), axis=0)
        recall = np.mean(list(ret['recall'].values()), axis=0)
        # Guard the 0/0 case (precision and recall both zero), which
        # previously produced NaN and corrupted argmax.
        with np.errstate(divide='ignore', invalid='ignore'):
            f1 = 2 * precision * recall / (precision + recall)
        f1 = np.nan_to_num(f1)
        max_idx = np.argmax(f1)
        all_precision[tracker_name] = precision
        all_recall[tracker_name] = recall
        best_f1[tracker_name] = f1[max_idx]
        best_idx[tracker_name] = max_idx

    # Plot trackers ranked by best F1. The loop variable is renamed so it
    # no longer shadows the `best_f1` dict it iterates over.
    ranked = sorted(best_f1.items(), key=lambda kv: kv[1], reverse=True)
    for idx, (tracker_name, f1_score) in enumerate(ranked):
        if tracker_name == bold_name:
            label = r"\textbf{[%.3f] Ours}" % (f1_score)
        else:
            label = "[%.3f] " % (f1_score) + tracker_name
        recall = all_recall[tracker_name][:-1]
        precision = all_precision[tracker_name][:-1]
        ax.plot(recall, precision, color=COLOR[idx], linestyle='-',
                label=label)
        # Mark the operating point where this tracker's F1 peaks.
        f1_idx = best_idx[tracker_name]
        ax.plot(recall[f1_idx], precision[f1_idx], color=[0,0,0], marker='o',
                markerfacecolor=COLOR[idx], markersize=5)
    ax.legend(loc='lower right', labelspacing=0.2)
    plt.xticks(np.arange(0, 1+0.1, 0.1))
    plt.yticks(np.arange(0, 1+0.1, 0.1))
    plt.show()
# Test every snapshot epoch across multiple GPUs: each MPI rank pins itself
# to one GPU and tests every `threads`-th epoch starting from a rank-derived
# offset.
import sys
sys.path.append("..")
import os
import time
import argparse
from mpi4py import MPI


parser = argparse.ArgumentParser(description="multi-gpu test all epochs")
# Fixed copy-pasted help text: --start_epoch previously said "test end epoch".
parser.add_argument("--start_epoch", default=1, type=int, required=True, help="test start epoch")
parser.add_argument("--end_epoch", default=20, type=int, required=True, help="test end epoch")
parser.add_argument("--gpu_nums", default=3, type=int, required=True, help="gpu numbers")
parser.add_argument("--threads", default=3, type=int, required=True)
parser.add_argument("--dataset", default="OTB2015", type=str, help="benchmark to test")
args = parser.parse_args()

# Map this MPI rank onto a GPU round-robin and make it the only visible one.
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
GPU_ID = rank % args.gpu_nums
node_name = MPI.Get_processor_name()  # get the name of the node
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU_ID)
print("node name: {}, GPU_ID: {}".format(node_name, GPU_ID))
# Stagger start-up so ranks do not all hit the GPU/driver at once.
time.sleep(rank * 5)

# Run test scripts -- one epoch per iteration. epoch_ID is now initialised
# explicitly instead of the previous bare try/except-NameError pattern.
num_epochs = args.end_epoch - args.start_epoch + 1
epoch_ID = rank % num_epochs + args.start_epoch
for i in range(num_epochs // args.threads + 1):
    if i > 0:
        epoch_ID += args.threads
    if epoch_ID > args.end_epoch:
        continue

    snapshot = "snapshot/checkpoint_e{}.pth".format(epoch_ID)
    print("==> test {}th epoch".format(epoch_ID))

    os.system("python ../../tools/test.py --snapshot {0} --dataset {1} --config config.yaml".format(snapshot, args.dataset))
% error('Tracker not configured! Please edit the tracker_test.m file.'); % Remove this line after proper configuration

% The human readable label for the tracker, used to identify the tracker in reports
tracker_label = ['SiamBAN'];

% Keep MKL single-threaded so the spawned python tracker process is not
% oversubscribed by the toolkit.
setenv('MKL_NUM_THREADS','1');

% Edit these three paths for your installation.
siamban_root = 'path/to/siamban';
track_build_path = 'path/to/track/build';
support_python_path = '/path/to/vot-toolkit/native/trax/support/python';

% BUG FIX: the path cell previously referenced the undefined variable
% 'sianban_root' (typo for 'siamban_root'), which errors at run time.
% NOTE(review): the module name 'vot_siambox.vot_iter' does not match the
% 'vot_siamban' package shipped in this repo -- confirm the entry point.
tracker_command = generate_python_command('vot_siambox.vot_iter', {siamban_root, ...
    [track_build_path '/python/lib'], ...
    support_python_path});

tracker_interpreter = 'python';

tracker_linkpath = {track_build_path};

% tracker_linkpath = {}; % A cell array of custom library directories used by the tracker executable (optional)
def warmup(model):
    # Run a few dummy template passes so CUDA kernels are compiled/cached
    # before timing-sensitive tracking starts.
    for i in range(10):
        model.template(torch.FloatTensor(1,3,127,127).cuda())

def setup_tracker():
    # Build the model from the config file, load snapshot weights, move to
    # GPU in eval mode, and wrap it in the tracker front-end.
    cfg.merge_from_file(cfg_file)

    model = ModelBuilder()
    model = load_pretrain(model, model_file).cuda().eval()

    tracker = build_tracker(model)
    warmup(model)
    return tracker


tracker = setup_tracker()

# VOT integration loop: request the initial region via the polygon protocol.
handle = vot.VOT("polygon")
region = handle.region()
try:
    # Polygon result: flatten the 4 corner points to [x1,y1,...,x4,y4].
    region = np.array([region[0][0][0], region[0][0][1], region[0][1][0], region[0][1][1],
            region[0][2][0], region[0][2][1], region[0][3][0], region[0][3][1]])
except:
    # Already a flat rectangle/sequence -- use it as-is.
    region = np.array(region)

cx, cy, w, h = get_axis_aligned_bbox(region)

image_file = handle.frame()
if not image_file:
    sys.exit(0)

im = cv2.imread(image_file) # HxWxC
# init: convert center/size to a top-left (x, y, w, h) box for the tracker.
target_pos, target_sz = np.array([cx, cy]), np.array([w, h])
gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]
tracker.init(im, gt_bbox_)

# Track frame-by-frame until the toolkit stops supplying frames, reporting
# the predicted box and its confidence back each step.
while True:
    img_file = handle.frame()
    if not img_file:
        break
    im = cv2.imread(img_file)
    outputs = tracker.track(im)
    pred_bbox = outputs['bbox']
    result = Rectangle(*pred_bbox)
    score = outputs['best_score']

    handle.report(result, score)
Add SEE-Net to your PYTHONPATH 3 | export PYTHONPATH=$PWD:$PYTHONPATH 4 | 5 | ## 2. Requirements 6 | Please install the environment following https://github.com/hqucv/siamban. 7 | 8 | ## 2. Dataset 9 | Please generate the cropped template patch and search region, following https://github.com/hqucv/siamban. 10 | 11 | ## 3. Train 12 | (a) Download pretrained model in https://pan.baidu.com/s/1xUNW1wnyN7_Fo7Gcl1GaKQ Access code: 1234 13 | 14 | (b) Change the path of training data in siamese/dataset/dataset.py 15 | 16 | (c) Run: 17 | ```python 18 | cd experiments/siamban_r50_l234 19 | CUDA_VISIBLE_DEVICES=0,1,2 20 | python -m torch.distributed.launch \ 21 | --nproc_per_node=3 \ 22 | --master_port=2333 \ 23 | ../../tools/train.py --cfg config.yaml 24 | ``` 25 | 26 | ## 4. Test 27 | Download testing model in https://pan.baidu.com/s/1xUNW1wnyN7_Fo7Gcl1GaKQ 28 | 29 | Access code: 1234 30 | ```python 31 | python tools/demo.py --config experiments/siamban_r50_l234/config.yaml --snapshot experiments/siamban_r50_l234/snapshot/checkpoint_e30.pth --video_path test_path 32 | ``` 33 | 34 | ## Contact 35 | lizhuanfeng@njust.edu.cn 36 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/build/temp.linux-x86_64-3.7/toolkit/utils/region.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/build/temp.linux-x86_64-3.7/toolkit/utils/region.o -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/build/temp.linux-x86_64-3.7/toolkit/utils/src/region.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/build/temp.linux-x86_64-3.7/toolkit/utils/src/region.o 
-------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/experiments/siamban_r50_l234/config.yaml: -------------------------------------------------------------------------------- 1 | META_ARC: "siamban_r50_l234" 2 | 3 | BACKBONE: 4 | TYPE: "resnet50" 5 | KWARGS: 6 | used_layers: [2, 3, 4] 7 | PRETRAINED: 'pretrained_models/resnet50.model' 8 | TRAIN_LAYERS: ['layer2', 'layer3', 'layer4'] 9 | TRAIN_EPOCH: 10 10 | LAYERS_LR: 0.1 11 | 12 | ADJUST: 13 | ADJUST: True 14 | TYPE: "AdjustAllLayer" 15 | KWARGS: 16 | in_channels: [512, 1024, 2048] 17 | out_channels: [256, 256, 256] 18 | 19 | BAN: 20 | BAN: True 21 | TYPE: 'MultiBAN' 22 | KWARGS: 23 | in_channels: [256, 256, 256] 24 | cls_out_channels: 2 25 | weighted: True 26 | 27 | POINT: 28 | STRIDE: 8 29 | 30 | TRACK: 31 | TYPE: 'SiamBANTracker' 32 | WINDOW_INFLUENCE: 0.4632532824922313 # VOT2018 33 | PENALTY_K: 0.08513642556896711 # VOT2018 34 | LR: 0.44418184746462425 # VOT2018 35 | EXEMPLAR_SIZE: 127 36 | INSTANCE_SIZE: 255 37 | BASE_SIZE: 8 38 | CONTEXT_AMOUNT: 0.5 39 | 40 | TRAIN: 41 | EPOCH: 30 42 | START_EPOCH: 0 43 | BATCH_SIZE: 28 44 | BASE_LR: 0.005 45 | CLS_WEIGHT: 1.0 46 | LOC_WEIGHT: 1.0 47 | RESUME: '' 48 | 49 | LR: 50 | TYPE: 'log' 51 | KWARGS: 52 | start_lr: 0.005 53 | end_lr: 0.00005 54 | LR_WARMUP: 55 | TYPE: 'step' 56 | EPOCH: 5 57 | KWARGS: 58 | start_lr: 0.001 59 | end_lr: 0.005 60 | step: 1 61 | 62 | DATASET: 63 | NAMES: 64 | - 'VID' 65 | - 'YOUTUBEBB' 66 | - 'COCO' 67 | - 'DET' 68 | - 'GOT10K' 69 | - 'LASOT' 70 | 71 | VIDEOS_PER_EPOCH: 1000000 72 | 73 | TEMPLATE: 74 | SHIFT: 4 75 | SCALE: 0.05 76 | BLUR: 0.0 77 | FLIP: 0.0 78 | COLOR: 1.0 79 | 80 | SEARCH: 81 | SHIFT: 64 82 | SCALE: 0.18 83 | BLUR: 0.2 84 | FLIP: 0.0 85 | COLOR: 1.0 86 | 87 | NEG: 0.2 88 | GRAY: 0.0 89 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/experiments/siamban_r50_l234/snapshot/download: 
#!/bin/bash
# Create and populate a conda environment with everything needed to build
# and run SEE-Net, then compile the Cython region-overlap extension.

if [ $# -lt 2 ]; then
    echo "ARGS ERROR!"
    echo " bash install.sh /path/to/your/conda env_name"
    exit 1
fi

# Abort on the first failing command.
set -e

conda_path=$1
env_name=$2

source $conda_path/etc/profile.d/conda.sh

echo "****** create environment " $env_name "*****"
# create environment
conda create -y --name $env_name python=3.7
conda activate $env_name

echo "***** install numpy pytorch opencv *****"
# numpy
conda install -y numpy
# pytorch (-y added: without it the scripted install stalls on a prompt,
# inconsistent with the other conda calls above)
conda install -y pytorch=1.3.1 torchvision cudatoolkit=10.1 -c pytorch
# opencv
pip install opencv-python

echo "***** install other libs *****"
# libs
pip install pyyaml yacs tqdm colorama matplotlib cython tensorboard future mpi4py optuna


echo "***** build extensions *****"
python setup.py build_ext --inplace
# Cython extension wrapping the C region-overlap implementation used by the
# VOT evaluation toolkit (toolkit/utils/region.pyx + src/region.c).
ext_modules = [
    Extension(
        name='toolkit.utils.region',
        sources=[
            'toolkit/utils/region.pyx',
            'toolkit/utils/src/region.c',
        ],
        include_dirs=[
            'toolkit/utils/src'
        ]
    )
]

setup(
    name='toolkit',
    packages=['toolkit'],
    ext_modules=cythonize(ext_modules)
)
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/core/__pycache__/config.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/core/__pycache__/xcorr.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/core/__pycache__/xcorr.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/core/xcorr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch 9 | import torch.nn.functional as F 10 | 11 | 12 | def xcorr_slow(x, kernel): 13 | """for loop to calculate cross correlation, slow version 14 | """ 15 | batch = x.size()[0] 16 | out = [] 17 | for i in range(batch): 18 | px = x[i] 19 | pk = kernel[i] 20 | px = px.view(1, -1, px.size()[1], px.size()[2]) 21 | pk = pk.view(1, -1, pk.size()[1], pk.size()[2]) 22 | po = F.conv2d(px, pk) 23 | out.append(po) 24 | out = torch.cat(out, 0) 25 | return out 26 | 27 | 28 | def xcorr_fast(x, kernel): 29 | """group conv2d to calculate cross correlation, fast version 30 | """ 31 | batch = kernel.size()[0] 32 | pk = kernel.view(-1, x.size()[1], kernel.size()[2], kernel.size()[3]) 33 | px = x.view(1, -1, x.size()[2], x.size()[3]) 34 | po = F.conv2d(px, pk, groups=batch) 35 | po = po.view(batch, -1, po.size()[2], po.size()[3]) 36 | return po 37 | 38 | 39 | def xcorr_depthwise(x, kernel): 40 | """depthwise cross 
correlation 41 | """ 42 | batch = kernel.size(0) 43 | channel = kernel.size(1) 44 | x = x.view(1, batch*channel, x.size(2), x.size(3)) 45 | kernel = kernel.view(batch*channel, 1, kernel.size(2), kernel.size(3)) 46 | out = F.conv2d(x, kernel, groups=batch*channel) 47 | out = out.view(batch, channel, out.size(2), out.size(3)) 48 | return out 49 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/datasets/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/augmentation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/augmentation.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/dataset.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/point_target.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/datasets/__pycache__/point_target.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/datasets/point_target.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import numpy as np 7 | 8 | from siamban.core.config import cfg 9 | from siamban.utils.bbox import corner2center 10 | from siamban.utils.point import Point 11 | 12 | 13 | class PointTarget: 14 | def __init__(self,): 15 | self.points = Point(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE, cfg.TRAIN.SEARCH_SIZE//2) 16 | 17 | def __call__(self, target, size, neg=False): 18 | 19 | # -1 ignore 0 negative 1 positive 20 | cls = -1 * np.ones((size, size), dtype=np.int64) 21 | delta = np.zeros((4, size, size), dtype=np.float32) 22 | 23 | def select(position, keep_num=16): 24 | num = position[0].shape[0] 25 | if num <= keep_num: 26 | return position, num 27 | slt = np.arange(num) 28 | np.random.shuffle(slt) 29 | slt = slt[:keep_num] 30 | return tuple(p[slt] for p in position), keep_num 31 | 32 | tcx, tcy, tw, th = corner2center(target) 33 | points = self.points.points 34 | 35 | if neg: 36 | neg = np.where(np.square(tcx - points[0]) / np.square(tw / 4) + 37 | np.square(tcy - points[1]) / np.square(th / 4) < 1) 38 | neg, neg_num = select(neg, cfg.TRAIN.NEG_NUM) 39 | cls[neg] = 0 40 | 41 | return cls, delta 42 | 43 | delta[0] = points[0] - target[0] 44 | delta[1] = points[1] - target[1] 45 
| delta[2] = target[2] - points[0] 46 | delta[3] = target[3] - points[1] 47 | 48 | # ellipse label 49 | pos = np.where(np.square(tcx - points[0]) / np.square(tw / 4) + 50 | np.square(tcy - points[1]) / np.square(th / 4) < 1) 51 | neg = np.where(np.square(tcx - points[0]) / np.square(tw / 2) + 52 | np.square(tcy - points[1]) / np.square(th / 2) > 1) 53 | 54 | # sampling 55 | pos, pos_num = select(pos, cfg.TRAIN.POS_NUM) 56 | neg, neg_num = select(neg, cfg.TRAIN.TOTAL_NUM - cfg.TRAIN.POS_NUM) 57 | 58 | cls[pos] = 1 59 | cls[neg] = 0 60 | 61 | return cls, delta 62 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/__pycache__/iou_loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/__pycache__/iou_loss.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/__pycache__/model_builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/__pycache__/model_builder.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | from siamban.models.backbone.alexnet import alexnetlegacy, alexnet 9 | from siamban.models.backbone.mobile_v2 import mobilenetv2 10 | from siamban.models.backbone.resnet_atrous import resnet18, resnet34, resnet50 11 | 12 | BACKBONES = { 13 | 'alexnetlegacy': alexnetlegacy, 14 | 'mobilenetv2': mobilenetv2, 15 | 'resnet18': resnet18, 16 | 'resnet34': resnet34, 17 | 'resnet50': resnet50, 18 | 'alexnet': alexnet, 19 | } 20 | 21 | 22 | def get_backbone(name, **kwargs): 23 | return BACKBONES[name](**kwargs) 24 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/alexnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/alexnet.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/gpu_mem_track.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/gpu_mem_track.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/mobile_v2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/mobile_v2.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/resnet_atrous.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/backbone/__pycache__/resnet_atrous.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/backbone/modelsize_estimate.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import 
numpy as np 4 | 5 | 6 | def modelsize(model, input, type_size=4): 7 | para = sum([np.prod(list(p.size())) for p in model.parameters()]) 8 | # print('Model {} : Number of params: {}'.format(model._get_name(), para)) 9 | print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000)) 10 | 11 | input_ = input.clone() 12 | input_.requires_grad_(requires_grad=False) 13 | 14 | mods = list(model.modules()) 15 | out_sizes = [] 16 | 17 | for i in range(1, len(mods)): 18 | m = mods[i] 19 | if isinstance(m, nn.ReLU): 20 | if m.inplace: 21 | continue 22 | out = m(input_) 23 | out_sizes.append(np.array(out.size())) 24 | input_ = out 25 | 26 | total_nums = 0 27 | for i in range(len(out_sizes)): 28 | s = out_sizes[i] 29 | nums = np.prod(np.array(s)) 30 | total_nums += nums 31 | 32 | # print('Model {} : Number of intermedite variables without backward: {}'.format(model._get_name(), total_nums)) 33 | # print('Model {} : Number of intermedite variables with backward: {}'.format(model._get_name(), total_nums*2)) 34 | print('Model {} : intermedite variables: {:3f} M (without backward)' 35 | .format(model._get_name(), total_nums * type_size / 1000 / 1000)) 36 | print('Model {} : intermedite variables: {:3f} M (with backward)' 37 | .format(model._get_name(), total_nums * type_size*2 / 1000 / 1000)) 38 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/head/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | from siamban.models.head.ban import UPChannelBAN, DepthwiseBAN, MultiBAN 7 | 8 | 9 | BANS = { 10 | 'UPChannelBAN': UPChannelBAN, 11 | 'DepthwiseBAN': DepthwiseBAN, 12 | 'MultiBAN': MultiBAN 13 | } 14 | 15 | 16 | def get_ban_head(name, **kwargs): 17 | return 
BANS[name](**kwargs) 18 | 19 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/head/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/head/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/head/__pycache__/ban.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/head/__pycache__/ban.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/init_weight.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def init_weights(model): 5 | for m in model.modules(): 6 | if isinstance(m, nn.Conv2d): 7 | nn.init.kaiming_normal_(m.weight.data, 8 | mode='fan_out', 9 | nonlinearity='relu') 10 | elif isinstance(m, nn.BatchNorm2d): 11 | m.weight.data.fill_(1) 12 | m.bias.data.zero_() 13 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/iou_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | 5 | class IOULoss(nn.Module): 6 | def __init__(self, loc_loss_type): 7 | super(IOULoss, self).__init__() 8 | self.loc_loss_type = loc_loss_type 9 | 10 | def forward(self, pred, target, weight=None): 11 | pred_left = pred[:, 0] 12 | pred_top = pred[:, 1] 13 | pred_right = pred[:, 2] 14 | pred_bottom = pred[:, 3] 15 | 16 | target_left = target[:, 0] 17 
| target_top = target[:, 1] 18 | target_right = target[:, 2] 19 | target_bottom = target[:, 3] 20 | 21 | pred_area = (pred_left + pred_right) * (pred_top + pred_bottom) 22 | target_area = (target_left + target_right) * (target_top + target_bottom) 23 | 24 | w_intersect = torch.min(pred_left, target_left) + torch.min(pred_right, target_right) 25 | g_w_intersect = torch.max(pred_left, target_left) + torch.max(pred_right, target_right) 26 | h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top) 27 | g_h_intersect = torch.max(pred_bottom, target_bottom) + torch.max(pred_top, target_top) 28 | ac_uion = g_w_intersect * g_h_intersect + 1e-7 29 | area_intersect = w_intersect * h_intersect 30 | area_union = target_area + pred_area - area_intersect 31 | ious = (area_intersect + 1.0) / (area_union + 1.0) 32 | gious = ious - (ac_uion - area_union) / ac_uion 33 | 34 | if self.loc_loss_type == 'iou': 35 | losses = -torch.log(ious) 36 | elif self.loc_loss_type == 'linear_iou': 37 | losses = 1 - ious 38 | elif self.loc_loss_type == 'giou': 39 | losses = 1 - gious 40 | else: 41 | raise NotImplementedError 42 | 43 | if weight is not None and weight.sum() > 0: 44 | return (losses * weight).sum() / weight.sum() 45 | else: 46 | assert losses.numel() != 0 47 | return losses.mean() 48 | 49 | 50 | linear_iou = IOULoss(loc_loss_type='linear_iou') 51 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 
2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | import numpy as np 12 | 13 | from siamban.core.config import cfg 14 | from siamban.models.iou_loss import linear_iou 15 | 16 | 17 | def get_cls_loss(pred, label, select): 18 | if len(select.size()) == 0 or \ 19 | select.size() == torch.Size([0]): 20 | return 0 21 | # print ('--333333--pred.size() = ',pred.size()) 22 | # print ('--333333--label.size() = ',label.size()) 23 | pred = torch.index_select(pred, 0, select) 24 | label = torch.index_select(label, 0, select) 25 | # print ('--3--pred.size() = ',pred.size()) # [268, 2] 26 | # print ('--3--label.size() = ',label.size()) # [268] 27 | return F.nll_loss(pred, label) 28 | 29 | 30 | def select_cross_entropy_loss(pred, label): 31 | # print ('--1--pred.size() = ',pred.size()) # [28,25,25,2] 32 | # print ('--1--label.size() = ',label.size()) # [28,25,25] 33 | pred = pred.view(-1, 2) 34 | label = label.view(-1) 35 | # print ('--2--pred.size() = ',pred.size())# [17500, 2] 36 | # print ('--2--label.size() = ',label.size()) # [17500] 37 | pos = label.data.eq(1).nonzero().squeeze().cuda() # [268] 38 | neg = label.data.eq(0).nonzero().squeeze().cuda() # [1166] 39 | # print ('pos.size() = ',pos.size()) 40 | # print ('neg.size() = ',neg.size()) 41 | loss_pos = get_cls_loss(pred, label, pos) 42 | loss_neg = get_cls_loss(pred, label, neg) 43 | return loss_pos * 0.5 + loss_neg * 0.5 44 | 45 | 46 | def weight_l1_loss(pred_loc, label_loc, loss_weight): 47 | if cfg.BAN.BAN: 48 | diff = (pred_loc - label_loc).abs() 49 | diff = diff.sum(dim=1) 50 | else: 51 | diff = None 52 | loss = diff * loss_weight 53 | return loss.sum().div(pred_loc.size()[0]) 54 | 55 | 56 | def select_iou_loss(pred_loc, label_loc, label_cls): 57 | label_cls = label_cls.reshape(-1) 58 | pos = 
label_cls.data.eq(1).nonzero().squeeze().cuda() 59 | 60 | pred_loc = pred_loc.permute(0, 2, 3, 1).reshape(-1, 4) 61 | pred_loc = torch.index_select(pred_loc, 0, pos) 62 | 63 | label_loc = label_loc.permute(0, 2, 3, 1).reshape(-1, 4) 64 | label_loc = torch.index_select(label_loc, 0, pos) 65 | 66 | return linear_iou(pred_loc, label_loc) 67 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/neck/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | 12 | from siamban.models.neck.neck import AdjustLayer, AdjustAllLayer 13 | 14 | NECKS = { 15 | 'AdjustLayer': AdjustLayer, 16 | 'AdjustAllLayer': AdjustAllLayer 17 | } 18 | 19 | def get_neck(name, **kwargs): 20 | return NECKS[name](**kwargs) 21 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/neck/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/neck/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/neck/__pycache__/neck.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/models/neck/__pycache__/neck.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/models/neck/neck.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import torch.nn as nn 9 | 10 | 11 | class AdjustLayer(nn.Module): 12 | def __init__(self, in_channels, out_channels): 13 | super(AdjustLayer, self).__init__() 14 | self.downsample = nn.Sequential( 15 | nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False), 16 | nn.BatchNorm2d(out_channels), 17 | ) 18 | 19 | def forward(self, x): 20 | x = self.downsample(x) 21 | if x.size(3) < 20: 22 | l = 4 23 | r = l + 7 24 | x = x[:, :, l:r, l:r] 25 | return x 26 | 27 | 28 | class AdjustAllLayer(nn.Module): 29 | def __init__(self, in_channels, out_channels): 30 | super(AdjustAllLayer, self).__init__() 31 | self.num = len(out_channels) 32 | if self.num == 1: 33 | self.downsample = AdjustLayer(in_channels[0], out_channels[0]) 34 | else: 35 | for i in range(self.num): 36 | self.add_module('downsample'+str(i+2), 37 | AdjustLayer(in_channels[i], out_channels[i])) 38 | 39 | def forward(self, features): 40 | if self.num == 1: 41 | return self.downsample(features) 42 | else: 43 | out = [] 44 | for i in range(self.num): 45 | adj_layer = getattr(self, 'downsample'+str(i+2)) 46 | out.append(adj_layer(features[i])) 47 | return out 48 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/tracker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/tracker/__init__.py 
-------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/base_tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/base_tracker.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/siamban_tracker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/siamban_tracker.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/tracker_builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/tracker/__pycache__/tracker_builder.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/tracker/tracker_builder.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import 
print_function 4 | from __future__ import unicode_literals 5 | 6 | from siamban.core.config import cfg 7 | from siamban.tracker.siamban_tracker import SiamBANTracker 8 | 9 | TRACKS = { 10 | 'SiamBANTracker': SiamBANTracker 11 | } 12 | 13 | 14 | def build_tracker(model): 15 | return TRACKS[cfg.TRACK.TYPE](model) 16 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/average_meter.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/average_meter.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/bbox.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/bbox.cpython-37.pyc -------------------------------------------------------------------------------- 
/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/distributed.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/distributed.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/log_helper.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/log_helper.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/lr_scheduler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/lr_scheduler.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/misc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/misc.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/model_load.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/model_load.cpython-37.pyc 
-------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/point.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/siamban/utils/__pycache__/point.cpython-37.pyc -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/misc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) SenseTime. All Rights Reserved. 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | from __future__ import unicode_literals 7 | 8 | import os 9 | 10 | from colorama import Fore, Style 11 | 12 | 13 | __all__ = ['commit', 'describe'] 14 | 15 | 16 | def _exec(cmd): 17 | f = os.popen(cmd, 'r', 1) 18 | return f.read().strip() 19 | 20 | 21 | def _bold(s): 22 | return "\033[1m%s\033[0m" % s 23 | 24 | 25 | def _color(s): 26 | return f'{Fore.RED}{s}{Style.RESET_ALL}' 27 | 28 | 29 | def _describe(model, lines=None, spaces=0): 30 | head = " " * spaces 31 | for name, p in model.named_parameters(): 32 | if '.' 
in name: 33 | continue 34 | if p.requires_grad: 35 | name = _color(name) 36 | line = "{head}- {name}".format(head=head, name=name) 37 | lines.append(line) 38 | 39 | for name, m in model.named_children(): 40 | space_num = len(name) + spaces + 1 41 | if m.training: 42 | name = _color(name) 43 | line = "{head}.{name} ({type})".format( 44 | head=head, 45 | name=name, 46 | type=m.__class__.__name__) 47 | lines.append(line) 48 | _describe(m, lines, space_num) 49 | 50 | 51 | def commit(): 52 | root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) 53 | cmd = "cd {}; git log | head -n1 | awk '{{print $2}}'".format(root) 54 | commit = _exec(cmd) 55 | cmd = "cd {}; git log --oneline | head -n1".format(root) 56 | commit_log = _exec(cmd) 57 | return "commit : {}\n log : {}".format(commit, commit_log) 58 | 59 | 60 | def describe(net, name=None): 61 | num = 0 62 | lines = [] 63 | if name is not None: 64 | lines.append(name) 65 | num = len(name) 66 | _describe(net, lines, num) 67 | return "\n".join(lines) 68 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/siamban/utils/point.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | from __future__ import unicode_literals 5 | 6 | import numpy as np 7 | 8 | 9 | class Point: 10 | """ 11 | This class generate points. 
12 | """ 13 | def __init__(self, stride, size, image_center): 14 | self.stride = stride 15 | self.size = size 16 | self.image_center = image_center 17 | 18 | self.points = self.generate_points(self.stride, self.size, self.image_center) 19 | 20 | def generate_points(self, stride, size, im_c): 21 | ori = im_c - size // 2 * stride 22 | x, y = np.meshgrid([ori + stride * dx for dx in np.arange(0, size)], 23 | [ori + stride * dy for dy in np.arange(0, size)]) 24 | points = np.zeros((2, size, size), dtype=np.float32) 25 | points[0, :, :], points[1, :, :] = x.astype(np.float32), y.astype(np.float32) 26 | 27 | return points 28 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/test.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=3 nohup python -u tools/demo.py --config experiments/siamban_r50_l234/config.yaml --snapshot experiments/siamban_r50_l234/snapshot/Final-VIS-Model.pth --video_path /data/lizf/HOT/dataset/test/test_HSI/ > nohup.test.VIS.log 2>&1 & 2 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/testing_dataset/README.md: -------------------------------------------------------------------------------- 1 | # Testing dataset directory 2 | # putting your testing dataset here 3 | - [x] [VOT2018](http://www.votchallenge.net/vot2018/dataset.html) 4 | - [x] [VOT2019](https://www.votchallenge.net/vot2019/dataset.html) 5 | - [x] [OTB100(OTB2015)](http://cvlab.hanyang.ac.kr/tracker_benchmark/datasets.html) 6 | - [x] [UAV123](https://ivul.kaust.edu.sa/Pages/Dataset-UAV123.aspx) 7 | - [x] [NFS](http://ci2cv.net/nfs/index.html) 8 | - [x] [LaSOT](https://cis.temple.edu/lasot/) 9 | 10 | ## Download Dataset 11 | Download [json files](https://drive.google.com/drive/folders/10cfXjwQQBQeu48XMf2xc_W1LucpistPI). 12 | 13 | 1. 
Put CVRP13.json, OTB100.json, OTB50.json in OTB100 dataset directory (you need to copy Jogging to Jogging-1 and Jogging-2, and copy Skating2 to Skating2-1 and Skating2-2 or using softlink) 14 | 15 | The directory should have the below format 16 | 17 | | -- OTB100/ 18 | 19 | ​ | -- Basketball 20 | 21 | ​ | ...... 22 | 23 | ​ | -- Woman 24 | 25 | ​ | -- OTB100.json 26 | 27 | ​ | -- OTB50.json 28 | 29 | ​ | -- CVPR13.json 30 | 31 | 2. Put all other jsons in the dataset directory like in step 1 32 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/toolkit/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .vot import VOTDataset, VOTLTDataset 2 | from .otb import OTBDataset 3 | from .uav import UAVDataset 4 | from .lasot import LaSOTDataset 5 | from .nfs import NFSDataset 6 | from .trackingnet import TrackingNetDataset 7 | from .got10k import GOT10kDataset 8 | 9 | class DatasetFactory(object): 10 | @staticmethod 11 | def create_dataset(**kwargs): 12 | """ 13 | Args: 14 | name: dataset name 'OTB2015', 'LaSOT', 'UAV123', 'NFS240', 'NFS30', 15 | 'VOT2018', 'VOT2016', 'VOT2018-LT' 16 | dataset_root: dataset root 17 | load_img: wether to load image 18 | Return: 19 | dataset 20 | """ 21 | assert 'name' in kwargs, "should provide dataset name" 22 | name = kwargs['name'] 23 | if 'OTB' in name: 24 | dataset = OTBDataset(**kwargs) 25 | elif 'LaSOT' == name: 26 | dataset = LaSOTDataset(**kwargs) 27 | elif 'UAV' in name: 28 | dataset = UAVDataset(**kwargs) 29 | elif 'NFS' in name: 30 | dataset = NFSDataset(**kwargs) 31 | elif 
'VOT2018' == name or 'VOT2016' == name or 'VOT2019' == name: 32 | dataset = VOTDataset(**kwargs) 33 | elif 'VOT2018-LT' == name: 34 | dataset = VOTLTDataset(**kwargs) 35 | elif 'TrackingNet' == name: 36 | dataset = TrackingNetDataset(**kwargs) 37 | elif 'GOT-10k' == name: 38 | dataset = GOT10kDataset(**kwargs) 39 | else: 40 | raise Exception("unknow dataset {}".format(kwargs['name'])) 41 | return dataset 42 | 43 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/datasets/dataset.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | 3 | class Dataset(object): 4 | def __init__(self, name, dataset_root): 5 | self.name = name 6 | self.dataset_root = dataset_root 7 | self.videos = None 8 | 9 | def __getitem__(self, idx): 10 | if isinstance(idx, str): 11 | return self.videos[idx] 12 | elif isinstance(idx, int): 13 | return self.videos[sorted(list(self.videos.keys()))[idx]] 14 | 15 | def __len__(self): 16 | return len(self.videos) 17 | 18 | def __iter__(self): 19 | keys = sorted(list(self.videos.keys())) 20 | for key in keys: 21 | yield self.videos[key] 22 | 23 | def set_tracker(self, path, tracker_names): 24 | """ 25 | Args: 26 | path: path to tracker results, 27 | tracker_names: list of tracker name 28 | """ 29 | self.tracker_path = path 30 | self.tracker_names = tracker_names 31 | # for video in tqdm(self.videos.values(), 32 | # desc='loading tacker result', ncols=100): 33 | # video.load_tracker(path, tracker_names) 34 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/datasets/got10k.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import os 4 | 5 | from tqdm import tqdm 6 | 7 | from .dataset import Dataset 8 | from .video import Video 9 | 10 | class GOT10kVideo(Video): 11 | """ 12 | Args: 13 | name: video name 14 | 
root: dataset root 15 | video_dir: video directory 16 | init_rect: init rectangle 17 | img_names: image names 18 | gt_rect: groundtruth rectangle 19 | attr: attribute of video 20 | """ 21 | def __init__(self, name, root, video_dir, init_rect, img_names, 22 | gt_rect, attr, load_img=False): 23 | super(GOT10kVideo, self).__init__(name, root, video_dir, 24 | init_rect, img_names, gt_rect, attr, load_img) 25 | 26 | # def load_tracker(self, path, tracker_names=None): 27 | # """ 28 | # Args: 29 | # path(str): path to result 30 | # tracker_name(list): name of tracker 31 | # """ 32 | # if not tracker_names: 33 | # tracker_names = [x.split('/')[-1] for x in glob(path) 34 | # if os.path.isdir(x)] 35 | # if isinstance(tracker_names, str): 36 | # tracker_names = [tracker_names] 37 | # # self.pred_trajs = {} 38 | # for name in tracker_names: 39 | # traj_file = os.path.join(path, name, self.name+'.txt') 40 | # if os.path.exists(traj_file): 41 | # with open(traj_file, 'r') as f : 42 | # self.pred_trajs[name] = [list(map(float, x.strip().split(','))) 43 | # for x in f.readlines()] 44 | # if len(self.pred_trajs[name]) != len(self.gt_traj): 45 | # print(name, len(self.pred_trajs[name]), len(self.gt_traj), self.name) 46 | # else: 47 | 48 | # self.tracker_names = list(self.pred_trajs.keys()) 49 | 50 | class GOT10kDataset(Dataset): 51 | """ 52 | Args: 53 | name: dataset name, should be "NFS30" or "NFS240" 54 | dataset_root, dataset root dir 55 | """ 56 | def __init__(self, name, dataset_root, load_img=False): 57 | super(GOT10kDataset, self).__init__(name, dataset_root) 58 | with open(os.path.join(dataset_root, name+'.json'), 'r') as f: 59 | meta_data = json.load(f) 60 | 61 | # load videos 62 | pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100) 63 | self.videos = {} 64 | for video in pbar: 65 | pbar.set_postfix_str(video) 66 | self.videos[video] = GOT10kVideo(video, 67 | dataset_root, 68 | meta_data[video]['video_dir'], 69 | meta_data[video]['init_rect'], 70 | 
meta_data[video]['img_names'], 71 | meta_data[video]['gt_rect'], 72 | None) 73 | self.attr = {} 74 | self.attr['ALL'] = list(self.videos.keys()) 75 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/datasets/nfs.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | 5 | from tqdm import tqdm 6 | from glob import glob 7 | 8 | from .dataset import Dataset 9 | from .video import Video 10 | 11 | 12 | class NFSVideo(Video): 13 | """ 14 | Args: 15 | name: video name 16 | root: dataset root 17 | video_dir: video directory 18 | init_rect: init rectangle 19 | img_names: image names 20 | gt_rect: groundtruth rectangle 21 | attr: attribute of video 22 | """ 23 | def __init__(self, name, root, video_dir, init_rect, img_names, 24 | gt_rect, attr, load_img=False): 25 | super(NFSVideo, self).__init__(name, root, video_dir, 26 | init_rect, img_names, gt_rect, attr, load_img) 27 | 28 | # def load_tracker(self, path, tracker_names=None): 29 | # """ 30 | # Args: 31 | # path(str): path to result 32 | # tracker_name(list): name of tracker 33 | # """ 34 | # if not tracker_names: 35 | # tracker_names = [x.split('/')[-1] for x in glob(path) 36 | # if os.path.isdir(x)] 37 | # if isinstance(tracker_names, str): 38 | # tracker_names = [tracker_names] 39 | # # self.pred_trajs = {} 40 | # for name in tracker_names: 41 | # traj_file = os.path.join(path, name, self.name+'.txt') 42 | # if os.path.exists(traj_file): 43 | # with open(traj_file, 'r') as f : 44 | # self.pred_trajs[name] = [list(map(float, x.strip().split(','))) 45 | # for x in f.readlines()] 46 | # if len(self.pred_trajs[name]) != len(self.gt_traj): 47 | # print(name, len(self.pred_trajs[name]), len(self.gt_traj), self.name) 48 | # else: 49 | 50 | # self.tracker_names = list(self.pred_trajs.keys()) 51 | 52 | class NFSDataset(Dataset): 53 | """ 54 | Args: 55 | name: dataset name, 
should be "NFS30" or "NFS240" 56 | dataset_root, dataset root dir 57 | """ 58 | def __init__(self, name, dataset_root, load_img=False): 59 | super(NFSDataset, self).__init__(name, dataset_root) 60 | with open(os.path.join(dataset_root, name+'.json'), 'r') as f: 61 | meta_data = json.load(f) 62 | 63 | # load videos 64 | pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100) 65 | self.videos = {} 66 | for video in pbar: 67 | pbar.set_postfix_str(video) 68 | self.videos[video] = NFSVideo(video, 69 | dataset_root, 70 | meta_data[video]['video_dir'], 71 | meta_data[video]['init_rect'], 72 | meta_data[video]['img_names'], 73 | meta_data[video]['gt_rect'], 74 | None) 75 | 76 | self.attr = {} 77 | self.attr['ALL'] = list(self.videos.keys()) 78 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/datasets/trackingnet.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | 5 | from tqdm import tqdm 6 | from glob import glob 7 | 8 | from .dataset import Dataset 9 | from .video import Video 10 | 11 | class TrackingNetVideo(Video): 12 | """ 13 | Args: 14 | name: video name 15 | root: dataset root 16 | video_dir: video directory 17 | init_rect: init rectangle 18 | img_names: image names 19 | gt_rect: groundtruth rectangle 20 | attr: attribute of video 21 | """ 22 | def __init__(self, name, root, video_dir, init_rect, img_names, 23 | gt_rect, attr, load_img=False): 24 | super(TrackingNetVideo, self).__init__(name, root, video_dir, 25 | init_rect, img_names, gt_rect, attr, load_img) 26 | 27 | # def load_tracker(self, path, tracker_names=None): 28 | # """ 29 | # Args: 30 | # path(str): path to result 31 | # tracker_name(list): name of tracker 32 | # """ 33 | # if not tracker_names: 34 | # tracker_names = [x.split('/')[-1] for x in glob(path) 35 | # if os.path.isdir(x)] 36 | # if isinstance(tracker_names, str): 37 | # 
tracker_names = [tracker_names] 38 | # # self.pred_trajs = {} 39 | # for name in tracker_names: 40 | # traj_file = os.path.join(path, name, self.name+'.txt') 41 | # if os.path.exists(traj_file): 42 | # with open(traj_file, 'r') as f : 43 | # self.pred_trajs[name] = [list(map(float, x.strip().split(','))) 44 | # for x in f.readlines()] 45 | # if len(self.pred_trajs[name]) != len(self.gt_traj): 46 | # print(name, len(self.pred_trajs[name]), len(self.gt_traj), self.name) 47 | # else: 48 | 49 | # self.tracker_names = list(self.pred_trajs.keys()) 50 | 51 | class TrackingNetDataset(Dataset): 52 | """ 53 | Args: 54 | name: dataset name, should be "NFS30" or "NFS240" 55 | dataset_root, dataset root dir 56 | """ 57 | def __init__(self, name, dataset_root, load_img=False): 58 | super(TrackingNetDataset, self).__init__(name, dataset_root) 59 | with open(os.path.join(dataset_root, name+'.json'), 'r') as f: 60 | meta_data = json.load(f) 61 | 62 | # load videos 63 | pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100) 64 | self.videos = {} 65 | for video in pbar: 66 | pbar.set_postfix_str(video) 67 | self.videos[video] = TrackingNetVideo(video, 68 | dataset_root, 69 | meta_data[video]['video_dir'], 70 | meta_data[video]['init_rect'], 71 | meta_data[video]['img_names'], 72 | meta_data[video]['gt_rect'], 73 | None) 74 | self.attr = {} 75 | self.attr['ALL'] = list(self.videos.keys()) 76 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/datasets/uav.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | from tqdm import tqdm 5 | from glob import glob 6 | 7 | from .dataset import Dataset 8 | from .video import Video 9 | 10 | class UAVVideo(Video): 11 | """ 12 | Args: 13 | name: video name 14 | root: dataset root 15 | video_dir: video directory 16 | init_rect: init rectangle 17 | img_names: image names 18 | gt_rect: groundtruth rectangle 19 | 
attr: attribute of video 20 | """ 21 | def __init__(self, name, root, video_dir, init_rect, img_names, 22 | gt_rect, attr, load_img=False): 23 | super(UAVVideo, self).__init__(name, root, video_dir, 24 | init_rect, img_names, gt_rect, attr, load_img) 25 | 26 | 27 | class UAVDataset(Dataset): 28 | """ 29 | Args: 30 | name: dataset name, should be 'UAV123', 'UAV20L' 31 | dataset_root: dataset root 32 | load_img: wether to load all imgs 33 | """ 34 | def __init__(self, name, dataset_root, load_img=False): 35 | super(UAVDataset, self).__init__(name, dataset_root) 36 | with open(os.path.join(dataset_root, name+'.json'), 'r') as f: 37 | meta_data = json.load(f) 38 | 39 | # load videos 40 | pbar = tqdm(meta_data.keys(), desc='loading '+name, ncols=100) 41 | self.videos = {} 42 | for video in pbar: 43 | pbar.set_postfix_str(video) 44 | self.videos[video] = UAVVideo(video, 45 | dataset_root, 46 | meta_data[video]['video_dir'], 47 | meta_data[video]['init_rect'], 48 | meta_data[video]['img_names'], 49 | meta_data[video]['gt_rect'], 50 | meta_data[video]['attr']) 51 | 52 | # set attr 53 | attr = [] 54 | for x in self.videos.values(): 55 | attr += x.attr 56 | attr = set(attr) 57 | self.attr = {} 58 | self.attr['ALL'] = list(self.videos.keys()) 59 | for x in attr: 60 | self.attr[x] = [] 61 | for k, v in self.videos.items(): 62 | for attr_ in v.attr: 63 | self.attr[attr_].append(k) 64 | 65 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .ar_benchmark import AccuracyRobustnessBenchmark 2 | from .eao_benchmark import EAOBenchmark 3 | from .ope_benchmark import OPEBenchmark 4 | from .f1_benchmark import F1Benchmark 5 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/utils/__init__.py: 
-------------------------------------------------------------------------------- 1 | from . import region 2 | from .statistics import * 3 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/utils/c_region.pxd: -------------------------------------------------------------------------------- 1 | cdef extern from "src/region.h": 2 | ctypedef enum region_type "RegionType": 3 | EMTPY 4 | SPECIAL 5 | RECTANGEL 6 | POLYGON 7 | MASK 8 | 9 | ctypedef struct region_bounds: 10 | float top 11 | float bottom 12 | float left 13 | float right 14 | 15 | ctypedef struct region_rectangle: 16 | float x 17 | float y 18 | float width 19 | float height 20 | 21 | # ctypedef struct region_mask: 22 | # int x 23 | # int y 24 | # int width 25 | # int height 26 | # char *data 27 | 28 | ctypedef struct region_polygon: 29 | int count 30 | float *x 31 | float *y 32 | 33 | ctypedef union region_container_data: 34 | region_rectangle rectangle 35 | region_polygon polygon 36 | # region_mask mask 37 | int special 38 | 39 | ctypedef struct region_container: 40 | region_type type 41 | region_container_data data 42 | 43 | # ctypedef struct region_overlap: 44 | # float overlap 45 | # float only1 46 | # float only2 47 | 48 | # region_overlap region_compute_overlap(const region_container* ra, const region_container* rb, region_bounds bounds) 49 | 50 | float compute_polygon_overlap(const region_polygon* p1, const region_polygon* p2, float *only1, float *only2, region_bounds bounds) 51 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/utils/misc.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author fangyi.zhang@vipl.ict.ac.cn 3 | """ 4 | import numpy as np 5 | 6 | def determine_thresholds(confidence, resolution=100): 7 | """choose threshold according to confidence 8 | 9 | Args: 10 | confidence: list or numpy array or numpy array 
11 | reolution: number of threshold to choose 12 | 13 | Restures: 14 | threshold: numpy array 15 | """ 16 | if isinstance(confidence, list): 17 | confidence = np.array(confidence) 18 | confidence = confidence.flatten() 19 | confidence = confidence[~np.isnan(confidence)] 20 | confidence.sort() 21 | 22 | assert len(confidence) > resolution and resolution > 2 23 | 24 | thresholds = np.ones((resolution)) 25 | thresholds[0] = - np.inf 26 | thresholds[-1] = np.inf 27 | delta = np.floor(len(confidence) / (resolution - 2)) 28 | idxs = np.linspace(delta, len(confidence)-delta, resolution-2, dtype=np.int32) 29 | thresholds[1:-1] = confidence[idxs] 30 | return thresholds 31 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/utils/region.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/toolkit/utils/region.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .draw_f1 import draw_f1 2 | from .draw_success_precision import draw_success_precision 3 | from .draw_eao import draw_eao 4 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/visualization/draw_eao.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import pickle 4 | 5 | from matplotlib import rc 6 | from .draw_utils import COLOR, MARKER_STYLE 7 | 8 | rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) 9 | rc('text', usetex=True) 10 | 11 | def draw_eao(result): 12 | fig = plt.figure() 13 | ax = 
fig.add_subplot(111, projection='polar') 14 | angles = np.linspace(0, 2*np.pi, 8, endpoint=True) 15 | 16 | attr2value = [] 17 | for i, (tracker_name, ret) in enumerate(result.items()): 18 | value = list(ret.values()) 19 | attr2value.append(value) 20 | value.append(value[0]) 21 | attr2value = np.array(attr2value) 22 | max_value = np.max(attr2value, axis=0) 23 | min_value = np.min(attr2value, axis=0) 24 | for i, (tracker_name, ret) in enumerate(result.items()): 25 | value = list(ret.values()) 26 | value.append(value[0]) 27 | value = np.array(value) 28 | value *= (1 / max_value) 29 | plt.plot(angles, value, linestyle='-', color=COLOR[i], marker=MARKER_STYLE[i], 30 | label=tracker_name, linewidth=1.5, markersize=6) 31 | 32 | attrs = ["Overall", "Camera motion", 33 | "Illumination change","Motion Change", 34 | "Size change","Occlusion", 35 | "Unassigned"] 36 | attr_value = [] 37 | for attr, maxv, minv in zip(attrs, max_value, min_value): 38 | attr_value.append(attr + "\n({:.3f},{:.3f})".format(minv, maxv)) 39 | ax.set_thetagrids(angles[:-1] * 180/np.pi, attr_value) 40 | ax.spines['polar'].set_visible(False) 41 | ax.legend(loc='upper center', bbox_to_anchor=(0.5,-0.07), frameon=False, ncol=5) 42 | ax.grid(b=False) 43 | ax.set_ylim(0, 1.18) 44 | ax.set_yticks([]) 45 | plt.show() 46 | 47 | if __name__ == '__main__': 48 | result = pickle.load(open("../../result.pkl", 'rb')) 49 | draw_eao(result) 50 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/visualization/draw_f1.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | from matplotlib import rc 5 | from .draw_utils import COLOR, LINE_STYLE 6 | 7 | rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) 8 | rc('text', usetex=True) 9 | 10 | def draw_f1(result, bold_name=None): 11 | # drawing f1 contour 12 | fig, ax = plt.subplots() 13 | for f1 in 
np.arange(0.1, 1, 0.1): 14 | recall = np.arange(f1, 1+0.01, 0.01) 15 | precision = f1 * recall / (2 * recall - f1) 16 | ax.plot(recall, precision, color=[0,1,0], linestyle='-', linewidth=0.5) 17 | ax.plot(precision, recall, color=[0,1,0], linestyle='-', linewidth=0.5) 18 | ax.grid(b=True) 19 | ax.set_aspect(1) 20 | plt.xlabel('Recall') 21 | plt.ylabel('Precision') 22 | plt.axis([0, 1, 0, 1]) 23 | plt.title(r'\textbf{VOT2018-LT Precision vs Recall}') 24 | 25 | # draw result line 26 | all_precision = {} 27 | all_recall = {} 28 | best_f1 = {} 29 | best_idx = {} 30 | for tracker_name, ret in result.items(): 31 | precision = np.mean(list(ret['precision'].values()), axis=0) 32 | recall = np.mean(list(ret['recall'].values()), axis=0) 33 | f1 = 2 * precision * recall / (precision + recall) 34 | max_idx = np.argmax(f1) 35 | all_precision[tracker_name] = precision 36 | all_recall[tracker_name] = recall 37 | best_f1[tracker_name] = f1[max_idx] 38 | best_idx[tracker_name] = max_idx 39 | 40 | for idx, (tracker_name, best_f1) in \ 41 | enumerate(sorted(best_f1.items(), key=lambda x:x[1], reverse=True)): 42 | if tracker_name == bold_name: 43 | label = r"\textbf{[%.3f] Ours}" % (best_f1) 44 | else: 45 | label = "[%.3f] " % (best_f1) + tracker_name 46 | recall = all_recall[tracker_name][:-1] 47 | precision = all_precision[tracker_name][:-1] 48 | ax.plot(recall, precision, color=COLOR[idx], linestyle='-', 49 | label=label) 50 | f1_idx = best_idx[tracker_name] 51 | ax.plot(recall[f1_idx], precision[f1_idx], color=[0,0,0], marker='o', 52 | markerfacecolor=COLOR[idx], markersize=5) 53 | ax.legend(loc='lower right', labelspacing=0.2) 54 | plt.xticks(np.arange(0, 1+0.1, 0.1)) 55 | plt.yticks(np.arange(0, 1+0.1, 0.1)) 56 | plt.show() 57 | 58 | if __name__ == '__main__': 59 | draw_f1(None) 60 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/toolkit/visualization/draw_utils.py: 
-------------------------------------------------------------------------------- 1 | 2 | COLOR = ((1, 0, 0), 3 | (0, 1, 0), 4 | (1, 0, 1), 5 | (1, 1, 0), 6 | (0 , 162/255, 232/255), 7 | (0.5, 0.5, 0.5), 8 | (0, 0, 1), 9 | (0, 1, 1), 10 | (136/255, 0 , 21/255), 11 | (255/255, 127/255, 39/255), 12 | (0, 0, 0)) 13 | 14 | LINE_STYLE = ['-', '--', ':', '-', '--', ':', '-', '--', ':', '-'] 15 | 16 | MARKER_STYLE = ['o', 'v', '<', '*', 'D', 'x', '.', 'x', '<', '.'] 17 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/tools/test_epochs.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("..") 3 | import os 4 | import time 5 | import argparse 6 | from mpi4py import MPI 7 | 8 | 9 | parser = argparse.ArgumentParser(description="multi-gpu test all epochs") 10 | parser.add_argument("--start_epoch", default=1, type=int, required=True, help="test end epoch") 11 | parser.add_argument("--end_epoch", default=20, type=int, required=True, help="test end epoch") 12 | parser.add_argument("--gpu_nums", default=3, type=int, required=True, help="gpu numbers") 13 | parser.add_argument("--threads", default=3, type=int, required=True) 14 | parser.add_argument("--dataset", default="OTB2015", type=str, help="benchmark to test") 15 | args = parser.parse_args() 16 | 17 | # init gpu and epochs 18 | comm = MPI.COMM_WORLD 19 | size = comm.Get_size() 20 | rank = comm.Get_rank() 21 | GPU_ID = rank % args.gpu_nums 22 | node_name = MPI.Get_processor_name() # get the name of the node 23 | os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU_ID) 24 | print("node name: {}, GPU_ID: {}".format(node_name, GPU_ID)) 25 | time.sleep(rank * 5) 26 | 27 | # run test scripts -- one epoch for each thread 28 | for i in range((args.end_epoch - args.start_epoch + 1) // args.threads + 1): 29 | dataset = args.dataset 30 | try: 31 | epoch_ID += args.threads 32 | except: 33 | epoch_ID = rank % 
(args.end_epoch - args.start_epoch + 1) + args.start_epoch 34 | 35 | if epoch_ID > args.end_epoch: 36 | continue 37 | 38 | snapshot = "snapshot/checkpoint_e{}.pth".format(epoch_ID) 39 | print("==> test {}th epoch".format(epoch_ID)) 40 | 41 | os.system("python ../../tools/test.py --snapshot {0} --dataset {1} --config config.yaml".format(snapshot, dataset)) 42 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/vot_siamban/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/code/SEE-Net-VIS-16bands/vot_siamban/__init__.py -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/vot_siamban/tracker_SiamBAN.m: -------------------------------------------------------------------------------- 1 | 2 | % error('Tracker not configured! Please edit the tracker_test.m file.'); % Remove this line after proper configuration 3 | 4 | % The human readable label for the tracker, used to identify the tracker in reports 5 | % If not set, it will be set to the same value as the identifier. 6 | % It does not have to be unique, but it is best that it is. 7 | tracker_label = ['SiamBAN']; 8 | 9 | % For Python implementations we have created a handy function that generates the appropritate 10 | % command that will run the python executable and execute the given script that includes your 11 | % tracker implementation. 12 | % 13 | % Please customize the line below by substituting the first argument with the name of the 14 | % script of your tracker (not the .py file but just the name of the script) and also provide the 15 | % path (or multiple paths) where the tracker sources % are found as the elements of the cell 16 | % array (second argument). 
17 | setenv('MKL_NUM_THREADS','1'); 18 | siamban_root = 'path/to/siamban'; 19 | track_build_path = 'path/to/track/build'; 20 | support_python_path = '/path/to/vot-toolkit/native/trax/support/python'; 21 | tracker_command = generate_python_command('vot_siambox.vot_iter', {sianban_root, 22 | [track_build_path '/python/lib'], 23 | support_python_path}); 24 | 25 | tracker_interpreter = 'python'; 26 | 27 | tracker_linkpath = {track_build_path}; 28 | 29 | % tracker_linkpath = {}; % A cell array of custom library directories used by the tracker executable (optional) 30 | -------------------------------------------------------------------------------- /code/SEE-Net-VIS-16bands/vot_siamban/vot_siamban.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import cv2 3 | import torch 4 | import numpy as np 5 | import os 6 | from os.path import join 7 | 8 | from siamban.core.config import cfg 9 | from siamban.models.model_builder import ModelBuilder 10 | from siamban.tracker.tracker_builder import build_tracker 11 | from siamban.utils.bbox import get_axis_aligned_bbox 12 | from siamban.utils.model_load import load_pretrain 13 | from toolkit.datasets import DatasetFactory 14 | from toolkit.utils.region import vot_overlap, vot_float2str 15 | 16 | from . 
import vot 17 | from .vot import Rectangle, Polygon, Point 18 | 19 | 20 | # modify root 21 | 22 | # cfg_root = "path/to/expr" 23 | # model_file = join(cfg_root, 'model.pth') 24 | # cfg_file = join(cfg_root, 'config.yaml') 25 | 26 | cfg_root = "/path/to/siamban/experiments/siamban_r50_l234_dwxcorr" 27 | model_file = join(cfg_root, 'snapshot/checkpoint_e20.pth') 28 | cfg_file = join(cfg_root, 'config.yaml') 29 | 30 | def warmup(model): 31 | for i in range(10): 32 | model.template(torch.FloatTensor(1,3,127,127).cuda()) 33 | 34 | def setup_tracker(): 35 | cfg.merge_from_file(cfg_file) 36 | 37 | model = ModelBuilder() 38 | model = load_pretrain(model, model_file).cuda().eval() 39 | 40 | tracker = build_tracker(model) 41 | warmup(model) 42 | return tracker 43 | 44 | 45 | tracker = setup_tracker() 46 | 47 | handle = vot.VOT("polygon") 48 | region = handle.region() 49 | try: 50 | region = np.array([region[0][0][0], region[0][0][1], region[0][1][0], region[0][1][1], 51 | region[0][2][0], region[0][2][1], region[0][3][0], region[0][3][1]]) 52 | except: 53 | region = np.array(region) 54 | 55 | cx, cy, w, h = get_axis_aligned_bbox(region) 56 | 57 | image_file = handle.frame() 58 | if not image_file: 59 | sys.exit(0) 60 | 61 | im = cv2.imread(image_file) # HxWxC 62 | # init 63 | target_pos, target_sz = np.array([cx, cy]), np.array([w, h]) 64 | gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h] 65 | tracker.init(im, gt_bbox_) 66 | 67 | while True: 68 | img_file = handle.frame() 69 | if not img_file: 70 | break 71 | im = cv2.imread(img_file) 72 | outputs = tracker.track(im) 73 | pred_bbox = outputs['bbox'] 74 | result = Rectangle(*pred_bbox) 75 | score = outputs['best_score'] 76 | 77 | handle.report(result, score) 78 | -------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/BAE-Net-0.6062-0.8778.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAUC_DP_curve/BAE-Net-0.6062-0.8778.mat -------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/CNHT-0.1713-0.3351.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAUC_DP_curve/CNHT-0.1713-0.3351.mat -------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/DeepHKCF-0.3033-0.5415.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAUC_DP_curve/DeepHKCF-0.3033-0.5415.mat -------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/MFI-0.6009-0.8925.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAUC_DP_curve/MFI-0.6009-0.8925.mat -------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/MHT-0.5860-0.8818.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAUC_DP_curve/MHT-0.5860-0.8818.mat -------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/SEE-Net-0.6657-0.9327.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAUC_DP_curve/SEE-Net-0.6657-0.9327.mat 
-------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/SST-Net-0.6230-0.9161.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAUC_DP_curve/SST-Net-0.6230-0.9161.mat -------------------------------------------------------------------------------- /plot-tools/plotAUC_DP_curve/readme.txt: -------------------------------------------------------------------------------- 1 | run plotTracking_AUC_DP.m 2 | 3 | You need change the value in Line 38-39 in "plotTracking_AUC_DP.m"; 4 | You need change the value in Line 46 in "plotTracking_AUC_DP.m", where "1" is used for drawing the AUC curve and "0" for drawing the DP curve. -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/BAE-Net-0.6062-0.8778.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/BAE-Net-0.6062-0.8778.mat -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/CNHT-0.1713-0.3351.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/CNHT-0.1713-0.3351.mat -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/DROL-0.6262-0.9001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/DROL-0.6262-0.9001.mat -------------------------------------------------------------------------------- 
/plot-tools/plotAttribute_Curve/DeepHKCF-0.3033-0.5415.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/DeepHKCF-0.3033-0.5415.mat -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/MFI-0.6009-0.8925.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/MFI-0.6009-0.8925.mat -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/MHT-0.5860-0.8818.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/MHT-0.5860-0.8818.mat -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/SEE-Net-0.6657-0.9327.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/SEE-Net-0.6657-0.9327.mat -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/SST-Net-0.6230-0.9161.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/SST-Net-0.6230-0.9161.mat -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/computeArea.m: -------------------------------------------------------------------------------- 1 | 2 | function areaBB = computeArea(bb) 3 | %computes 
area of the bb=[xmin ymin xmax ymax] 4 | 5 | if ((bb(1) > bb(3)) || (bb(2) > bb(4))) 6 | areaBB = 0; 7 | else 8 | areaBB = (bb(3) - bb(1) + 1) * (bb(4) - bb(2) + 1); 9 | end -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/computeDistance.m: -------------------------------------------------------------------------------- 1 | function distances=computeDistance(positions, ground_truth) 2 | distances = sqrt((positions(:,1) - ground_truth(:,1)).^2 + ... 3 | (positions(:,2) - ground_truth(:,2)).^2); 4 | distances(isnan(distances)) = []; 5 | 6 | end -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/computeIntersectionArea.m: -------------------------------------------------------------------------------- 1 | function areaIntersection = computeIntersectionArea(bb1,bb2) 2 | %compute intersection anrea of bb1 and bb2 3 | %bb1 and bb2 - bounding boxes 4 | %bbi = [xmin ymin xmax ymax] for i=1,2 5 | 6 | xmin = max(bb1(1),bb2(1)); 7 | xmax = min(bb1(3),bb2(3)); 8 | ymin = max(bb1(2),bb2(2)); 9 | ymax = min(bb1(4),bb2(4)); 10 | 11 | areaIntersection = computeArea([xmin ymin xmax ymax]); 12 | 13 | end -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/computeMetric.m: -------------------------------------------------------------------------------- 1 | function [distance_rec,PASCAL_rec,average_cle_rec]= computeMetric(pd_boxes,ground_truth,distance_precision_threshold,PASCAL_threshold) 2 | PASCAL_rec=zeros(1,length(PASCAL_threshold)); 3 | average_cle_rec=zeros(1,length(PASCAL_threshold)); 4 | distance_rec=zeros(1,length(distance_precision_threshold)); 5 | for j=1:length(distance_precision_threshold) 6 | [distance_rec(j),PASCAL_rec(j),average_cle_rec(j)]= ... 
7 | compute_performance_measures(pd_boxes, ground_truth,distance_precision_threshold(j),PASCAL_threshold(j)); 8 | end 9 | end -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/computePascalScore.m: -------------------------------------------------------------------------------- 1 | function pascalScore = computePascalScore(bb1,bb2) 2 | %compute the Pascal score of the bb1, bb2 (intersection/union) 3 | 4 | intersectionArea = computeIntersectionArea(bb1,bb2); 5 | pascalScore = intersectionArea/(computeArea(bb1)+computeArea(bb2)-intersectionArea); 6 | return -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/compute_relaibitlity.m: -------------------------------------------------------------------------------- 1 | function [overlaps,distances] = ... 2 | compute_relaibitlity(positions, ground_truth) 3 | 4 | % [distance_precision, PASCAL_precision, average_center_location_error] = ... 5 | % compute_performance_measures(positions, ground_truth, distance_precision_threshold, PASCAL_threshold) 6 | % 7 | % For the given tracker output positions and ground truth it computes the: 8 | % * Distance Precision at the specified threshold (20 pixels as default if 9 | % omitted) 10 | % * PASCAL Precision at the specified threshold (0.5 as default if omitted) 11 | % * Average Center Location error (CLE). 12 | % 13 | % The tracker positions and ground truth must be Nx4-matrices where N is 14 | % the number of time steps in the tracking. Each row has to be on the form 15 | % [c1, c2, s1, s2] where (c1, c2) is the center coordinate and s1 and s2 16 | % are the size in the first and second dimension respectively (the order of 17 | % x and y does not matter here). 
18 | 19 | if size(positions,1) ~= size(ground_truth,1), 20 | disp('Could not calculate precisions, because the number of ground') 21 | disp('truth frames does not match the number of tracked frames.') 22 | return 23 | end 24 | 25 | ground_truth = [ground_truth(:,1:2) + (ground_truth(:,3:4) - 1) / 2 , ground_truth(:,3:4)]; 26 | positions = [positions(:,1:2) + (positions(:,3:4) - 1) / 2 , positions(:,3:4)]; 27 | 28 | %calculate distances to ground truth over all frames 29 | distances = sqrt((positions(:,1) - ground_truth(:,1)).^2 + ... 30 | (positions(:,2) - ground_truth(:,2)).^2); 31 | distances(isnan(distances)) =Inf; 32 | 33 | %calculate distance precision 34 | 35 | %calculate average center location error (CLE) 36 | 37 | %calculate the overlap in each dimension 38 | overlap_height = min(positions(:,1) + positions(:,3)/2, ground_truth(:,1) + ground_truth(:,3)/2) ... 39 | - max(positions(:,1) - positions(:,3)/2, ground_truth(:,1) - ground_truth(:,3)/2); 40 | overlap_width = min(positions(:,2) + positions(:,4)/2, ground_truth(:,2) + ground_truth(:,4)/2) ... 
41 | - max(positions(:,2) - positions(:,4)/2, ground_truth(:,2) - ground_truth(:,4)/2); 42 | 43 | % if no overlap, set to zero 44 | overlap_height(overlap_height < 0) = 0; 45 | overlap_width(overlap_width < 0) = 0; 46 | 47 | % remove NaN values (should not exist any) 48 | valid_ind = ~isnan(overlap_height) & ~isnan(overlap_width); 49 | 50 | % calculate area 51 | overlap_area = overlap_height(valid_ind) .* overlap_width(valid_ind); 52 | tracked_area = positions(valid_ind,3) .* positions(valid_ind,4); 53 | ground_truth_area = ground_truth(valid_ind,3) .* ground_truth(valid_ind,4); 54 | 55 | % calculate PASCAL overlaps 56 | overlaps = overlap_area ./ (tracked_area + ground_truth_area - overlap_area+eps); 57 | 58 | end -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/load_video_info.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotAttribute_Curve/load_video_info.m -------------------------------------------------------------------------------- /plot-tools/plotAttribute_Curve/readme.txt: -------------------------------------------------------------------------------- 1 | Run: Li_res_attribute_comp.m 2 | You NEED change the values in Line 35-36 of "drawPlot_new.m"! 
-------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/BAE-Net/car.txt: -------------------------------------------------------------------------------- 1 | 219.0 87.0 32.0 15.0 2 | 218.0 87.0 32.0 15.0 3 | 216.0 87.0 33.0 14.0 4 | 215.0 88.0 33.0 14.0 5 | 213.0 88.0 32.0 14.0 6 | 212.0 88.0 32.0 14.0 7 | 210.0 88.0 32.0 14.0 8 | 209.0 89.0 32.0 14.0 9 | 208.0 89.0 31.0 14.0 10 | 206.0 89.0 32.0 14.0 11 | 205.0 89.0 32.0 14.0 12 | 203.0 89.0 32.0 14.0 13 | 202.0 89.0 32.0 14.0 14 | 199.0 90.0 33.0 14.0 15 | 198.0 90.0 32.0 14.0 16 | 197.0 90.0 32.0 14.0 17 | 195.0 90.0 34.0 15.0 18 | 193.0 90.0 34.0 14.0 19 | 191.0 91.0 34.0 14.0 20 | 190.0 91.0 33.0 14.0 21 | 188.0 91.0 33.0 14.0 22 | 187.0 91.0 34.0 15.0 23 | 185.0 91.0 35.0 14.0 24 | 183.0 91.0 34.0 14.0 25 | 181.0 91.0 34.0 14.0 26 | 180.0 91.0 33.0 14.0 27 | 178.0 91.0 34.0 15.0 28 | 177.0 92.0 34.0 14.0 29 | 173.0 92.0 34.0 14.0 30 | 173.0 92.0 34.0 15.0 31 | 171.0 92.0 33.0 15.0 32 | 169.0 92.0 34.0 15.0 33 | 165.0 92.0 35.0 15.0 34 | 164.0 92.0 34.0 15.0 35 | 163.0 93.0 34.0 15.0 36 | 162.0 94.0 35.0 15.0 37 | 159.0 94.0 35.0 15.0 38 | 157.0 94.0 36.0 15.0 39 | 156.0 94.0 36.0 15.0 40 | 154.0 94.0 36.0 15.0 41 | 153.0 94.0 36.0 15.0 42 | 150.0 94.0 37.0 15.0 43 | 148.0 95.0 37.0 15.0 44 | 146.0 95.0 37.0 16.0 45 | 145.0 96.0 37.0 16.0 46 | 143.0 96.0 37.0 15.0 47 | 141.0 96.0 37.0 15.0 48 | 139.0 96.0 37.0 16.0 49 | 137.0 96.0 37.0 16.0 50 | 135.0 96.0 37.0 15.0 51 | 132.0 96.0 37.0 16.0 52 | 131.0 97.0 37.0 16.0 53 | 129.0 97.0 38.0 16.0 54 | 126.0 98.0 38.0 15.0 55 | 125.0 98.0 38.0 16.0 56 | 123.0 98.0 38.0 16.0 57 | 121.0 98.0 37.0 16.0 58 | 119.0 98.0 38.0 16.0 59 | 117.0 99.0 38.0 16.0 60 | 115.0 99.0 38.0 16.0 61 | 113.0 99.0 38.0 16.0 62 | 110.0 99.0 38.0 16.0 63 | 108.0 100.0 39.0 16.0 64 | 106.0 100.0 39.0 16.0 65 | 104.0 100.0 38.0 16.0 66 | 101.0 100.0 38.0 16.0 67 | 99.0 101.0 39.0 16.0 68 | 96.0 101.0 39.0 15.0 69 | 95.0 101.0 39.0 
16.0 70 | 92.0 102.0 40.0 16.0 71 | 90.0 102.0 39.0 16.0 72 | 88.0 102.0 40.0 16.0 73 | 88.0 102.0 40.0 15.0 74 | 82.0 103.0 41.0 16.0 75 | 81.0 103.0 40.0 16.0 76 | 79.0 103.0 40.0 16.0 77 | 76.0 104.0 40.0 16.0 78 | 73.0 104.0 41.0 16.0 79 | 73.0 104.0 39.0 15.0 80 | 69.0 104.0 40.0 16.0 81 | 67.0 105.0 41.0 16.0 82 | 63.0 105.0 40.0 16.0 83 | 62.0 105.0 40.0 16.0 84 | 59.0 106.0 40.0 15.0 85 | 56.0 106.0 40.0 16.0 86 | 54.0 106.0 41.0 16.0 87 | 52.0 106.0 42.0 16.0 88 | 48.0 106.0 41.0 16.0 89 | 47.0 107.0 40.0 16.0 90 | 43.0 108.0 41.0 16.0 91 | 39.0 108.0 40.0 16.0 92 | 38.0 108.0 41.0 16.0 93 | 37.0 109.0 41.0 16.0 94 | 32.0 109.0 41.0 16.0 95 | 31.0 109.0 41.0 16.0 96 | 28.0 109.0 41.0 16.0 97 | 24.0 110.0 42.0 16.0 98 | 21.0 110.0 41.0 16.0 99 | 18.0 111.0 41.0 16.0 100 | 16.0 110.0 42.0 16.0 101 | 13.0 110.0 42.0 16.0 102 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/BAE-Net/kangaroo.txt: -------------------------------------------------------------------------------- 1 | 322.0 131.0 22.0 41.0 2 | 322.0 131.0 22.0 42.0 3 | 322.0 131.0 22.0 42.0 4 | 322.0 129.0 21.0 43.0 5 | 321.0 128.0 21.0 42.0 6 | 319.0 126.0 22.0 44.0 7 | 317.0 126.0 21.0 40.0 8 | 316.0 126.0 21.0 39.0 9 | 314.0 126.0 21.0 40.0 10 | 313.0 127.0 21.0 41.0 11 | 309.0 126.0 23.0 40.0 12 | 305.0 123.0 24.0 44.0 13 | 304.0 125.0 23.0 43.0 14 | 303.0 127.0 23.0 43.0 15 | 300.0 126.0 24.0 44.0 16 | 301.0 128.0 24.0 44.0 17 | 300.0 128.0 23.0 43.0 18 | 299.0 127.0 23.0 42.0 19 | 295.0 125.0 25.0 44.0 20 | 293.0 126.0 24.0 42.0 21 | 292.0 127.0 23.0 38.0 22 | 289.0 128.0 23.0 37.0 23 | 287.0 128.0 20.0 37.0 24 | 282.0 129.0 19.0 36.0 25 | 276.0 129.0 19.0 36.0 26 | 274.0 127.0 20.0 39.0 27 | 271.0 128.0 21.0 40.0 28 | 268.0 129.0 22.0 39.0 29 | 271.0 131.0 22.0 40.0 30 | 269.0 129.0 21.0 39.0 31 | 265.0 128.0 22.0 40.0 32 | 262.0 127.0 22.0 39.0 33 | 258.0 125.0 21.0 40.0 34 | 254.0 125.0 21.0 37.0 35 | 250.0 127.0 22.0 35.0 
36 | 247.0 124.0 20.0 38.0 37 | 239.0 122.0 21.0 38.0 38 | 237.0 124.0 20.0 38.0 39 | 232.0 125.0 22.0 39.0 40 | 229.0 127.0 22.0 38.0 41 | 229.0 129.0 22.0 37.0 42 | 227.0 128.0 24.0 38.0 43 | 224.0 126.0 23.0 38.0 44 | 222.0 125.0 22.0 39.0 45 | 219.0 125.0 22.0 39.0 46 | 216.0 125.0 21.0 37.0 47 | 212.0 126.0 21.0 33.0 48 | 206.0 123.0 22.0 35.0 49 | 203.0 124.0 21.0 35.0 50 | 199.0 121.0 20.0 37.0 51 | 194.0 122.0 20.0 37.0 52 | 191.0 125.0 21.0 38.0 53 | 189.0 128.0 22.0 37.0 54 | 188.0 127.0 22.0 37.0 55 | 186.0 126.0 22.0 38.0 56 | 184.0 124.0 20.0 39.0 57 | 182.0 124.0 19.0 38.0 58 | 178.0 121.0 19.0 39.0 59 | 172.0 122.0 20.0 36.0 60 | 169.0 123.0 20.0 33.0 61 | 166.0 123.0 21.0 35.0 62 | 162.0 123.0 22.0 35.0 63 | 158.0 122.0 19.0 35.0 64 | 153.0 123.0 19.0 35.0 65 | 149.0 123.0 20.0 39.0 66 | 147.0 124.0 21.0 39.0 67 | 145.0 125.0 22.0 38.0 68 | 144.0 125.0 21.0 38.0 69 | 143.0 125.0 20.0 35.0 70 | 141.0 124.0 19.0 34.0 71 | 138.0 124.0 19.0 35.0 72 | 135.0 124.0 18.0 34.0 73 | 130.0 122.0 18.0 34.0 74 | 127.0 122.0 19.0 34.0 75 | 120.0 120.0 21.0 36.0 76 | 117.0 121.0 21.0 36.0 77 | 114.0 121.0 21.0 38.0 78 | 112.0 123.0 19.0 37.0 79 | 110.0 125.0 19.0 37.0 80 | 110.0 126.0 20.0 37.0 81 | 108.0 125.0 19.0 37.0 82 | 106.0 122.0 18.0 38.0 83 | 100.0 118.0 19.0 39.0 84 | 97.0 118.0 20.0 39.0 85 | 93.0 117.0 22.0 40.0 86 | 92.0 118.0 21.0 38.0 87 | 90.0 119.0 20.0 38.0 88 | 88.0 120.0 19.0 35.0 89 | 83.0 121.0 19.0 35.0 90 | 77.0 122.0 18.0 34.0 91 | 75.0 122.0 18.0 34.0 92 | 71.0 121.0 20.0 37.0 93 | 70.0 122.0 20.0 37.0 94 | 69.0 122.0 21.0 37.0 95 | 67.0 121.0 21.0 35.0 96 | 66.0 121.0 20.0 35.0 97 | 65.0 122.0 20.0 34.0 98 | 62.0 121.0 19.0 35.0 99 | 59.0 120.0 19.0 35.0 100 | 56.0 122.0 19.0 33.0 101 | 50.0 120.0 19.0 33.0 102 | 51.0 123.0 19.0 32.0 103 | 50.0 123.0 19.0 32.0 104 | 40.0 122.0 19.0 34.0 105 | 39.0 123.0 17.0 32.0 106 | 36.0 123.0 19.0 35.0 107 | 37.0 125.0 20.0 34.0 108 | 35.0 124.0 20.0 35.0 109 | 32.0 121.0 20.0 35.0 110 | 31.0 120.0 
19.0 36.0 111 | 29.0 120.0 18.0 36.0 112 | 27.0 120.0 19.0 35.0 113 | 23.0 120.0 19.0 34.0 114 | 17.0 118.0 18.0 35.0 115 | 14.0 117.0 19.0 37.0 116 | 13.0 121.0 19.0 36.0 117 | 12.0 118.0 19.0 36.0 118 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/GT/bus_gt.txt: -------------------------------------------------------------------------------- 1 | 4 82 17 27 2 | 6 81 18 28 3 | 9 80 18 28 4 | 12 81 16 27 5 | 16 80 16 27 6 | 17 81 19 27 7 | 22 80 16 28 8 | 25 79 16 29 9 | 26 80 18 28 10 | 28 80 18 26 11 | 32 80 17 26 12 | 35 80 17 25 13 | 37 79 18 26 14 | 41 80 18 24 15 | 44 79 17 25 16 | 44 80 20 24 17 | 47 79 19 25 18 | 50 78 19 26 19 | 54 78 17 25 20 | 55 79 19 25 21 | 59 79 17 26 22 | 61 75 18 29 23 | 66 77 17 25 24 | 68 78 17 23 25 | 69 78 18 25 26 | 72 77 16 26 27 | 76 77 15 25 28 | 74 76 20 25 29 | 77 77 20 24 30 | 81 76 17 25 31 | 83 76 17 24 32 | 86 76 17 25 33 | 88 76 17 25 34 | 90 75 17 26 35 | 91 76 19 25 36 | 94 76 18 24 37 | 96 75 17 24 38 | 98 74 18 26 39 | 100 75 19 23 40 | 102 75 19 23 41 | 105 73 18 25 42 | 107 73 19 26 43 | 108 72 19 26 44 | 112 74 17 26 45 | 113 74 18 24 46 | 116 74 17 24 47 | 118 72 17 26 48 | 119 73 19 24 49 | 122 73 18 23 50 | 123 73 18 24 51 | 125 73 19 23 52 | 127 73 19 24 53 | 129 72 18 24 54 | 132 73 17 22 55 | 134 73 17 22 56 | 134 73 19 21 57 | 136 70 19 24 58 | 139 71 18 23 59 | 140 73 18 22 60 | 141 70 20 24 61 | 144 73 18 21 62 | 145 72 18 21 63 | 148 71 17 22 64 | 151 71 16 24 65 | 151 71 18 23 66 | 153 72 17 21 67 | 156 71 15 22 68 | 155 71 18 21 69 | 158 70 17 22 70 | 160 72 16 21 71 | 161 71 18 22 72 | 164 71 17 21 73 | 165 71 16 20 74 | 167 71 16 20 75 | 167 70 18 22 76 | 170 70 17 23 77 | 170 70 19 21 78 | 171 70 20 22 79 | 173 71 18 20 80 | 175 69 18 22 81 | 177 71 17 20 82 | 180 68 16 22 83 | 181 70 17 21 84 | 182 70 17 21 85 | 184 70 16 20 86 | 185 68 16 21 87 | 186 69 17 19 88 | 186 69 18 20 89 | 189 69 17 20 90 | 191 69 17 21 91 | 193 70 
16 20 92 | 192 68 18 21 93 | 195 70 16 19 94 | 196 69 17 20 95 | 197 69 16 20 96 | 198 69 17 20 97 | 200 68 16 21 98 | 201 68 18 21 99 | 202 69 18 19 100 | 204 69 16 19 101 | 204 69 17 21 102 | 205 68 17 21 103 | 205 67 19 21 104 | 208 67 17 21 105 | 208 68 18 20 106 | 208 68 20 20 107 | 211 67 17 22 108 | 213 67 16 22 109 | 214 69 17 19 110 | 216 68 15 19 111 | 218 69 15 18 112 | 218 68 17 20 113 | 219 68 16 19 114 | 222 68 13 18 115 | 220 68 16 20 116 | 222 68 15 18 117 | 223 67 15 20 118 | 223 67 17 19 119 | 223 66 18 20 120 | 224 67 18 19 121 | 225 67 17 19 122 | 228 68 15 18 123 | 227 66 16 19 124 | 230 66 15 21 125 | 229 66 16 20 126 | 230 65 17 23 127 | 232 66 15 20 128 | 233 66 17 20 129 | 233 66 17 20 130 | 233 66 17 19 131 | 238 65 15 21 132 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/GT/car2_gt.txt: -------------------------------------------------------------------------------- 1 | 89 83 22 15 2 | 89 83 22 16 3 | 88 83 24 15 4 | 89 84 24 15 5 | 90 83 23 15 6 | 90 84 25 13 7 | 92 85 25 14 8 | 91 85 25 13 9 | 91 83 26 16 10 | 93 84 25 14 11 | 93 84 24 14 12 | 94 83 23 16 13 | 93 84 24 15 14 | 94 84 24 15 15 | 96 83 21 15 16 | 94 83 25 15 17 | 94 85 27 15 18 | 96 85 25 14 19 | 95 83 27 16 20 | 97 84 27 16 21 | 98 84 25 16 22 | 98 84 26 16 23 | 98 84 26 16 24 | 100 85 26 16 25 | 99 84 27 16 26 | 101 84 26 16 27 | 103 85 24 15 28 | 102 84 26 16 29 | 104 86 25 15 30 | 104 85 25 16 31 | 105 86 26 15 32 | 106 84 26 17 33 | 105 86 27 15 34 | 107 86 27 15 35 | 106 85 28 16 36 | 107 85 27 16 37 | 108 86 28 15 38 | 109 86 28 16 39 | 110 85 28 17 40 | 111 85 27 16 41 | 112 86 27 16 42 | 113 87 28 16 43 | 112 85 29 18 44 | 113 88 29 15 45 | 114 86 29 18 46 | 114 86 29 17 47 | 114 86 29 17 48 | 116 86 29 17 49 | 116 86 30 17 50 | 118 85 30 19 51 | 119 85 31 19 52 | 119 86 32 18 53 | 120 85 32 19 54 | 120 86 33 18 55 | 120 86 33 18 56 | 122 86 33 18 57 | 123 87 33 17 58 | 125 86 31 18 59 | 125 86 
32 19 60 | 126 86 32 19 61 | 127 87 33 18 62 | 128 86 33 19 63 | 128 87 33 18 64 | 130 86 33 19 65 | 131 87 35 19 66 | 132 86 34 19 67 | 133 86 35 21 68 | 134 86 35 20 69 | 135 87 37 19 70 | 136 86 37 20 71 | 136 85 38 21 72 | 137 87 39 19 73 | 140 86 37 21 74 | 141 86 38 21 75 | 142 87 37 21 76 | 142 88 39 20 77 | 144 87 39 20 78 | 146 87 39 21 79 | 147 87 41 22 80 | 148 87 40 22 81 | 148 87 41 21 82 | 150 87 43 21 83 | 154 88 39 20 84 | 154 87 40 21 85 | 156 88 39 20 86 | 157 87 40 22 87 | 159 86 41 23 88 | 158 88 44 22 89 | 161 87 43 23 90 | 163 88 44 22 91 | 164 88 46 22 92 | 165 88 46 21 93 | 167 88 46 23 94 | 168 88 47 23 95 | 171 88 46 22 96 | 172 88 47 24 97 | 173 88 48 23 98 | 174 87 50 25 99 | 176 87 50 25 100 | 177 87 51 25 101 | 179 85 51 27 102 | 182 86 51 26 103 | 183 86 52 27 104 | 185 87 52 25 105 | 189 88 50 25 106 | 191 88 51 25 107 | 192 87 53 26 108 | 193 88 55 25 109 | 195 88 54 26 110 | 196 87 54 27 111 | 198 87 56 26 112 | 200 88 56 26 113 | 203 89 56 25 114 | 204 88 59 27 115 | 206 87 55 26 116 | 206 88 63 26 117 | 210 89 61 25 118 | 212 89 63 26 119 | 214 88 62 26 120 | 216 88 64 27 121 | 218 88 64 28 122 | 220 90 65 26 123 | 222 90 66 25 124 | 224 90 69 27 125 | 226 91 72 25 126 | 228 88 72 29 127 | 231 90 73 27 128 | 234 90 73 28 129 | 235 91 77 27 130 | 239 89 75 29 131 | 242 90 76 27 132 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/GT/car_gt.txt: -------------------------------------------------------------------------------- 1 | 219 87 32 15 2 | 220 86 29 14 3 | 217 86 32 18 4 | 217 86 32 17 5 | 216 86 31 17 6 | 213 86 31 16 7 | 211 87 32 17 8 | 209 86 34 14 9 | 211 87 31 15 10 | 207 89 31 12 11 | 205 89 32 13 12 | 200 89 36 13 13 | 202 88 35 15 14 | 198 89 36 14 15 | 197 89 35 15 16 | 194 89 38 14 17 | 193 89 36 15 18 | 192 90 36 15 19 | 192 91 35 14 20 | 190 91 34 14 21 | 188 91 34 13 22 | 185 91 38 14 23 | 182 89 40 16 24 | 182 91 38 14 25 | 184 91 31 14 26 | 
181 92 33 13 27 | 179 91 34 15 28 | 174 91 39 15 29 | 174 92 37 13 30 | 172 92 37 15 31 | 172 92 35 14 32 | 169 93 37 13 33 | 167 94 38 13 34 | 165 92 37 15 35 | 163 94 38 14 36 | 162 93 35 15 37 | 160 93 38 15 38 | 158 94 39 15 39 | 154 94 43 15 40 | 154 95 38 14 41 | 153 96 38 13 42 | 148 96 41 13 43 | 148 96 41 15 44 | 144 96 42 14 45 | 145 97 39 14 46 | 142 96 39 14 47 | 138 95 42 15 48 | 137 97 41 14 49 | 135 98 42 14 50 | 136 97 38 15 51 | 132 97 41 16 52 | 131 97 41 17 53 | 128 97 39 16 54 | 128 97 39 17 55 | 125 99 41 15 56 | 120 99 45 14 57 | 119 98 43 16 58 | 118 98 41 17 59 | 116 98 43 17 60 | 116 98 40 17 61 | 109 97 45 18 62 | 111 99 42 15 63 | 106 99 45 17 64 | 105 98 44 17 65 | 100 98 46 18 66 | 102 98 43 19 67 | 96 100 47 16 68 | 95 101 46 16 69 | 94 101 46 17 70 | 91 100 46 17 71 | 89 98 46 19 72 | 87 99 44 19 73 | 83 102 47 17 74 | 82 100 46 19 75 | 77 102 48 17 76 | 78 102 48 17 77 | 75 102 46 17 78 | 74 102 45 17 79 | 69 103 48 17 80 | 65 102 51 19 81 | 66 104 47 17 82 | 63 103 49 17 83 | 59 105 50 16 84 | 55 105 53 15 85 | 54 106 50 16 86 | 53 105 49 16 87 | 51 105 48 17 88 | 47 105 50 16 89 | 46 105 49 17 90 | 40 107 54 15 91 | 39 108 53 15 92 | 40 106 48 18 93 | 33 107 53 17 94 | 32 109 50 16 95 | 29 107 52 18 96 | 27 107 51 19 97 | 23 108 55 18 98 | 22 108 53 18 99 | 21 108 49 19 100 | 17 108 53 20 101 | 14 109 53 19 102 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/GT/coin_gt.txt: -------------------------------------------------------------------------------- 1 | 96 83 16 16 2 | 96 83 16 16 3 | 96 83 16 16 4 | 96 83 16 16 5 | 96 83 16 16 6 | 96 83 16 16 7 | 96 83 16 16 8 | 96 83 16 16 9 | 96 83 16 16 10 | 96 83 16 16 11 | 96 83 16 16 12 | 96 83 16 16 13 | 96 83 16 16 14 | 96 83 16 16 15 | 96 83 16 16 16 | 96 83 16 16 17 | 96 83 16 16 18 | 96 83 16 16 19 | 96 83 16 16 20 | 95 84 16 16 21 | 95 84 16 16 22 | 96 85 16 16 23 | 96 85 16 16 24 | 96 86 16 16 25 | 96 86 16 16 
26 | 96 86 16 16 27 | 96 86 16 16 28 | 96 86 16 16 29 | 96 86 16 16 30 | 94 86 16 16 31 | 92 86 16 16 32 | 89 88 16 16 33 | 87 90 16 16 34 | 84 92 16 16 35 | 82 93 16 16 36 | 80 95 16 16 37 | 78 96 16 16 38 | 76 96 16 16 39 | 72 96 16 16 40 | 71 96 16 16 41 | 71 96 16 16 42 | 71 97 16 16 43 | 72 97 16 16 44 | 73 97 16 16 45 | 76 97 16 16 46 | 78 97 16 16 47 | 80 95 16 16 48 | 83 93 16 16 49 | 85 92 16 16 50 | 87 91 16 16 51 | 88 91 16 16 52 | 89 90 16 16 53 | 90 91 16 16 54 | 91 91 16 16 55 | 93 91 16 16 56 | 93 91 16 16 57 | 93 93 16 16 58 | 92 95 16 16 59 | 91 98 16 16 60 | 91 99 16 16 61 | 93 99 16 16 62 | 95 100 16 16 63 | 96 100 16 16 64 | 97 101 16 16 65 | 98 101 16 16 66 | 99 100 16 16 67 | 99 100 16 16 68 | 102 100 16 16 69 | 102 100 16 16 70 | 103 100 16 16 71 | 103 100 16 16 72 | 103 100 16 16 73 | 104 100 16 16 74 | 104 100 16 16 75 | 104 99 16 16 76 | 104 99 16 16 77 | 102 98 16 16 78 | 102 97 16 16 79 | 99 95 16 16 80 | 98 94 16 16 81 | 96 94 16 16 82 | 92 94 16 16 83 | 90 94 16 16 84 | 89 94 16 16 85 | 87 93 16 16 86 | 87 92 16 16 87 | 86 91 16 16 88 | 86 91 16 16 89 | 87 90 16 16 90 | 87 90 16 16 91 | 87 90 16 16 92 | 87 90 16 16 93 | 87 90 16 16 94 | 87 90 16 16 95 | 89 90 16 16 96 | 89 90 16 16 97 | 90 91 16 16 98 | 91 91 16 16 99 | 92 92 16 16 100 | 92 92 16 16 101 | 93 93 16 16 102 | 94 92 16 16 103 | 97 92 16 16 104 | 97 92 16 16 105 | 99 92 16 16 106 | 100 93 16 16 107 | 102 94 16 16 108 | 103 95 16 16 109 | 106 94 16 16 110 | 107 94 16 16 111 | 110 94 16 16 112 | 110 94 16 16 113 | 111 94 16 16 114 | 112 94 16 16 115 | 113 94 16 16 116 | 114 95 16 16 117 | 115 95 16 16 118 | 117 95 16 16 119 | 117 95 16 16 120 | 119 95 16 16 121 | 120 95 16 16 122 | 121 95 16 16 123 | 121 95 16 16 124 | 124 96 16 16 125 | 126 96 16 16 126 | 128 96 16 16 127 | 130 96 16 16 128 | 133 97 16 16 129 | 133 97 16 16 130 | 134 96 16 16 131 | 134 96 16 16 132 | 133 96 16 16 133 | 133 96 16 16 134 | 133 96 16 16 135 | 130 96 16 16 136 | 131 95 16 16 137 | 131 95 16 16 
138 | 129 95 16 16 139 | 129 95 16 16 140 | 129 95 16 16 141 | 129 95 16 16 142 | 129 95 16 16 143 | 129 95 16 16 144 | 129 95 16 16 145 | 129 95 16 16 146 | 129 97 16 16 147 | 128 98 16 16 148 | 127 98 16 16 149 | 125 101 16 16 150 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/GT/kangaroo_gt.txt: -------------------------------------------------------------------------------- 1 | 322 131 22 41 2 | 322 126 20 46 3 | 321 128 22 43 4 | 320 127 23 46 5 | 318 125 24 45 6 | 317 123 25 46 7 | 316 121 21 47 8 | 313 121 24 45 9 | 313 122 22 46 10 | 312 123 20 45 11 | 309 122 22 47 12 | 309 125 18 44 13 | 307 126 20 44 14 | 306 129 19 44 15 | 303 130 20 43 16 | 302 130 22 42 17 | 300 129 22 42 18 | 296 127 24 42 19 | 292 124 26 45 20 | 289 125 28 42 21 | 288 124 26 43 22 | 284 125 25 41 23 | 283 126 23 44 24 | 279 128 21 38 25 | 277 130 18 38 26 | 275 129 19 41 27 | 274 132 19 40 28 | 269 132 21 38 29 | 267 133 22 36 30 | 263 130 25 40 31 | 259 128 26 40 32 | 256 128 27 38 33 | 252 126 25 39 34 | 251 125 23 40 35 | 247 127 21 37 36 | 242 126 23 38 37 | 241 128 18 37 38 | 237 131 19 36 39 | 233 131 20 38 40 | 231 131 19 38 41 | 227 131 21 36 42 | 223 130 24 37 43 | 220 130 25 37 44 | 217 128 25 36 45 | 214 127 24 36 46 | 211 127 24 34 47 | 209 127 21 36 48 | 204 127 21 35 49 | 202 128 18 34 50 | 198 129 19 33 51 | 195 131 19 32 52 | 193 132 16 33 53 | 189 134 20 33 54 | 187 132 19 34 55 | 182 129 24 36 56 | 177 127 25 35 57 | 176 126 23 34 58 | 171 126 26 34 59 | 172 128 20 32 60 | 166 124 21 36 61 | 163 125 19 35 62 | 159 127 19 34 63 | 157 128 17 32 64 | 153 130 19 32 65 | 150 130 19 34 66 | 148 131 18 34 67 | 144 132 20 30 68 | 142 129 21 32 69 | 138 127 23 33 70 | 135 127 23 33 71 | 134 126 20 33 72 | 130 125 24 33 73 | 127 125 20 34 74 | 124 125 20 32 75 | 120 127 20 31 76 | 118 127 20 33 77 | 114 128 19 32 78 | 112 130 19 32 79 | 108 132 20 29 80 | 105 132 21 29 81 | 104 130 20 31 82 | 102 128 22 
33 83 | 97 126 23 34 84 | 96 125 22 30 85 | 92 123 23 33 86 | 89 123 22 33 87 | 87 125 20 32 88 | 83 126 20 31 89 | 82 125 18 32 90 | 79 128 17 33 91 | 76 126 16 32 92 | 72 129 19 30 93 | 69 131 21 28 94 | 68 132 20 29 95 | 65 126 21 32 96 | 62 126 22 32 97 | 58 127 25 31 98 | 58 126 22 30 99 | 54 124 22 31 100 | 51 125 25 30 101 | 50 126 23 29 102 | 49 127 22 30 103 | 42 126 24 32 104 | 41 124 23 34 105 | 36 127 21 32 106 | 35 128 22 32 107 | 34 127 26 32 108 | 33 130 18 30 109 | 32 129 20 28 110 | 27 127 23 30 111 | 25 125 22 31 112 | 24 125 20 28 113 | 18 122 22 32 114 | 16 125 21 29 115 | 16 125 19 28 116 | 12 122 19 32 117 | 10 122 19 33 118 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/MHT/car.txt: -------------------------------------------------------------------------------- 1 | 219.0 87.0 32.0 15.0 2 | 218.0 87.0 32.0 15.0 3 | 218.0 87.0 31.0 14.0 4 | 216.0 87.0 30.0 14.0 5 | 215.0 87.0 30.0 14.0 6 | 213.0 88.0 30.0 14.0 7 | 211.0 88.0 30.0 14.0 8 | 210.0 88.0 30.0 14.0 9 | 208.0 88.0 30.0 14.0 10 | 207.0 88.0 30.0 14.0 11 | 205.0 88.0 31.0 14.0 12 | 204.0 89.0 30.0 14.0 13 | 202.0 89.0 30.0 14.0 14 | 201.0 89.0 31.0 14.0 15 | 199.0 89.0 31.0 14.0 16 | 197.0 89.0 30.0 14.0 17 | 196.0 90.0 30.0 14.0 18 | 194.0 90.0 30.0 14.0 19 | 193.0 90.0 30.0 14.0 20 | 191.0 90.0 31.0 14.0 21 | 190.0 90.0 30.0 14.0 22 | 188.0 90.0 30.0 14.0 23 | 186.0 90.0 30.0 14.0 24 | 185.0 90.0 30.0 14.0 25 | 183.0 90.0 31.0 14.0 26 | 181.0 90.0 31.0 14.0 27 | 178.0 90.0 32.0 15.0 28 | 178.0 91.0 31.0 14.0 29 | 177.0 91.0 31.0 14.0 30 | 175.0 92.0 31.0 14.0 31 | 173.0 92.0 30.0 14.0 32 | 171.0 92.0 31.0 14.0 33 | 169.0 92.0 31.0 14.0 34 | 167.0 93.0 31.0 14.0 35 | 166.0 93.0 31.0 14.0 36 | 163.0 93.0 32.0 15.0 37 | 161.0 93.0 33.0 15.0 38 | 160.0 93.0 33.0 15.0 39 | 158.0 93.0 33.0 15.0 40 | 156.0 94.0 33.0 15.0 41 | 153.0 93.0 34.0 16.0 42 | 151.0 93.0 34.0 16.0 43 | 149.0 94.0 35.0 16.0 44 | 147.0 94.0 34.0 
16.0 45 | 146.0 94.0 34.0 16.0 46 | 144.0 94.0 34.0 16.0 47 | 142.0 94.0 34.0 16.0 48 | 140.0 95.0 35.0 16.0 49 | 138.0 95.0 35.0 16.0 50 | 136.0 95.0 35.0 16.0 51 | 134.0 95.0 35.0 16.0 52 | 132.0 95.0 35.0 16.0 53 | 131.0 96.0 34.0 16.0 54 | 129.0 96.0 35.0 16.0 55 | 125.0 96.0 36.0 17.0 56 | 123.0 96.0 36.0 17.0 57 | 121.0 97.0 36.0 17.0 58 | 119.0 97.0 36.0 17.0 59 | 117.0 97.0 36.0 17.0 60 | 115.0 97.0 37.0 17.0 61 | 113.0 97.0 37.0 17.0 62 | 111.0 98.0 37.0 17.0 63 | 109.0 98.0 37.0 17.0 64 | 106.0 98.0 38.0 17.0 65 | 103.0 98.0 38.0 17.0 66 | 102.0 99.0 37.0 17.0 67 | 99.0 99.0 37.0 17.0 68 | 98.0 100.0 37.0 17.0 69 | 96.0 100.0 37.0 17.0 70 | 93.0 99.0 38.0 18.0 71 | 92.0 100.0 37.0 17.0 72 | 88.0 100.0 38.0 17.0 73 | 87.0 101.0 37.0 17.0 74 | 84.0 100.0 38.0 18.0 75 | 81.0 100.0 39.0 18.0 76 | 79.0 100.0 39.0 18.0 77 | 77.0 101.0 38.0 18.0 78 | 75.0 101.0 39.0 18.0 79 | 72.0 101.0 39.0 18.0 80 | 70.0 101.0 39.0 18.0 81 | 68.0 102.0 38.0 18.0 82 | 65.0 102.0 39.0 18.0 83 | 62.0 102.0 39.0 18.0 84 | 60.0 103.0 39.0 18.0 85 | 58.0 103.0 39.0 18.0 86 | 56.0 103.0 39.0 18.0 87 | 53.0 104.0 40.0 18.0 88 | 50.0 104.0 40.0 18.0 89 | 47.0 104.0 39.0 18.0 90 | 44.0 105.0 40.0 18.0 91 | 42.0 105.0 40.0 18.0 92 | 38.0 105.0 40.0 19.0 93 | 37.0 106.0 41.0 19.0 94 | 35.0 106.0 41.0 19.0 95 | 31.0 106.0 41.0 19.0 96 | 28.0 106.0 41.0 19.0 97 | 26.0 106.0 42.0 20.0 98 | 23.0 106.0 43.0 20.0 99 | 19.0 107.0 42.0 19.0 100 | 18.0 107.0 43.0 20.0 101 | 15.0 107.0 43.0 20.0 102 | -------------------------------------------------------------------------------- /plot-tools/plotRect/detection_res/SST-Net/car.txt: -------------------------------------------------------------------------------- 1 | 219.0 87.0 32.0 15.0 2 | 218.0 87.0 32.0 15.0 3 | 216.0 87.0 32.0 15.0 4 | 215.0 88.0 32.0 14.0 5 | 214.0 88.0 32.0 14.0 6 | 212.0 88.0 32.0 14.0 7 | 210.0 88.0 33.0 14.0 8 | 209.0 88.0 32.0 14.0 9 | 208.0 89.0 32.0 14.0 10 | 206.0 89.0 32.0 14.0 11 | 205.0 89.0 32.0 14.0 12 | 203.0 89.0 
32.0 14.0 13 | 201.0 89.0 32.0 14.0 14 | 200.0 90.0 33.0 14.0 15 | 199.0 90.0 33.0 14.0 16 | 197.0 90.0 32.0 14.0 17 | 195.0 90.0 33.0 14.0 18 | 193.0 90.0 33.0 14.0 19 | 191.0 91.0 34.0 14.0 20 | 190.0 91.0 33.0 14.0 21 | 189.0 91.0 33.0 14.0 22 | 187.0 91.0 33.0 14.0 23 | 185.0 91.0 33.0 14.0 24 | 184.0 91.0 33.0 14.0 25 | 182.0 92.0 33.0 14.0 26 | 182.0 91.0 33.0 14.0 27 | 181.0 92.0 33.0 14.0 28 | 177.0 92.0 33.0 14.0 29 | 175.0 92.0 33.0 14.0 30 | 175.0 92.0 33.0 14.0 31 | 172.0 92.0 34.0 14.0 32 | 170.0 93.0 35.0 15.0 33 | 168.0 93.0 35.0 15.0 34 | 166.0 93.0 35.0 15.0 35 | 163.0 94.0 35.0 15.0 36 | 161.0 94.0 36.0 15.0 37 | 160.0 94.0 36.0 15.0 38 | 159.0 94.0 36.0 15.0 39 | 156.0 94.0 36.0 15.0 40 | 154.0 94.0 37.0 15.0 41 | 152.0 94.0 37.0 15.0 42 | 149.0 95.0 38.0 15.0 43 | 147.0 95.0 39.0 15.0 44 | 146.0 95.0 38.0 15.0 45 | 144.0 95.0 38.0 15.0 46 | 142.0 96.0 38.0 15.0 47 | 141.0 96.0 38.0 15.0 48 | 138.0 96.0 38.0 15.0 49 | 137.0 96.0 38.0 15.0 50 | 134.0 97.0 39.0 16.0 51 | 133.0 97.0 38.0 16.0 52 | 132.0 97.0 37.0 15.0 53 | 128.0 97.0 39.0 15.0 54 | 127.0 98.0 38.0 15.0 55 | 126.0 98.0 38.0 15.0 56 | 123.0 98.0 39.0 15.0 57 | 120.0 98.0 39.0 15.0 58 | 119.0 99.0 38.0 15.0 59 | 117.0 99.0 37.0 15.0 60 | 115.0 99.0 38.0 15.0 61 | 112.0 99.0 38.0 15.0 62 | 111.0 99.0 38.0 15.0 63 | 109.0 100.0 38.0 15.0 64 | 107.0 100.0 38.0 15.0 65 | 103.0 100.0 38.0 15.0 66 | 103.0 101.0 36.0 14.0 67 | 99.0 101.0 38.0 15.0 68 | 98.0 101.0 38.0 15.0 69 | 96.0 101.0 37.0 15.0 70 | 95.0 102.0 37.0 15.0 71 | 91.0 102.0 38.0 15.0 72 | 87.0 102.0 41.0 16.0 73 | 87.0 102.0 40.0 15.0 74 | 83.0 102.0 41.0 16.0 75 | 83.0 103.0 40.0 16.0 76 | 79.0 103.0 41.0 16.0 77 | 76.0 103.0 41.0 16.0 78 | 73.0 103.0 42.0 16.0 79 | 71.0 104.0 42.0 16.0 80 | 70.0 104.0 41.0 16.0 81 | 67.0 104.0 41.0 15.0 82 | 65.0 103.0 37.0 17.0 83 | 61.0 105.0 41.0 15.0 84 | 60.0 105.0 40.0 15.0 85 | 57.0 106.0 42.0 15.0 86 | 54.0 106.0 43.0 16.0 87 | 53.0 106.0 43.0 15.0 88 | 49.0 107.0 43.0 15.0 89 | 46.0 
107.0 43.0 16.0 90 | 45.0 108.0 42.0 15.0 91 | 42.0 108.0 42.0 15.0 92 | 39.0 108.0 43.0 16.0 93 | 36.0 108.0 43.0 16.0 94 | 33.0 108.0 44.0 16.0 95 | 30.0 109.0 45.0 16.0 96 | 29.0 109.0 44.0 16.0 97 | 25.0 110.0 44.0 16.0 98 | 21.0 110.0 45.0 16.0 99 | 20.0 109.0 45.0 16.0 100 | 17.0 110.0 46.0 16.0 101 | 14.0 110.0 45.0 17.0 102 | -------------------------------------------------------------------------------- /plot-tools/plotRect/draw_visual_bar/plotTracking_legend_bar.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotRect/draw_visual_bar/plotTracking_legend_bar.m -------------------------------------------------------------------------------- /plot-tools/plotRect/draw_visual_bar/plotTracking_legend_bar_right.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/plotRect/draw_visual_bar/plotTracking_legend_bar_right.m -------------------------------------------------------------------------------- /plot-tools/plotRect/readme.txt: -------------------------------------------------------------------------------- 1 | run comp_demo_whisper_TIP.py 2 | -------------------------------------------------------------------------------- /plot-tools/readme.txt: -------------------------------------------------------------------------------- 1 | Firstly, you MUST run the code in "test-AUC-DP-on-H_F-videos" or "test-AUC-DP-on-color-videos" to generate the ".mat" for further ploting the AUC and DP curve. 2 | The code in "test-AUC-DP-on-H_F-videos" is used to generate the results on hyperspectral videos or false-color videos. 3 | The code in "test-AUC-DP-on-color-videos" is used to generate the results on color videos. 4 | 5 | Secondly, you can run the code in "plotAUC_DP_curve" to generate the AUC and DP curve. 
6 | 7 | Thirdly, you can run the code in "plotAttribute_Curve" to generate the AUC and DP curve in each challenging scene (i.e., attribute). 8 | 9 | Finally, you can run the code in plotRect (Python code) to generate the results of qualitative comparison. -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/Li_res.m: -------------------------------------------------------------------------------- 1 | clear all;clc; 2 | close all; 3 | trackerName = 'SEE-Net'; % tracker name 4 | root_path= ['../../../detection-res/HOT-tracker-results/' trackerName]; % src path 5 | base_path=root_path; 6 | addpath(genpath('../tools')); 7 | addpath(genpath('../hyperspectralToolbox')) 8 | addpath(genpath('../toolbox')) 9 | 10 | videos={'ball';'basketball';'board';'book';'bus';'bus2';'campus';'car';'car2';'car3';'card';'coin';'coke';'drive';'excavator';'face';'face2';'forest';'forest2';'fruit';'hand';'kangaroo';'paper';'pedestrain';'player';'playground';'rubik';'student';'toy1';'toy2';'worker';'pedestrian2';'rider1';'rider2';'trucker'}; 11 | index=[1:35]; 12 | 13 | 14 | distance_precision_threshold=0:50; 15 | PASCAL_threshold=0:0.02:1; 16 | 17 | saveOTB = 0; 18 | cle = 0; 19 | dp = 0; 20 | OP = 0; 21 | for i=1:35 22 | videos{index(i)}; 23 | [seq, ground_truth] = load_video_info(videos{index(i)}); 24 | res = dlmread(strcat(base_path,'/',videos{index(i)},'.txt')); 25 | [distance_rec(i,:),PASCAL_rec(i,:),average_cle_rec(i,:)]= computeMetric(res,ground_truth,distance_precision_threshold,PASCAL_threshold); 26 | 27 | end 28 | 29 | % AUC, DP 30 | res = [mean(mean(PASCAL_rec(index,2:51))) ,mean(distance_rec(index,21))] 31 | 32 | ss = sprintf('%.4f-%.4f.mat', res(1), res(2)); 33 | savename = strcat(trackerName, '-', ss); 34 | save(savename, 'PASCAL_rec', 'average_cle_rec', 'distance_rec'); 35 | 36 | -------------------------------------------------------------------------------- 
/plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/computeArea.m: -------------------------------------------------------------------------------- 1 | 2 | function areaBB = computeArea(bb) 3 | %computes area of the bb=[xmin ymin xmax ymax] 4 | 5 | if ((bb(1) > bb(3)) || (bb(2) > bb(4))) 6 | areaBB = 0; 7 | else 8 | areaBB = (bb(3) - bb(1) + 1) * (bb(4) - bb(2) + 1); 9 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/computeDistance.m: -------------------------------------------------------------------------------- 1 | function distances=computeDistance(positions, ground_truth) 2 | distances = sqrt((positions(:,1) - ground_truth(:,1)).^2 + ... 3 | (positions(:,2) - ground_truth(:,2)).^2); 4 | distances(isnan(distances)) = []; 5 | 6 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/computeIntersectionArea.m: -------------------------------------------------------------------------------- 1 | function areaIntersection = computeIntersectionArea(bb1,bb2) 2 | %compute intersection anrea of bb1 and bb2 3 | %bb1 and bb2 - bounding boxes 4 | %bbi = [xmin ymin xmax ymax] for i=1,2 5 | 6 | xmin = max(bb1(1),bb2(1)); 7 | xmax = min(bb1(3),bb2(3)); 8 | ymin = max(bb1(2),bb2(2)); 9 | ymax = min(bb1(4),bb2(4)); 10 | 11 | areaIntersection = computeArea([xmin ymin xmax ymax]); 12 | 13 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/computeMetric.m: -------------------------------------------------------------------------------- 1 | function [distance_rec,PASCAL_rec,average_cle_rec]= computeMetric(pd_boxes,ground_truth,distance_precision_threshold,PASCAL_threshold) 2 | PASCAL_rec=zeros(1,length(PASCAL_threshold)); 3 | average_cle_rec=zeros(1,length(PASCAL_threshold)); 4 | 
distance_rec=zeros(1,length(distance_precision_threshold)); 5 | for j=1:length(distance_precision_threshold) 6 | [distance_rec(j),PASCAL_rec(j),average_cle_rec(j)]= ... 7 | compute_performance_measures(pd_boxes, ground_truth,distance_precision_threshold(j),PASCAL_threshold(j)); 8 | end 9 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/computePascalScore.m: -------------------------------------------------------------------------------- 1 | function pascalScore = computePascalScore(bb1,bb2) 2 | %compute the Pascal score of the bb1, bb2 (intersection/union) 3 | 4 | intersectionArea = computeIntersectionArea(bb1,bb2); 5 | pascalScore = intersectionArea/(computeArea(bb1)+computeArea(bb2)-intersectionArea); 6 | return -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/compute_relaibitlity.m: -------------------------------------------------------------------------------- 1 | function [overlaps,distances] = ... 2 | compute_relaibitlity(positions, ground_truth) 3 | 4 | % [distance_precision, PASCAL_precision, average_center_location_error] = ... 5 | % compute_performance_measures(positions, ground_truth, distance_precision_threshold, PASCAL_threshold) 6 | % 7 | % For the given tracker output positions and ground truth it computes the: 8 | % * Distance Precision at the specified threshold (20 pixels as default if 9 | % omitted) 10 | % * PASCAL Precision at the specified threshold (0.5 as default if omitted) 11 | % * Average Center Location error (CLE). 12 | % 13 | % The tracker positions and ground truth must be Nx4-matrices where N is 14 | % the number of time steps in the tracking. 
Each row has to be on the form 15 | % [c1, c2, s1, s2] where (c1, c2) is the center coordinate and s1 and s2 16 | % are the size in the first and second dimension respectively (the order of 17 | % x and y does not matter here). 18 | 19 | if size(positions,1) ~= size(ground_truth,1), 20 | disp('Could not calculate precisions, because the number of ground') 21 | disp('truth frames does not match the number of tracked frames.') 22 | return 23 | end 24 | 25 | ground_truth = [ground_truth(:,1:2) + (ground_truth(:,3:4) - 1) / 2 , ground_truth(:,3:4)]; 26 | positions = [positions(:,1:2) + (positions(:,3:4) - 1) / 2 , positions(:,3:4)]; 27 | 28 | %calculate distances to ground truth over all frames 29 | distances = sqrt((positions(:,1) - ground_truth(:,1)).^2 + ... 30 | (positions(:,2) - ground_truth(:,2)).^2); 31 | distances(isnan(distances)) =Inf; 32 | 33 | %calculate distance precision 34 | 35 | %calculate average center location error (CLE) 36 | 37 | %calculate the overlap in each dimension 38 | overlap_height = min(positions(:,1) + positions(:,3)/2, ground_truth(:,1) + ground_truth(:,3)/2) ... 39 | - max(positions(:,1) - positions(:,3)/2, ground_truth(:,1) - ground_truth(:,3)/2); 40 | overlap_width = min(positions(:,2) + positions(:,4)/2, ground_truth(:,2) + ground_truth(:,4)/2) ... 
41 | - max(positions(:,2) - positions(:,4)/2, ground_truth(:,2) - ground_truth(:,4)/2); 42 | 43 | % if no overlap, set to zero 44 | overlap_height(overlap_height < 0) = 0; 45 | overlap_width(overlap_width < 0) = 0; 46 | 47 | % remove NaN values (should not exist any) 48 | valid_ind = ~isnan(overlap_height) & ~isnan(overlap_width); 49 | 50 | % calculate area 51 | overlap_area = overlap_height(valid_ind) .* overlap_width(valid_ind); 52 | tracked_area = positions(valid_ind,3) .* positions(valid_ind,4); 53 | ground_truth_area = ground_truth(valid_ind,3) .* ground_truth(valid_ind,4); 54 | 55 | % calculate PASCAL overlaps 56 | overlaps = overlap_area ./ (tracked_area + ground_truth_area - overlap_area+eps); 57 | 58 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/gt_falsecolor_website/bus_gt.txt: -------------------------------------------------------------------------------- 1 | 4 82 17 27 2 | 6 81 18 28 3 | 9 80 18 28 4 | 12 81 16 27 5 | 16 80 16 27 6 | 17 81 19 27 7 | 22 80 16 28 8 | 25 79 16 29 9 | 26 80 18 28 10 | 28 80 18 26 11 | 32 80 17 26 12 | 35 80 17 25 13 | 37 79 18 26 14 | 41 80 18 24 15 | 44 79 17 25 16 | 44 80 20 24 17 | 47 79 19 25 18 | 50 78 19 26 19 | 54 78 17 25 20 | 55 79 19 25 21 | 59 79 17 26 22 | 61 75 18 29 23 | 66 77 17 25 24 | 68 78 17 23 25 | 69 78 18 25 26 | 72 77 16 26 27 | 76 77 15 25 28 | 74 76 20 25 29 | 77 77 20 24 30 | 81 76 17 25 31 | 83 76 17 24 32 | 86 76 17 25 33 | 88 76 17 25 34 | 90 75 17 26 35 | 91 76 19 25 36 | 94 76 18 24 37 | 96 75 17 24 38 | 98 74 18 26 39 | 100 75 19 23 40 | 102 75 19 23 41 | 105 73 18 25 42 | 107 73 19 26 43 | 108 72 19 26 44 | 112 74 17 26 45 | 113 74 18 24 46 | 116 74 17 24 47 | 118 72 17 26 48 | 119 73 19 24 49 | 122 73 18 23 50 | 123 73 18 24 51 | 125 73 19 23 52 | 127 73 19 24 53 | 129 72 18 24 54 | 132 73 17 22 55 | 134 73 17 22 56 | 134 73 19 21 57 | 136 70 19 24 58 | 139 71 18 23 59 | 140 73 18 22 60 | 141 
70 20 24 61 | 144 73 18 21 62 | 145 72 18 21 63 | 148 71 17 22 64 | 151 71 16 24 65 | 151 71 18 23 66 | 153 72 17 21 67 | 156 71 15 22 68 | 155 71 18 21 69 | 158 70 17 22 70 | 160 72 16 21 71 | 161 71 18 22 72 | 164 71 17 21 73 | 165 71 16 20 74 | 167 71 16 20 75 | 167 70 18 22 76 | 170 70 17 23 77 | 170 70 19 21 78 | 171 70 20 22 79 | 173 71 18 20 80 | 175 69 18 22 81 | 177 71 17 20 82 | 180 68 16 22 83 | 181 70 17 21 84 | 182 70 17 21 85 | 184 70 16 20 86 | 185 68 16 21 87 | 186 69 17 19 88 | 186 69 18 20 89 | 189 69 17 20 90 | 191 69 17 21 91 | 193 70 16 20 92 | 192 68 18 21 93 | 195 70 16 19 94 | 196 69 17 20 95 | 197 69 16 20 96 | 198 69 17 20 97 | 200 68 16 21 98 | 201 68 18 21 99 | 202 69 18 19 100 | 204 69 16 19 101 | 204 69 17 21 102 | 205 68 17 21 103 | 205 67 19 21 104 | 208 67 17 21 105 | 208 68 18 20 106 | 208 68 20 20 107 | 211 67 17 22 108 | 213 67 16 22 109 | 214 69 17 19 110 | 216 68 15 19 111 | 218 69 15 18 112 | 218 68 17 20 113 | 219 68 16 19 114 | 222 68 13 18 115 | 220 68 16 20 116 | 222 68 15 18 117 | 223 67 15 20 118 | 223 67 17 19 119 | 223 66 18 20 120 | 224 67 18 19 121 | 225 67 17 19 122 | 228 68 15 18 123 | 227 66 16 19 124 | 230 66 15 21 125 | 229 66 16 20 126 | 230 65 17 23 127 | 232 66 15 20 128 | 233 66 17 20 129 | 233 66 17 20 130 | 233 66 17 19 131 | 238 65 15 21 132 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/gt_falsecolor_website/car2_gt.txt: -------------------------------------------------------------------------------- 1 | 89 83 22 15 2 | 89 83 22 16 3 | 88 83 24 15 4 | 89 84 24 15 5 | 90 83 23 15 6 | 90 84 25 13 7 | 92 85 25 14 8 | 91 85 25 13 9 | 91 83 26 16 10 | 93 84 25 14 11 | 93 84 24 14 12 | 94 83 23 16 13 | 93 84 24 15 14 | 94 84 24 15 15 | 96 83 21 15 16 | 94 83 25 15 17 | 94 85 27 15 18 | 96 85 25 14 19 | 95 83 27 16 20 | 97 84 27 16 21 | 98 84 25 16 22 | 98 84 26 16 23 | 98 84 26 16 24 | 100 85 26 16 25 | 99 84 27 16 26 
| 101 84 26 16 27 | 103 85 24 15 28 | 102 84 26 16 29 | 104 86 25 15 30 | 104 85 25 16 31 | 105 86 26 15 32 | 106 84 26 17 33 | 105 86 27 15 34 | 107 86 27 15 35 | 106 85 28 16 36 | 107 85 27 16 37 | 108 86 28 15 38 | 109 86 28 16 39 | 110 85 28 17 40 | 111 85 27 16 41 | 112 86 27 16 42 | 113 87 28 16 43 | 112 85 29 18 44 | 113 88 29 15 45 | 114 86 29 18 46 | 114 86 29 17 47 | 114 86 29 17 48 | 116 86 29 17 49 | 116 86 30 17 50 | 118 85 30 19 51 | 119 85 31 19 52 | 119 86 32 18 53 | 120 85 32 19 54 | 120 86 33 18 55 | 120 86 33 18 56 | 122 86 33 18 57 | 123 87 33 17 58 | 125 86 31 18 59 | 125 86 32 19 60 | 126 86 32 19 61 | 127 87 33 18 62 | 128 86 33 19 63 | 128 87 33 18 64 | 130 86 33 19 65 | 131 87 35 19 66 | 132 86 34 19 67 | 133 86 35 21 68 | 134 86 35 20 69 | 135 87 37 19 70 | 136 86 37 20 71 | 136 85 38 21 72 | 137 87 39 19 73 | 140 86 37 21 74 | 141 86 38 21 75 | 142 87 37 21 76 | 142 88 39 20 77 | 144 87 39 20 78 | 146 87 39 21 79 | 147 87 41 22 80 | 148 87 40 22 81 | 148 87 41 21 82 | 150 87 43 21 83 | 154 88 39 20 84 | 154 87 40 21 85 | 156 88 39 20 86 | 157 87 40 22 87 | 159 86 41 23 88 | 158 88 44 22 89 | 161 87 43 23 90 | 163 88 44 22 91 | 164 88 46 22 92 | 165 88 46 21 93 | 167 88 46 23 94 | 168 88 47 23 95 | 171 88 46 22 96 | 172 88 47 24 97 | 173 88 48 23 98 | 174 87 50 25 99 | 176 87 50 25 100 | 177 87 51 25 101 | 179 85 51 27 102 | 182 86 51 26 103 | 183 86 52 27 104 | 185 87 52 25 105 | 189 88 50 25 106 | 191 88 51 25 107 | 192 87 53 26 108 | 193 88 55 25 109 | 195 88 54 26 110 | 196 87 54 27 111 | 198 87 56 26 112 | 200 88 56 26 113 | 203 89 56 25 114 | 204 88 59 27 115 | 206 87 55 26 116 | 206 88 63 26 117 | 210 89 61 25 118 | 212 89 63 26 119 | 214 88 62 26 120 | 216 88 64 27 121 | 218 88 64 28 122 | 220 90 65 26 123 | 222 90 66 25 124 | 224 90 69 27 125 | 226 91 72 25 126 | 228 88 72 29 127 | 231 90 73 27 128 | 234 90 73 28 129 | 235 91 77 27 130 | 239 89 75 29 131 | 242 90 76 27 132 | 
-------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/gt_falsecolor_website/car_gt.txt: -------------------------------------------------------------------------------- 1 | 219 87 32 15 2 | 220 86 29 14 3 | 217 86 32 18 4 | 217 86 32 17 5 | 216 86 31 17 6 | 213 86 31 16 7 | 211 87 32 17 8 | 209 86 34 14 9 | 211 87 31 15 10 | 207 89 31 12 11 | 205 89 32 13 12 | 200 89 36 13 13 | 202 88 35 15 14 | 198 89 36 14 15 | 197 89 35 15 16 | 194 89 38 14 17 | 193 89 36 15 18 | 192 90 36 15 19 | 192 91 35 14 20 | 190 91 34 14 21 | 188 91 34 13 22 | 185 91 38 14 23 | 182 89 40 16 24 | 182 91 38 14 25 | 184 91 31 14 26 | 181 92 33 13 27 | 179 91 34 15 28 | 174 91 39 15 29 | 174 92 37 13 30 | 172 92 37 15 31 | 172 92 35 14 32 | 169 93 37 13 33 | 167 94 38 13 34 | 165 92 37 15 35 | 163 94 38 14 36 | 162 93 35 15 37 | 160 93 38 15 38 | 158 94 39 15 39 | 154 94 43 15 40 | 154 95 38 14 41 | 153 96 38 13 42 | 148 96 41 13 43 | 148 96 41 15 44 | 144 96 42 14 45 | 145 97 39 14 46 | 142 96 39 14 47 | 138 95 42 15 48 | 137 97 41 14 49 | 135 98 42 14 50 | 136 97 38 15 51 | 132 97 41 16 52 | 131 97 41 17 53 | 128 97 39 16 54 | 128 97 39 17 55 | 125 99 41 15 56 | 120 99 45 14 57 | 119 98 43 16 58 | 118 98 41 17 59 | 116 98 43 17 60 | 116 98 40 17 61 | 109 97 45 18 62 | 111 99 42 15 63 | 106 99 45 17 64 | 105 98 44 17 65 | 100 98 46 18 66 | 102 98 43 19 67 | 96 100 47 16 68 | 95 101 46 16 69 | 94 101 46 17 70 | 91 100 46 17 71 | 89 98 46 19 72 | 87 99 44 19 73 | 83 102 47 17 74 | 82 100 46 19 75 | 77 102 48 17 76 | 78 102 48 17 77 | 75 102 46 17 78 | 74 102 45 17 79 | 69 103 48 17 80 | 65 102 51 19 81 | 66 104 47 17 82 | 63 103 49 17 83 | 59 105 50 16 84 | 55 105 53 15 85 | 54 106 50 16 86 | 53 105 49 16 87 | 51 105 48 17 88 | 47 105 50 16 89 | 46 105 49 17 90 | 40 107 54 15 91 | 39 108 53 15 92 | 40 106 48 18 93 | 33 107 53 17 94 | 32 109 50 16 95 | 29 107 52 18 96 | 27 107 51 19 97 | 23 108 55 18 98 | 22 108 
53 18 99 | 21 108 49 19 100 | 17 108 53 20 101 | 14 109 53 19 102 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/gt_falsecolor_website/coin_gt.txt: -------------------------------------------------------------------------------- 1 | 96 83 16 16 2 | 96 83 16 16 3 | 96 83 16 16 4 | 96 83 16 16 5 | 96 83 16 16 6 | 96 83 16 16 7 | 96 83 16 16 8 | 96 83 16 16 9 | 96 83 16 16 10 | 96 83 16 16 11 | 96 83 16 16 12 | 96 83 16 16 13 | 96 83 16 16 14 | 96 83 16 16 15 | 96 83 16 16 16 | 96 83 16 16 17 | 96 83 16 16 18 | 96 83 16 16 19 | 96 83 16 16 20 | 95 84 16 16 21 | 95 84 16 16 22 | 96 85 16 16 23 | 96 85 16 16 24 | 96 86 16 16 25 | 96 86 16 16 26 | 96 86 16 16 27 | 96 86 16 16 28 | 96 86 16 16 29 | 96 86 16 16 30 | 94 86 16 16 31 | 92 86 16 16 32 | 89 88 16 16 33 | 87 90 16 16 34 | 84 92 16 16 35 | 82 93 16 16 36 | 80 95 16 16 37 | 78 96 16 16 38 | 76 96 16 16 39 | 72 96 16 16 40 | 71 96 16 16 41 | 71 96 16 16 42 | 71 97 16 16 43 | 72 97 16 16 44 | 73 97 16 16 45 | 76 97 16 16 46 | 78 97 16 16 47 | 80 95 16 16 48 | 83 93 16 16 49 | 85 92 16 16 50 | 87 91 16 16 51 | 88 91 16 16 52 | 89 90 16 16 53 | 90 91 16 16 54 | 91 91 16 16 55 | 93 91 16 16 56 | 93 91 16 16 57 | 93 93 16 16 58 | 92 95 16 16 59 | 91 98 16 16 60 | 91 99 16 16 61 | 93 99 16 16 62 | 95 100 16 16 63 | 96 100 16 16 64 | 97 101 16 16 65 | 98 101 16 16 66 | 99 100 16 16 67 | 99 100 16 16 68 | 102 100 16 16 69 | 102 100 16 16 70 | 103 100 16 16 71 | 103 100 16 16 72 | 103 100 16 16 73 | 104 100 16 16 74 | 104 100 16 16 75 | 104 99 16 16 76 | 104 99 16 16 77 | 102 98 16 16 78 | 102 97 16 16 79 | 99 95 16 16 80 | 98 94 16 16 81 | 96 94 16 16 82 | 92 94 16 16 83 | 90 94 16 16 84 | 89 94 16 16 85 | 87 93 16 16 86 | 87 92 16 16 87 | 86 91 16 16 88 | 86 91 16 16 89 | 87 90 16 16 90 | 87 90 16 16 91 | 87 90 16 16 92 | 87 90 16 16 93 | 87 90 16 16 94 | 87 90 16 16 95 | 89 90 16 16 96 | 89 90 16 16 97 | 90 91 16 16 98 | 91 91 16 
16 99 | 92 92 16 16 100 | 92 92 16 16 101 | 93 93 16 16 102 | 94 92 16 16 103 | 97 92 16 16 104 | 97 92 16 16 105 | 99 92 16 16 106 | 100 93 16 16 107 | 102 94 16 16 108 | 103 95 16 16 109 | 106 94 16 16 110 | 107 94 16 16 111 | 110 94 16 16 112 | 110 94 16 16 113 | 111 94 16 16 114 | 112 94 16 16 115 | 113 94 16 16 116 | 114 95 16 16 117 | 115 95 16 16 118 | 117 95 16 16 119 | 117 95 16 16 120 | 119 95 16 16 121 | 120 95 16 16 122 | 121 95 16 16 123 | 121 95 16 16 124 | 124 96 16 16 125 | 126 96 16 16 126 | 128 96 16 16 127 | 130 96 16 16 128 | 133 97 16 16 129 | 133 97 16 16 130 | 134 96 16 16 131 | 134 96 16 16 132 | 133 96 16 16 133 | 133 96 16 16 134 | 133 96 16 16 135 | 130 96 16 16 136 | 131 95 16 16 137 | 131 95 16 16 138 | 129 95 16 16 139 | 129 95 16 16 140 | 129 95 16 16 141 | 129 95 16 16 142 | 129 95 16 16 143 | 129 95 16 16 144 | 129 95 16 16 145 | 129 95 16 16 146 | 129 97 16 16 147 | 128 98 16 16 148 | 127 98 16 16 149 | 125 101 16 16 150 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/gt_falsecolor_website/kangaroo_gt.txt: -------------------------------------------------------------------------------- 1 | 322 131 22 41 2 | 322 126 20 46 3 | 321 128 22 43 4 | 320 127 23 46 5 | 318 125 24 45 6 | 317 123 25 46 7 | 316 121 21 47 8 | 313 121 24 45 9 | 313 122 22 46 10 | 312 123 20 45 11 | 309 122 22 47 12 | 309 125 18 44 13 | 307 126 20 44 14 | 306 129 19 44 15 | 303 130 20 43 16 | 302 130 22 42 17 | 300 129 22 42 18 | 296 127 24 42 19 | 292 124 26 45 20 | 289 125 28 42 21 | 288 124 26 43 22 | 284 125 25 41 23 | 283 126 23 44 24 | 279 128 21 38 25 | 277 130 18 38 26 | 275 129 19 41 27 | 274 132 19 40 28 | 269 132 21 38 29 | 267 133 22 36 30 | 263 130 25 40 31 | 259 128 26 40 32 | 256 128 27 38 33 | 252 126 25 39 34 | 251 125 23 40 35 | 247 127 21 37 36 | 242 126 23 38 37 | 241 128 18 37 38 | 237 131 19 36 39 | 233 131 20 38 40 | 231 131 19 38 41 | 227 131 21 36 
42 | 223 130 24 37 43 | 220 130 25 37 44 | 217 128 25 36 45 | 214 127 24 36 46 | 211 127 24 34 47 | 209 127 21 36 48 | 204 127 21 35 49 | 202 128 18 34 50 | 198 129 19 33 51 | 195 131 19 32 52 | 193 132 16 33 53 | 189 134 20 33 54 | 187 132 19 34 55 | 182 129 24 36 56 | 177 127 25 35 57 | 176 126 23 34 58 | 171 126 26 34 59 | 172 128 20 32 60 | 166 124 21 36 61 | 163 125 19 35 62 | 159 127 19 34 63 | 157 128 17 32 64 | 153 130 19 32 65 | 150 130 19 34 66 | 148 131 18 34 67 | 144 132 20 30 68 | 142 129 21 32 69 | 138 127 23 33 70 | 135 127 23 33 71 | 134 126 20 33 72 | 130 125 24 33 73 | 127 125 20 34 74 | 124 125 20 32 75 | 120 127 20 31 76 | 118 127 20 33 77 | 114 128 19 32 78 | 112 130 19 32 79 | 108 132 20 29 80 | 105 132 21 29 81 | 104 130 20 31 82 | 102 128 22 33 83 | 97 126 23 34 84 | 96 125 22 30 85 | 92 123 23 33 86 | 89 123 22 33 87 | 87 125 20 32 88 | 83 126 20 31 89 | 82 125 18 32 90 | 79 128 17 33 91 | 76 126 16 32 92 | 72 129 19 30 93 | 69 131 21 28 94 | 68 132 20 29 95 | 65 126 21 32 96 | 62 126 22 32 97 | 58 127 25 31 98 | 58 126 22 30 99 | 54 124 22 31 100 | 51 125 25 30 101 | 50 126 23 29 102 | 49 127 22 30 103 | 42 126 24 32 104 | 41 124 23 34 105 | 36 127 21 32 106 | 35 128 22 32 107 | 34 127 26 32 108 | 33 130 18 30 109 | 32 129 20 28 110 | 27 127 23 30 111 | 25 125 22 31 112 | 24 125 20 28 113 | 18 122 22 32 114 | 16 125 21 29 115 | 16 125 19 28 116 | 12 122 19 32 117 | 10 122 19 33 118 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/load_video_info.m: -------------------------------------------------------------------------------- 1 | function [seq, ground_truth] = load_video_info(video_name) 2 | 3 | ground_truth = dlmread(['./gt_falsecolor_website/' video_name '_gt.txt']); 4 | 5 | seq.len = size(ground_truth, 1); 6 | seq.init_rect = ground_truth(1,:); 7 | 8 | end 9 | 10 | -------------------------------------------------------------------------------- 
/plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/mergeMulImageTo3DImahe.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/mergeMulImageTo3DImahe.m -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-H_F-videos/testSingleAUCAndDP/readme.txt: -------------------------------------------------------------------------------- 1 | run Li_res.m -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/Li_res.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/Li_res.m -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/computeArea.m: -------------------------------------------------------------------------------- 1 | 2 | function areaBB = computeArea(bb) 3 | %computes area of the bb=[xmin ymin xmax ymax] 4 | 5 | if ((bb(1) > bb(3)) || (bb(2) > bb(4))) 6 | areaBB = 0; 7 | else 8 | areaBB = (bb(3) - bb(1) + 1) * (bb(4) - bb(2) + 1); 9 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/computeDistance.m: -------------------------------------------------------------------------------- 1 | function distances=computeDistance(positions, ground_truth) 2 | distances = sqrt((positions(:,1) - ground_truth(:,1)).^2 + ... 
3 | (positions(:,2) - ground_truth(:,2)).^2); 4 | distances(isnan(distances)) = []; 5 | 6 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/computeIntersectionArea.m: -------------------------------------------------------------------------------- 1 | function areaIntersection = computeIntersectionArea(bb1,bb2) 2 | %compute intersection anrea of bb1 and bb2 3 | %bb1 and bb2 - bounding boxes 4 | %bbi = [xmin ymin xmax ymax] for i=1,2 5 | 6 | xmin = max(bb1(1),bb2(1)); 7 | xmax = min(bb1(3),bb2(3)); 8 | ymin = max(bb1(2),bb2(2)); 9 | ymax = min(bb1(4),bb2(4)); 10 | 11 | areaIntersection = computeArea([xmin ymin xmax ymax]); 12 | 13 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/computeMetric.m: -------------------------------------------------------------------------------- 1 | function [distance_rec,PASCAL_rec,average_cle_rec]= computeMetric(pd_boxes,ground_truth,distance_precision_threshold,PASCAL_threshold) 2 | PASCAL_rec=zeros(1,length(PASCAL_threshold)); 3 | average_cle_rec=zeros(1,length(PASCAL_threshold)); 4 | distance_rec=zeros(1,length(distance_precision_threshold)); 5 | for j=1:length(distance_precision_threshold) 6 | [distance_rec(j),PASCAL_rec(j),average_cle_rec(j)]= ... 
7 | compute_performance_measures(pd_boxes, ground_truth,distance_precision_threshold(j),PASCAL_threshold(j)); 8 | end 9 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/computePascalScore.m: -------------------------------------------------------------------------------- 1 | function pascalScore = computePascalScore(bb1,bb2) 2 | %compute the Pascal score of the bb1, bb2 (intersection/union) 3 | 4 | intersectionArea = computeIntersectionArea(bb1,bb2); 5 | pascalScore = intersectionArea/(computeArea(bb1)+computeArea(bb2)-intersectionArea); 6 | return -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/compute_relaibitlity.m: -------------------------------------------------------------------------------- 1 | function [overlaps,distances] = ... 2 | compute_relaibitlity(positions, ground_truth) 3 | 4 | % [distance_precision, PASCAL_precision, average_center_location_error] = ... 5 | % compute_performance_measures(positions, ground_truth, distance_precision_threshold, PASCAL_threshold) 6 | % 7 | % For the given tracker output positions and ground truth it computes the: 8 | % * Distance Precision at the specified threshold (20 pixels as default if 9 | % omitted) 10 | % * PASCAL Precision at the specified threshold (0.5 as default if omitted) 11 | % * Average Center Location error (CLE). 12 | % 13 | % The tracker positions and ground truth must be Nx4-matrices where N is 14 | % the number of time steps in the tracking. Each row has to be on the form 15 | % [c1, c2, s1, s2] where (c1, c2) is the center coordinate and s1 and s2 16 | % are the size in the first and second dimension respectively (the order of 17 | % x and y does not matter here). 
18 | 19 | if size(positions,1) ~= size(ground_truth,1), 20 | disp('Could not calculate precisions, because the number of ground') 21 | disp('truth frames does not match the number of tracked frames.') 22 | return 23 | end 24 | 25 | ground_truth = [ground_truth(:,1:2) + (ground_truth(:,3:4) - 1) / 2 , ground_truth(:,3:4)]; 26 | positions = [positions(:,1:2) + (positions(:,3:4) - 1) / 2 , positions(:,3:4)]; 27 | 28 | %calculate distances to ground truth over all frames 29 | distances = sqrt((positions(:,1) - ground_truth(:,1)).^2 + ... 30 | (positions(:,2) - ground_truth(:,2)).^2); 31 | distances(isnan(distances)) =Inf; 32 | 33 | %calculate distance precision 34 | 35 | %calculate average center location error (CLE) 36 | 37 | %calculate the overlap in each dimension 38 | overlap_height = min(positions(:,1) + positions(:,3)/2, ground_truth(:,1) + ground_truth(:,3)/2) ... 39 | - max(positions(:,1) - positions(:,3)/2, ground_truth(:,1) - ground_truth(:,3)/2); 40 | overlap_width = min(positions(:,2) + positions(:,4)/2, ground_truth(:,2) + ground_truth(:,4)/2) ... 
41 | - max(positions(:,2) - positions(:,4)/2, ground_truth(:,2) - ground_truth(:,4)/2); 42 | 43 | % if no overlap, set to zero 44 | overlap_height(overlap_height < 0) = 0; 45 | overlap_width(overlap_width < 0) = 0; 46 | 47 | % remove NaN values (should not exist any) 48 | valid_ind = ~isnan(overlap_height) & ~isnan(overlap_width); 49 | 50 | % calculate area 51 | overlap_area = overlap_height(valid_ind) .* overlap_width(valid_ind); 52 | tracked_area = positions(valid_ind,3) .* positions(valid_ind,4); 53 | ground_truth_area = ground_truth(valid_ind,3) .* ground_truth(valid_ind,4); 54 | 55 | % calculate PASCAL overlaps 56 | overlaps = overlap_area ./ (tracked_area + ground_truth_area - overlap_area+eps); 57 | 58 | end -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/gt_color_website/bus_gt.txt: -------------------------------------------------------------------------------- 1 | 1 72 18 29 2 | 5 71 18 30 3 | 7 73 19 27 4 | 10 75 19 27 5 | 15 74 17 26 6 | 17 72 18 30 7 | 20 72 18 29 8 | 23 72 17 28 9 | 29 71 15 29 10 | 30 73 17 24 11 | 32 73 18 25 12 | 36 73 15 24 13 | 36 71 19 26 14 | 40 72 17 25 15 | 42 71 18 26 16 | 45 71 17 27 17 | 48 71 17 27 18 | 51 71 17 27 19 | 52 71 18 28 20 | 56 70 18 28 21 | 59 72 18 24 22 | 61 71 18 27 23 | 63 69 17 29 24 | 65 69 18 28 25 | 68 69 18 28 26 | 73 68 16 28 27 | 72 69 18 27 28 | 74 71 19 24 29 | 78 68 17 27 30 | 79 68 19 27 31 | 80 68 19 27 32 | 84 68 17 26 33 | 86 68 18 28 34 | 88 67 18 27 35 | 91 68 17 26 36 | 92 68 18 26 37 | 94 69 18 24 38 | 97 68 18 26 39 | 99 67 19 26 40 | 102 69 16 24 41 | 105 67 16 27 42 | 105 69 18 25 43 | 106 69 19 24 44 | 110 68 17 24 45 | 113 67 16 25 46 | 114 68 17 24 47 | 116 67 17 25 48 | 117 68 18 23 49 | 120 66 18 25 50 | 121 67 17 24 51 | 123 66 16 24 52 | 126 66 16 23 53 | 124 66 21 23 54 | 130 66 17 24 55 | 130 65 18 25 56 | 136 66 14 23 57 | 134 66 19 24 58 | 138 65 15 23 59 | 140 65 15 24 60 | 142 65 
16 23 61 | 142 65 18 22 62 | 144 65 18 21 63 | 146 63 18 24 64 | 148 64 17 24 65 | 148 64 19 25 66 | 151 64 16 22 67 | 153 64 16 25 68 | 154 64 16 25 69 | 156 63 16 25 70 | 159 64 15 23 71 | 159 66 17 22 72 | 162 65 15 22 73 | 164 63 15 24 74 | 164 64 16 24 75 | 166 64 15 24 76 | 166 63 18 25 77 | 170 63 15 22 78 | 170 63 17 23 79 | 174 61 15 24 80 | 172 63 17 23 81 | 175 63 17 22 82 | 177 63 15 22 83 | 176 64 17 22 84 | 179 64 17 22 85 | 181 64 17 22 86 | 182 63 17 23 87 | 184 64 16 21 88 | 184 63 18 23 89 | 187 61 16 23 90 | 187 62 17 22 91 | 185 61 21 22 92 | 190 62 16 22 93 | 193 62 16 21 94 | 192 62 17 21 95 | 194 62 17 23 96 | 193 62 19 21 97 | 195 62 17 22 98 | 197 61 19 22 99 | 199 61 16 23 100 | 200 61 17 22 101 | 202 61 15 22 102 | 203 62 16 21 103 | 203 62 17 21 104 | 206 61 16 21 105 | 206 62 16 20 106 | 206 61 18 21 107 | 209 60 17 22 108 | 208 61 19 22 109 | 211 61 15 22 110 | 213 61 15 22 111 | 214 61 14 21 112 | 215 61 13 19 113 | 215 62 17 20 114 | 214 59 18 23 115 | 217 59 16 22 116 | 218 60 17 22 117 | 219 60 15 22 118 | 220 60 16 22 119 | 221 60 16 22 120 | 222 60 17 22 121 | 222 60 18 20 122 | 225 62 16 22 123 | 224 58 18 23 124 | 225 61 18 20 125 | 229 61 15 21 126 | 227 61 17 20 127 | 228 62 16 19 128 | 229 60 18 21 129 | 231 60 16 22 130 | 230 58 18 22 131 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/gt_color_website/car2_gt.txt: -------------------------------------------------------------------------------- 1 | 62 61 18 11 2 | 62 61 19 12 3 | 63 62 19 10 4 | 64 63 19 10 5 | 65 62 17 11 6 | 65 62 18 11 7 | 65 61 18 12 8 | 67 62 16 12 9 | 67 62 18 12 10 | 67 61 18 12 11 | 67 62 17 10 12 | 68 62 18 11 13 | 68 61 18 12 14 | 69 61 18 13 15 | 69 62 19 12 16 | 69 62 19 12 17 | 69 62 18 12 18 | 70 61 18 13 19 | 69 62 20 12 20 | 70 61 19 13 21 | 71 62 17 11 22 | 71 62 19 13 23 | 71 62 19 12 24 | 73 61 17 12 25 | 73 61 19 12 26 | 72 62 21 12 27 | 74 63 
19 12 28 | 75 62 20 13 29 | 75 62 20 12 30 | 75 63 21 11 31 | 76 62 20 13 32 | 77 63 19 12 33 | 77 63 20 13 34 | 77 62 20 12 35 | 78 61 20 13 36 | 79 62 20 12 37 | 79 63 21 12 38 | 80 63 21 12 39 | 80 62 21 13 40 | 81 63 20 12 41 | 80 62 22 13 42 | 82 63 21 13 43 | 82 63 22 13 44 | 83 62 23 14 45 | 82 64 24 12 46 | 83 64 22 12 47 | 84 63 22 13 48 | 84 64 23 12 49 | 86 63 22 13 50 | 86 63 22 12 51 | 87 63 23 14 52 | 87 62 24 14 53 | 89 63 23 12 54 | 88 63 24 14 55 | 89 62 24 15 56 | 91 62 24 15 57 | 91 63 25 13 58 | 91 63 25 14 59 | 93 63 23 14 60 | 93 63 25 13 61 | 94 62 24 14 62 | 94 63 25 13 63 | 96 63 24 14 64 | 96 63 24 13 65 | 96 62 26 15 66 | 97 63 27 14 67 | 98 62 27 14 68 | 100 63 25 14 69 | 100 63 26 15 70 | 100 63 28 14 71 | 103 63 25 16 72 | 103 63 27 16 73 | 103 62 27 16 74 | 104 63 27 15 75 | 104 63 28 16 76 | 106 62 28 16 77 | 107 63 29 15 78 | 107 64 29 14 79 | 110 63 29 16 80 | 109 63 30 16 81 | 110 64 29 15 82 | 110 63 31 16 83 | 112 64 32 16 84 | 113 65 32 14 85 | 115 63 31 16 86 | 116 64 31 14 87 | 117 63 31 15 88 | 118 63 31 17 89 | 120 62 29 16 90 | 121 64 31 15 91 | 122 63 33 16 92 | 123 63 34 16 93 | 124 64 34 16 94 | 126 63 34 17 95 | 127 63 34 16 96 | 128 62 35 17 97 | 129 62 35 18 98 | 130 62 35 18 99 | 132 62 35 18 100 | 134 61 34 19 101 | 134 62 36 18 102 | 136 62 37 19 103 | 137 63 37 17 104 | 138 62 38 18 105 | 139 62 38 19 106 | 139 62 41 19 107 | 140 63 41 19 108 | 143 63 40 19 109 | 145 63 39 18 110 | 145 62 41 19 111 | 147 62 42 19 112 | 148 62 42 20 113 | 149 63 43 19 114 | 150 63 44 19 115 | 154 64 42 17 116 | 154 62 44 19 117 | 157 62 43 19 118 | 155 63 47 18 119 | 158 63 46 19 120 | 160 63 46 19 121 | 161 63 48 19 122 | 163 63 48 19 123 | 164 62 50 21 124 | 166 62 49 21 125 | 169 62 49 21 126 | 171 61 50 21 127 | 171 63 52 19 128 | 174 63 52 20 129 | 175 63 54 19 130 | 177 63 54 19 131 | 181 62 53 22 132 | -------------------------------------------------------------------------------- 
/plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/gt_color_website/car_gt.txt: -------------------------------------------------------------------------------- 1 | 213 80 33 16 2 | 211 79 35 17 3 | 211 81 33 15 4 | 207 79 35 16 5 | 208 81 33 14 6 | 205 80 35 16 7 | 204 81 32 14 8 | 202 81 33 13 9 | 200 82 34 13 10 | 199 81 35 15 11 | 196 81 36 13 12 | 195 81 33 15 13 | 193 82 35 14 14 | 191 81 37 15 15 | 191 82 34 13 16 | 189 82 36 14 17 | 186 83 37 13 18 | 186 84 33 12 19 | 183 82 34 14 20 | 181 81 37 15 21 | 180 83 34 13 22 | 179 82 35 14 23 | 176 82 36 15 24 | 173 84 37 13 25 | 172 83 38 13 26 | 171 85 37 12 27 | 169 85 37 13 28 | 166 83 39 15 29 | 166 84 37 13 30 | 164 84 38 14 31 | 164 84 35 14 32 | 160 85 37 14 33 | 159 85 36 13 34 | 157 86 40 13 35 | 154 86 40 12 36 | 153 86 39 13 37 | 152 84 37 15 38 | 148 86 39 13 39 | 148 87 38 12 40 | 145 87 38 12 41 | 143 86 41 14 42 | 140 85 43 15 43 | 140 88 36 13 44 | 137 88 38 14 45 | 135 87 38 14 46 | 134 88 38 14 47 | 129 88 42 13 48 | 128 86 40 16 49 | 128 87 38 16 50 | 126 89 38 13 51 | 124 85 37 18 52 | 120 88 41 15 53 | 118 88 43 15 54 | 117 88 42 15 55 | 115 88 42 15 56 | 112 90 45 13 57 | 109 89 44 15 58 | 108 90 41 14 59 | 106 90 40 16 60 | 102 88 42 17 61 | 103 89 41 16 62 | 100 90 43 15 63 | 97 92 43 14 64 | 97 92 40 14 65 | 93 90 43 17 66 | 92 91 42 16 67 | 89 92 42 16 68 | 86 92 43 16 69 | 84 92 45 16 70 | 81 94 46 14 71 | 81 93 43 16 72 | 79 94 45 15 73 | 74 95 47 13 74 | 73 94 46 15 75 | 69 95 47 16 76 | 67 96 50 14 77 | 63 95 51 14 78 | 61 95 48 15 79 | 59 94 49 17 80 | 55 95 51 17 81 | 54 94 49 16 82 | 52 96 49 15 83 | 51 97 49 14 84 | 48 96 48 17 85 | 45 96 50 17 86 | 43 98 50 15 87 | 40 96 49 17 88 | 35 96 52 18 89 | 33 98 54 15 90 | 30 97 54 16 91 | 29 100 52 15 92 | 27 100 51 14 93 | 24 99 51 16 94 | 21 101 54 13 95 | 17 101 53 14 96 | 18 100 50 16 97 | 15 99 52 17 98 | 11 99 53 18 99 | 7 100 55 17 100 | 4 100 54 19 101 | 3 101 53 17 102 | 
-------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/gt_color_website/coin_gt.txt: -------------------------------------------------------------------------------- 1 | 95 73 17 19 2 | 95 73 17 19 3 | 95 73 17 19 4 | 95 73 17 19 5 | 95 73 17 19 6 | 95 73 17 19 7 | 95 73 17 19 8 | 95 73 17 19 9 | 95 73 17 19 10 | 95 73 17 19 11 | 95 73 17 19 12 | 95 73 17 19 13 | 95 73 17 19 14 | 95 73 17 19 15 | 95 73 17 19 16 | 95 73 17 19 17 | 95 73 17 19 18 | 95 73 17 19 19 | 95 73 17 19 20 | 95 74 17 18 21 | 95 74 17 18 22 | 95 76 17 18 23 | 95 75 17 18 24 | 95 75 17 18 25 | 95 77 17 18 26 | 95 76 17 18 27 | 95 76 17 18 28 | 95 77 17 18 29 | 95 76 17 18 30 | 92 77 17 18 31 | 91 78 17 18 32 | 90 78 17 18 33 | 88 80 17 18 34 | 85 82 17 18 35 | 83 84 17 18 36 | 80 85 17 18 37 | 79 86 17 18 38 | 77 87 17 18 39 | 74 87 17 18 40 | 72 86 17 18 41 | 71 86 17 18 42 | 70 87 17 18 43 | 67 88 17 18 44 | 73 87 17 18 45 | 76 87 17 18 46 | 77 87 17 18 47 | 79 87 17 18 48 | 82 86 17 18 49 | 85 84 17 18 50 | 86 83 17 18 51 | 87 82 17 18 52 | 88 81 17 18 53 | 90 81 17 18 54 | 91 81 17 18 55 | 93 81 17 18 56 | 94 82 17 18 57 | 94 84 17 18 58 | 93 85 17 18 59 | 92 87 17 18 60 | 92 89 17 18 61 | 93 90 17 18 62 | 93 91 17 18 63 | 93 91 17 18 64 | 95 91 17 18 65 | 97 91 17 18 66 | 100 91 17 18 67 | 101 91 17 18 68 | 103 91 17 18 69 | 102 91 17 18 70 | 103 91 17 18 71 | 103 91 17 18 72 | 103 91 17 18 73 | 103 91 17 18 74 | 103 91 17 18 75 | 103 90 17 18 76 | 103 90 17 18 77 | 102 89 17 18 78 | 102 88 17 18 79 | 100 87 17 18 80 | 97 85 17 18 81 | 96 84 17 18 82 | 93 84 17 18 83 | 91 84 17 18 84 | 89 84 17 18 85 | 87 84 17 18 86 | 87 82 17 18 87 | 86 82 17 18 88 | 86 82 17 18 89 | 86 81 17 18 90 | 87 81 17 18 91 | 87 81 17 18 92 | 86 80 17 18 93 | 86 82 17 18 94 | 87 81 17 18 95 | 87 81 17 18 96 | 88 81 17 18 97 | 89 81 17 18 98 | 91 81 17 18 99 | 91 81 17 18 100 | 92 82 17 18 101 | 92 84 17 18 102 | 94 84 17 18 103 | 
96 83 17 18 104 | 96 83 17 18 105 | 97 83 17 18 106 | 99 84 17 18 107 | 101 84 17 18 108 | 103 84 17 18 109 | 104 84 17 18 110 | 106 84 17 18 111 | 108 84 17 18 112 | 109 84 17 18 113 | 112 84 17 18 114 | 112 84 17 18 115 | 113 84 17 18 116 | 114 84 17 18 117 | 114 84 17 18 118 | 116 85 17 18 119 | 117 85 17 18 120 | 119 85 17 18 121 | 120 86 17 18 122 | 121 86 17 18 123 | 122 86 17 18 124 | 124 86 17 18 125 | 124 86 17 18 126 | 126 86 17 18 127 | 128 86 17 18 128 | 131 86 17 18 129 | 133 86 17 18 130 | 134 87 17 18 131 | 135 87 17 18 132 | 135 85 17 18 133 | 135 85 17 18 134 | 135 85 17 18 135 | 134 85 17 18 136 | 134 85 17 18 137 | 134 85 17 18 138 | 132 86 17 18 139 | 132 86 17 18 140 | 132 86 17 18 141 | 131 86 17 18 142 | 131 86 17 18 143 | 131 86 17 18 144 | 131 86 17 18 145 | 130 86 17 18 146 | 130 86 17 18 147 | 130 86 17 18 148 | 128 87 17 18 149 | 128 88 17 18 150 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/gt_color_website/kangaroo_gt.txt: -------------------------------------------------------------------------------- 1 | 336 110 28 51 2 | 335 106 29 53 3 | 333 106 32 52 4 | 333 104 29 52 5 | 331 103 26 51 6 | 328 103 30 49 7 | 327 103 31 50 8 | 325 106 31 47 9 | 325 105 30 51 10 | 322 107 32 48 11 | 319 109 31 52 12 | 319 113 33 46 13 | 316 114 34 47 14 | 315 113 31 47 15 | 312 112 30 48 16 | 307 108 34 50 17 | 305 108 33 49 18 | 302 108 34 47 19 | 300 108 34 46 20 | 295 109 35 44 21 | 293 109 36 44 22 | 291 111 35 46 23 | 288 114 34 43 24 | 287 116 32 44 25 | 282 116 32 43 26 | 280 117 34 43 27 | 276 114 31 43 28 | 271 112 32 44 29 | 269 111 31 45 30 | 266 112 34 41 31 | 262 111 32 38 32 | 259 110 34 38 33 | 256 112 34 37 34 | 252 114 36 38 35 | 249 115 34 38 36 | 245 117 32 36 37 | 241 118 30 37 38 | 238 117 31 37 39 | 234 115 34 41 40 | 231 113 32 41 41 | 229 110 32 43 42 | 224 112 31 37 43 | 221 113 32 38 44 | 218 111 32 38 45 | 214 114 34 36 46 | 211 
114 32 35 47 | 208 114 32 36 48 | 203 116 34 34 49 | 199 115 34 36 50 | 197 116 31 37 51 | 194 115 32 39 52 | 191 113 30 40 53 | 188 111 30 41 54 | 184 109 28 40 55 | 180 108 30 41 56 | 179 108 30 40 57 | 175 107 32 41 58 | 172 110 32 39 59 | 169 111 30 37 60 | 166 114 32 35 61 | 162 115 28 35 62 | 160 115 27 34 63 | 155 116 30 34 64 | 154 117 27 33 65 | 150 116 29 38 66 | 148 113 26 37 67 | 143 110 28 39 68 | 141 111 27 37 69 | 136 110 30 37 70 | 132 108 32 38 71 | 131 110 29 36 72 | 128 110 29 34 73 | 125 111 30 35 74 | 124 113 27 35 75 | 120 116 28 32 76 | 118 117 26 31 77 | 116 115 26 32 78 | 111 114 29 36 79 | 108 112 28 35 80 | 105 110 26 36 81 | 101 109 29 36 82 | 102 110 27 34 83 | 97 110 28 33 84 | 96 110 26 34 85 | 92 112 27 31 86 | 89 113 27 29 87 | 86 114 27 31 88 | 81 115 27 30 89 | 81 118 26 28 90 | 78 117 25 30 91 | 76 118 25 28 92 | 72 115 26 30 93 | 70 114 24 31 94 | 66 112 26 34 95 | 61 110 27 35 96 | 61 111 27 31 97 | 59 113 28 28 98 | 53 115 33 28 99 | 51 115 30 28 100 | 50 113 30 30 101 | 50 115 24 30 102 | 45 117 28 30 103 | 40 117 31 29 104 | 41 115 27 31 105 | 39 114 29 29 106 | 36 114 27 34 107 | 32 112 28 34 108 | 31 110 25 32 109 | 28 112 29 31 110 | 24 111 27 28 111 | 24 110 26 29 112 | 22 111 24 31 113 | 19 113 25 30 114 | 14 114 26 27 115 | 9 117 29 26 116 | 6 118 31 26 117 | 2 117 34 28 118 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/load_video_info.m: -------------------------------------------------------------------------------- 1 | function [seq, ground_truth] = load_video_info(video_name) 2 | 3 | ground_truth = dlmread(['./gt_color_website/' video_name '_gt.txt']); 4 | 5 | seq.len = size(ground_truth, 1); 6 | seq.init_rect = ground_truth(1,:); 7 | 8 | end 9 | 10 | -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/mergeMulImageTo3DImahe.m: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hscv/SEE-Net/00d8e798b13e88549af526793e9d6955b4c295af/plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/mergeMulImageTo3DImahe.m -------------------------------------------------------------------------------- /plot-tools/test-AUC-DP-on-color-videos/testSingleAUCAndDP/readme.txt: -------------------------------------------------------------------------------- 1 | run Li_res.m --------------------------------------------------------------------------------