├── LICENSE
├── README.md
├── assets
│   └── pipeline.png
├── install.sh
├── ltr
│   ├── __init__.py
│   ├── __pycache__
│   │   └── __init__.cpython-37.pyc
│   ├── actors
│   │   ├── SBDT.py
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── SBDTv2.cpython-37.pyc
│   │   │   ├── __init__.cpython-37.pyc
│   │   │   ├── base_actor.cpython-37.pyc
│   │   │   ├── bbreg.cpython-37.pyc
│   │   │   └── sbdt.cpython-37.pyc
│   │   ├── base_actor.py
│   │   └── bbreg.py
│   ├── admin
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-37.pyc
│   │   │   ├── environment.cpython-37.pyc
│   │   │   ├── loading.cpython-37.pyc
│   │   │   ├── local.cpython-37.pyc
│   │   │   ├── model_constructor.cpython-37.pyc
│   │   │   ├── settings.cpython-37.pyc
│   │   │   ├── stats.cpython-37.pyc
│   │   │   └── tensorboard.cpython-37.pyc
│   │   ├── environment.py
│   │   ├── loading.py
│   │   ├── local.py
│   │   ├── model_constructor.py
│   │   ├── settings.py
│   │   ├── stats.py
│   │   └── tensorboard.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-37.pyc
│   │   │   ├── image_loader.cpython-37.pyc
│   │   │   ├── loader.cpython-37.pyc
│   │   │   ├── processing.cpython-37.pyc
│   │   │   ├── processing_utils.cpython-37.pyc
│   │   │   ├── sampler.cpython-37.pyc
│   │   │   └── transforms.cpython-37.pyc
│   │   ├── image_loader.py
│   │   ├── loader.py
│   │   ├── processing.py
│   │   ├── processing_utils.py
│   │   ├── sampler.py
│   │   └── transforms.py
│   ├── data_specs
│   │   ├── got10k_train_split.txt
│   │   ├── got10k_val_split.txt
│   │   ├── got10k_vot_exclude.txt
│   │   ├── got10k_vot_train_split.txt
│   │   ├── got10k_vot_val_split.txt
│   │   ├── lasot_train_split-official.txt
│   │   └── lasot_train_split.txt
│   ├── dataset
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-37.pyc
│   │   │   ├── base_dataset.cpython-37.pyc
│   │   │   ├── coco_seq.cpython-37.pyc
│   │   │   ├── got10k.cpython-37.pyc
│   │   │   ├── imagenetvid.cpython-37.pyc
│   │   │   ├── lasot.cpython-37.pyc
│   │   │   └── tracking_net.cpython-37.pyc
│   │   ├── base_dataset.py
│   │   ├── coco_seq.py
│   │   ├── got10k.py
│   │   ├── imagenetvid.py
│   │   ├── lasot.py
│   │   └── tracking_net.py
│   ├── external
│   │   └── PreciseRoIPooling
│   │       ├── LICENSE
│   │       ├── README.md
│   │       ├── _assets
│   │       │   └── prroi_visualization.png
│   │       ├── pytorch
│   │       │   ├── prroi_pool
│   │       │   │   ├── __init__.py
│   │       │   │   ├── __pycache__
│   │       │   │   │   ├── __init__.cpython-37.pyc
│   │       │   │   │   ├── functional.cpython-37.pyc
│   │       │   │   │   └── prroi_pool.cpython-37.pyc
│   │       │   │   ├── _prroi_pooling
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   ├── __prroi_pooling.so
│   │       │   │   │   └── __pycache__
│   │       │   │   │       └── __init__.cpython-37.pyc
│   │       │   │   ├── build.py
│   │       │   │   ├── functional.py
│   │       │   │   ├── prroi_pool.py
│   │       │   │   ├── src
│   │       │   │   │   ├── prroi_pooling_gpu.c
│   │       │   │   │   ├── prroi_pooling_gpu.h
│   │       │   │   │   ├── prroi_pooling_gpu_impl.cu
│   │       │   │   │   ├── prroi_pooling_gpu_impl.cu.o
│   │       │   │   │   └── prroi_pooling_gpu_impl.cuh
│   │       │   │   └── travis.sh
│   │       │   └── tests
│   │       │       └── test_prroi_pooling2d.py
│   │       └── src
│   │           ├── prroi_pooling_gpu_impl.cu
│   │           └── prroi_pooling_gpu_impl.cuh
│   ├── models
│   │   ├── SBDT
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   └── network.cpython-37.pyc
│   │   │   └── network.py
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   └── __init__.cpython-37.pyc
│   │   ├── backbone
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   ├── resnet.cpython-37.pyc
│   │   │   │   └── resnet18_vggm.cpython-37.pyc
│   │   │   ├── resnet.py
│   │   │   └── resnet18_vggm.py
│   │   ├── bbreg
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   ├── atom.cpython-37.pyc
│   │   │   │   └── atom_iou_net.cpython-37.pyc
│   │   │   ├── atom.py
│   │   │   └── atom_iou_net.py
│   │   ├── layers
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   └── blocks.cpython-37.pyc
│   │   │   └── blocks.py
│   │   └── locator
│   │       ├── __init__.py
│   │       ├── __pycache__
│   │       │   ├── __init__.cpython-37.pyc
│   │       │   ├── onlineRR.cpython-37.pyc
│   │       │   ├── onlineRR18.cpython-37.pyc
│   │       │   ├── onlineRR50.cpython-37.pyc
│   │       │   └── onlineRR_net.cpython-37.pyc
│   │       ├── onlineRR18.py
│   │       └── onlineRR50.py
│   ├── run_training.py
│   ├── train_settings
│   │   ├── SBDT
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   ├── atom_default.cpython-37.pyc
│   │   │   │   ├── default-res50.cpython-37.pyc
│   │   │   │   └── default.cpython-37.pyc
│   │   │   ├── default-res18.py
│   │   │   └── default-res50.py
│   │   ├── __init__.py
│   │   └── __pycache__
│   │       └── __init__.cpython-37.pyc
│   └── trainers
│       ├── __init__.py
│       ├── __pycache__
│       │   ├── __init__.cpython-37.pyc
│       │   ├── base_trainer.cpython-37.pyc
│       │   └── ltr_trainer.cpython-37.pyc
│       ├── base_trainer.py
│       └── ltr_trainer.py
└── pytracking
    ├── __init__.py
    ├── __pycache__
    │   ├── __init__.cpython-37.pyc
    │   ├── run_tracker.cpython-37.pyc
    │   └── run_webcam.cpython-37.pyc
    ├── evaluation
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-37.pyc
    │   │   ├── data.cpython-37.pyc
    │   │   ├── environment.cpython-37.pyc
    │   │   ├── got10kdataset.cpython-37.pyc
    │   │   ├── lasotdataset.cpython-37.pyc
    │   │   ├── local.cpython-37.pyc
    │   │   ├── nfsdataset.cpython-37.pyc
    │   │   ├── nfstunedataset.cpython-37.pyc
    │   │   ├── otbdataset.cpython-37.pyc
    │   │   ├── otbtunedataset.cpython-37.pyc
    │   │   ├── running.cpython-37.pyc
    │   │   ├── tpldataset.cpython-37.pyc
    │   │   ├── tracker.cpython-37.pyc
    │   │   ├── trackingnetdataset.cpython-37.pyc
    │   │   ├── trackingnettunedataset.cpython-37.pyc
    │   │   ├── uavdataset.cpython-37.pyc
    │   │   ├── vot18dataset.cpython-37.pyc
    │   │   ├── vot19dataset.cpython-37.pyc
    │   │   └── votdataset.cpython-37.pyc
    │   ├── data.py
    │   ├── environment.py
    │   ├── got10kdataset.py
    │   ├── lasotdataset.py
    │   ├── local.py
    │   ├── nfsdataset.py
    │   ├── otbdataset.py
    │   ├── running.py
    │   ├── tpldataset.py
    │   ├── tracker.py
    │   ├── trackingnetdataset.py
    │   ├── uavdataset.py
    │   ├── vot18dataset.py
    │   └── vot19dataset.py
    ├── features
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-37.pyc
    │   │   ├── augmentation.cpython-37.pyc
    │   │   ├── deep.cpython-37.pyc
    │   │   ├── extractor.cpython-37.pyc
    │   │   ├── featurebase.cpython-37.pyc
    │   │   └── preprocessing.cpython-37.pyc
    │   ├── augmentation.py
    │   ├── color.py
    │   ├── deep.py
    │   ├── extractor.py
    │   ├── featurebase.py
    │   ├── preprocessing.py
    │   └── util.py
    ├── libs
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   ├── __init__.cpython-37.pyc
    │   │   ├── complex.cpython-37.pyc
    │   │   ├── dcf.cpython-37.pyc
    │   │   ├── fourier.cpython-37.pyc
    │   │   ├── operation.cpython-37.pyc
    │   │   ├── optimization.cpython-37.pyc
    │   │   ├── tensordict.cpython-37.pyc
    │   │   └── tensorlist.cpython-37.pyc
    │   ├── complex.py
    │   ├── dcf.py
    │   ├── fourier.py
    │   ├── operation.py
    │   ├── optimization.py
    │   ├── tensordict.py
    │   └── tensorlist.py
    ├── networks
    │   └── network.txt
    ├── parameter
    │   ├── __init__.py
    │   ├── __pycache__
    │   │   └── __init__.cpython-37.pyc
    │   ├── sbdt
    │   │   ├── GOT10k.py
    │   │   ├── NfS.py
    │   │   ├── OTB2015.py
    │   │   ├── TrackingNet.py
    │   │   ├── VOT19.py
    │   │   ├── __init__.py
    │   │   └── __pycache__
    │   │       ├── GOT10k.cpython-37.pyc
    │   │       ├── NfS.cpython-37.pyc
    │   │       ├── OTB2015.cpython-37.pyc
    │   │       ├── TrackingNet.cpython-37.pyc
    │   │       ├── VOT19.cpython-37.pyc
    │   │       ├── __init__.cpython-37.pyc
    │   │       ├── default.cpython-37.pyc
    │   │       ├── default2.cpython-37.pyc
    │   │       ├── defaultGOT.cpython-37.pyc
    │   │       ├── defaultNFS.cpython-37.pyc
    │   │       ├── defaultNFS2.cpython-37.pyc
    │   │       └── norm_default2.cpython-37.pyc
    │   └── sbdt50
    │       ├── GOT10k.py
    │       ├── TrackingNet.py
    │       ├── VOT18.py
    │       ├── __init__.py
    │       └── __pycache__
    │           ├── GOT10k.cpython-37.pyc
    │           ├── TrackingNet.cpython-37.pyc
    │           ├── VOT18.cpython-37.pyc
    │           ├── __init__.cpython-37.pyc
    │           ├── default.cpython-37.pyc
    │           ├── default2.cpython-37.pyc
    │           ├── defaultGOT.cpython-37.pyc
    │           ├── defaultNFS.cpython-37.pyc
    │           ├── defaultNFS2.cpython-37.pyc
    │           ├── defaultTrackingNet.cpython-37.pyc
    │           └── norm_default2.cpython-37.pyc
    ├── run_tracker.py
    ├── run_webcam.py
    ├── tracker
    │   ├── __init__.py
    │   ├── base
    │   │   ├── __init__.py
    │   │   ├── __pycache__
    │   │   │   ├── __init__.cpython-37.pyc
    │   │   │   └── basetracker.cpython-37.pyc
    │   │   └── basetracker.py
    │   ├── sbdt
    │   │   ├── __init__.py
    │   │   ├── __pycache__
    │   │   │   ├── __init__.cpython-37.pyc
    │   │   │   └── sbdt.cpython-37.pyc
    │   │   └── sbdt.py
    │   └── sbdt50
    │       ├── __init__.py
    │       ├── __pycache__
    │       │   ├── __init__.cpython-37.pyc
    │       │   └── sbdt.cpython-37.pyc
    │       └── sbdt.py
    ├── tracking_results
    │   └── save.txt
    └── utils
        ├── __init__.py
        ├── __pycache__
        │   ├── __init__.cpython-37.pyc
        │   ├── params.cpython-37.pyc
        │   └── plotting.cpython-37.pyc
        ├── gdrive_download
        ├── params.py
        └── plotting.py
/assets/pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/assets/pipeline.png
--------------------------------------------------------------------------------
/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$#" -ne 2 ]; then
4 |     echo "ERROR! Illegal number of parameters. Usage: bash install.sh conda_install_path environment_name"
5 |     exit 1
6 | fi
7 |
8 | conda_install_path=$1
9 | conda_env_name=$2
10 |
11 | source $conda_install_path/etc/profile.d/conda.sh
12 | echo "****************** Creating conda environment ${conda_env_name} python=3.7 ******************"
13 | conda create -y --name $conda_env_name
14 |
15 | echo ""
16 | echo ""
17 | echo "****************** Activating conda environment ${conda_env_name} ******************"
18 | conda activate $conda_env_name
19 |
20 | echo ""
21 | echo ""
22 | echo "****************** Installing python and numpy ******************"
23 | conda install -y python=3.7.3
24 | conda install -y numpy=1.16.3
25 | conda install -y numpy-base=1.16.3
26 |
27 | echo ""
28 | echo ""
29 | echo "****************** Installing pytorch 0.4.1 with cuda80 ******************"
30 | conda install -y pytorch=0.4.1 torchvision=0.2.1 cuda80 -c pytorch
31 |
32 | echo ""
33 | echo ""
34 | echo "****************** Installing matplotlib 3.1.3 ******************"
35 | conda install -y matplotlib=3.1.3
36 |
37 | echo ""
38 | echo ""
39 | echo "****************** Installing pandas ******************"
40 | conda install -y pandas=1.0.3
41 |
42 | echo ""
43 | echo ""
44 | echo "****************** Installing opencv ******************"
45 | pip install opencv-python==4.0.1.24
46 |
47 | echo ""
48 | echo ""
49 | echo "****************** Installing tensorboardX ******************"
50 | pip install tensorboard
51 | pip install tensorboardX
52 |
53 | echo ""
54 | echo ""
55 | echo "****************** Installing nltk ******************"
56 | pip install nltk==3.4.1
57 |
58 | echo ""
59 | echo ""
60 | echo "****************** Installing cython ******************"
61 | conda install -y cython=0.29.15
62 |
63 | echo ""
64 | echo ""
65 | echo "****************** Installing coco toolkit ******************"
66 | pip install pycocotools
67 |
68 | echo ""
69 | echo ""
70 | echo "****************** Installing jpeg4py python wrapper ******************"
71 | pip install jpeg4py
72 |
73 |
74 | echo ""
75 | echo ""
76 | echo "****************** Installing Shapely for VOT challenge ******************"
77 | pip install Shapely==1.6.4.post2
78 |
79 | echo ""
80 | echo ""
81 | echo "****************** Compile RoI ******************"
82 | cd ltr/external/PreciseRoIPooling/pytorch/prroi_pool
83 | bash travis.sh
84 | cd ../../../../
85 |
--------------------------------------------------------------------------------
/ltr/__init__.py:
--------------------------------------------------------------------------------
1 | from .admin.loading import load_network
2 | from .admin.model_constructor import model_constructor
--------------------------------------------------------------------------------
/ltr/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/actors/SBDT.py:
--------------------------------------------------------------------------------
1 | from . import BaseActor
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 | class SBDTActor(BaseActor):
7 |     """ Actor for training the SBDT"""
8 |     def __call__(self, data):
9 |         """
10 |         args:
11 |             data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_scale_proposals',
12 |                    'proposal_iou', 'train_locator_proposals', 'train_locator_labels', 'test_locator_proposals', 'test_locator_labels'.
13 |
14 |         returns:
15 |             loss  - the training loss
16 |             stats - dict containing detailed losses
17 |         """
18 |         # Run network to obtain IoU prediction for each proposal in 'test_scale_proposals' and regression values for each proposal in 'test_locator_proposals'
19 |         iou_pred, locator_pred = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_scale_proposals'],\
20 |                                           data['train_locator_proposals'], data['train_locator_labels'], data['test_locator_proposals'])
21 |
22 |         iou_pred = iou_pred.squeeze()
23 |         locator_pred = locator_pred.squeeze()
24 |         data['proposal_iou'] = data['proposal_iou'].squeeze()
25 |         data['test_locator_labels'] = data['test_locator_labels'].squeeze()
26 |
27 |         # L2 loss for IoU regression in ATOM
28 |         iou_gt = data['proposal_iou']
29 |         iou_loss = nn.MSELoss()(iou_pred, iou_gt)
30 |
31 |         # Shrinkage loss for locator regression
32 |         locator_gt = data['test_locator_labels']
33 |         locator_pred = locator_pred.clamp(0-0.5, 1+0.5)    # allow a 0.5 margin around the [0, 1] label range
34 |         locator_absolute_error = (locator_pred - locator_gt).abs()
35 |         locator_loss = torch.exp(locator_gt) * locator_absolute_error.pow(2) / (1 + torch.exp(10*(0.2-locator_absolute_error)))
36 |         locator_loss = locator_loss.mean()
37 |
38 |         # Return training stats
39 |         loss = iou_loss + locator_loss
40 |         stats = {'Loss/total': loss.item(),
41 |                  'Loss/iou': iou_loss.item(),
42 |                  'Loss/locator': locator_loss.item()}
43 |
44 |         return loss, stats
--------------------------------------------------------------------------------
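For reference, the shrinkage loss that `SBDTActor` computes inline above can be written as a stand-alone function. This is a minimal sketch, not part of the repository; the defaults `a=10` and `c=0.2` mirror the constants hard-coded in SBDT.py:

```python
import torch

def shrinkage_loss(pred, gt, a=10.0, c=0.2):
    """Shrinkage loss sketch: the factor 1 / (1 + exp(a * (c - |error|)))
    suppresses the squared error of easy samples (|error| << c) while leaving
    hard samples almost untouched; exp(gt) up-weights high-valued targets."""
    error = (pred - gt).abs()
    loss = torch.exp(gt) * error.pow(2) / (1 + torch.exp(a * (c - error)))
    return loss.mean()

# An easy sample (small error) contributes far less than a hard one:
easy = shrinkage_loss(torch.tensor([0.05]), torch.tensor([0.0]))
hard = shrinkage_loss(torch.tensor([0.80]), torch.tensor([0.0]))
assert easy < hard
```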
/ltr/actors/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_actor import BaseActor
2 | from .bbreg import AtomActor
3 | from .SBDT import SBDTActor
4 |
--------------------------------------------------------------------------------
/ltr/actors/__pycache__/SBDTv2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/actors/__pycache__/SBDTv2.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/actors/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/actors/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/actors/__pycache__/base_actor.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/actors/__pycache__/base_actor.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/actors/__pycache__/bbreg.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/actors/__pycache__/bbreg.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/actors/__pycache__/sbdt.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/actors/__pycache__/sbdt.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/actors/base_actor.py:
--------------------------------------------------------------------------------
1 | from pytracking import TensorDict
2 |
3 |
4 | class BaseActor:
5 |     """ Base class for actors. The actor class handles passing the data through the network
6 |     and calculating the loss."""
7 |     def __init__(self, net, objective=None):
8 |         """
9 |         args:
10 |             net - The network to train
11 |             objective - The loss function
12 |         """
13 |         self.net = net
14 |         self.objective = objective
15 |
16 |     def __call__(self, data: TensorDict):
17 |         """ Called in each training iteration. Should pass the input data through the network, calculate the loss, and
18 |         return the training stats for the input data.
19 |         args:
20 |             data - A TensorDict containing all the necessary data blocks.
21 |
22 |         returns:
23 |             loss  - loss for the input data
24 |             stats - a dict containing detailed losses
25 |         """
26 |         raise NotImplementedError
27 |
28 |     def to(self, device):
29 |         """ Move the network to device.
30 |         args:
31 |             device - device to use. 'cpu' or 'cuda'
32 |         """
33 |         self.net.to(device)
34 |
35 |     def train(self, mode=True):
36 |         """ Set whether the network is in train mode.
37 |         args:
38 |             mode (True) - Bool specifying whether in training mode.
39 |         """
40 |         self.net.train(mode)
41 |
42 |     def eval(self):
43 |         """ Set network to eval mode."""
44 |         self.train(False)
--------------------------------------------------------------------------------
/ltr/actors/bbreg.py:
--------------------------------------------------------------------------------
1 | from . import BaseActor
2 |
3 |
4 | class AtomActor(BaseActor):
5 |     """ Actor for training the IoU-Net in ATOM"""
6 |     def __call__(self, data):
7 |         """
8 |         args:
9 |             data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',
10 |                    'test_proposals' and 'proposal_iou'.
11 |
12 |         returns:
13 |             loss  - the training loss
14 |             stats - dict containing detailed losses
15 |         """
16 |         # Run network to obtain IoU prediction for each proposal in 'test_proposals'
17 |         iou_pred = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals'])
18 |
19 |         iou_pred = iou_pred.view(-1, iou_pred.shape[2])
20 |         iou_gt = data['proposal_iou'].view(-1, data['proposal_iou'].shape[2])
21 |
22 |         # Hard example mining: keep only the half of the proposals with the largest squared error per sample
23 |         batch_size, sample_num = iou_gt.shape[0], iou_gt.shape[1]
24 |         hard_num = int(0.5 * sample_num)
25 |         total_loss = (iou_pred - iou_gt).pow(2).squeeze()
26 |         loss = 0.0
27 |         for i in range(batch_size):
28 |             _, pred = total_loss[i].topk(hard_num, 0, True, True)
29 |             loss += total_loss[i].index_select(0, pred).mean()
30 |         loss = loss / batch_size
31 |
32 |         # Compute loss
33 |         #loss = self.objective(iou_pred, iou_gt)
34 |
35 |         # Return training stats
36 |         stats = {'Loss/total': loss.item(),
37 |                  'Loss/iou': loss.item()}
38 |
39 |         return loss, stats
--------------------------------------------------------------------------------
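Note that `topk` already returns the selected loss values, so the `index_select` in the hard-mining loop above is redundant. A vectorized equivalent could look like the following sketch (the helper name and the `hard_fraction` parameter are illustrative, not part of the repository):

```python
import torch

def hard_mined_mse(iou_pred, iou_gt, hard_fraction=0.5):
    """For each batch element, average only the `hard_fraction` of proposals
    with the largest squared error, then average over the batch."""
    total_loss = (iou_pred - iou_gt).pow(2)           # (batch, num_proposals)
    hard_num = int(hard_fraction * total_loss.shape[1])
    hard_loss, _ = total_loss.topk(hard_num, dim=1)   # largest errors per sample
    return hard_loss.mean(dim=1).mean()
```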
/ltr/admin/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/environment.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/environment.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/loading.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/loading.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/local.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/local.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/model_constructor.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/model_constructor.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/settings.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/settings.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/stats.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/stats.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/__pycache__/tensorboard.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/admin/__pycache__/tensorboard.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/admin/environment.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 | from collections import OrderedDict
4 |
5 |
6 | def create_default_local_file():
7 |     path = os.path.join(os.path.dirname(__file__), 'local.py')
8 |
9 |     empty_str = '\'\''
10 |     default_settings = OrderedDict({
11 |         'workspace_dir': empty_str,
12 |         'tensorboard_dir': 'self.workspace_dir + \'/tensorboard/\'',
13 |         'lasot_dir': empty_str,
14 |         'got10k_dir': empty_str,
15 |         'trackingnet_dir': empty_str,
16 |         'coco_dir': empty_str,
17 |         'imagenet_dir': empty_str,
18 |         'imagenetdet_dir': empty_str})
19 |
20 |     comment = {'workspace_dir': 'Base directory for saving network checkpoints.',
21 |                'tensorboard_dir': 'Directory for tensorboard files.'}
22 |
23 |     with open(path, 'w') as f:
24 |         f.write('class EnvironmentSettings:\n')
25 |         f.write('    def __init__(self):\n')
26 |
27 |         for attr, attr_val in default_settings.items():
28 |             comment_str = None
29 |             if attr in comment:
30 |                 comment_str = comment[attr]
31 |             if comment_str is None:
32 |                 f.write('        self.{} = {}\n'.format(attr, attr_val))
33 |             else:
34 |                 f.write('        self.{} = {}    # {}\n'.format(attr, attr_val, comment_str))
35 |
36 |
37 | def env_settings():
38 |     env_module_name = 'ltr.admin.local'
39 |     try:
40 |         env_module = importlib.import_module(env_module_name)
41 |         return env_module.EnvironmentSettings()
42 |     except:
43 |         env_file = os.path.join(os.path.dirname(__file__), 'local.py')
44 |
45 |         create_default_local_file()
46 |         raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. Then try to run again.'.format(env_file))
47 |
--------------------------------------------------------------------------------
/ltr/admin/loading.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | import sys
4 | from pathlib import Path
5 | import importlib
6 |
7 |
8 | def load_network(network_dir=None, checkpoint=None, constructor_fun_name=None, constructor_module=None, **kwargs):
9 |     """Loads a network checkpoint file.
10 |
11 |     Can be called in two different ways:
12 |         load_network(network_dir):
13 |             Loads the checkpoint file given by the path. If network_dir is a directory,
14 |             it tries to find the latest checkpoint in that directory.
15 |         load_network(network_dir, checkpoint=epoch_num):
16 |             Loads the network at the given epoch number (int).
17 |
18 |     The extra keyword arguments are supplied to the network constructor to replace saved ones.
19 |     """
20 |
21 |
22 |     if network_dir is not None:
23 |         net_path = Path(network_dir)
24 |     else:
25 |         net_path = None
26 |
27 |     if net_path is not None and net_path.is_file():
28 |         checkpoint = str(net_path)
29 |
30 |     if checkpoint is None:
31 |         # Load most recent checkpoint
32 |         checkpoint_list = sorted(net_path.glob('*.pth.tar'))
33 |         if checkpoint_list:
34 |             checkpoint_path = checkpoint_list[-1]
35 |         else:
36 |             raise Exception('No matching checkpoint file found')
37 |     elif isinstance(checkpoint, int):
38 |         # Checkpoint is the epoch number
39 |         checkpoint_list = sorted(net_path.glob('*_ep{:04d}.pth.tar'.format(checkpoint)))
40 |         if not checkpoint_list:
41 |             raise Exception('No matching checkpoint file found')
42 |         if len(checkpoint_list) > 1:
43 |             raise Exception('Multiple matching checkpoint files found')
44 |         else:
45 |             checkpoint_path = checkpoint_list[0]
46 |     elif isinstance(checkpoint, str):
47 |         # Checkpoint is the path
48 |         checkpoint_path = os.path.expanduser(checkpoint)
49 |     else:
50 |         raise TypeError
51 |
52 |     # Load network
53 |     checkpoint_dict = torch_load_legacy(checkpoint_path)
54 |
55 |     # Construct network model
56 |     if 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:
57 |         net_constr = checkpoint_dict['constructor']
58 |         if constructor_fun_name is not None:
59 |             net_constr.fun_name = constructor_fun_name
60 |         if constructor_module is not None:
61 |             net_constr.fun_module = constructor_module
62 |         for arg, val in kwargs.items():
63 |             if arg in net_constr.kwds.keys():
64 |                 net_constr.kwds[arg] = val
65 |             else:
66 |                 print('WARNING: Keyword argument "{}" not found when loading network.'.format(arg))
67 |         # Legacy networks before refactoring
68 |         if net_constr.fun_module.startswith('dlframework.'):
69 |             net_constr.fun_module = net_constr.fun_module[len('dlframework.'):]
70 |         net = net_constr.get()
71 |     else:
72 |         raise RuntimeError('No constructor for the given network.')
73 |
74 |     net.load_state_dict(checkpoint_dict['net'])
75 |
76 |     net.constructor = checkpoint_dict['constructor']
77 |     if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:
78 |         net.info = checkpoint_dict['net_info']
79 |
80 |     return net, checkpoint_dict
81 |
82 |
83 | def load_weights(net, path, strict=True):
84 |     checkpoint_dict = torch.load(path)
85 |     weight_dict = checkpoint_dict['net']
86 |     net.load_state_dict(weight_dict, strict=strict)
87 |     return net
88 |
89 |
90 | def torch_load_legacy(path):
91 |     """Load network with legacy environment."""
92 |
93 |     # Setup legacy env (for older networks)
94 |     _setup_legacy_env()
95 |
96 |     # Load network
97 |     checkpoint_dict = torch.load(path)
98 |
99 |     # Cleanup legacy
100 |     _cleanup_legacy_env()
101 |
102 |     return checkpoint_dict
103 |
104 |
105 | def _setup_legacy_env():
106 |     importlib.import_module('ltr')
107 |     sys.modules['dlframework'] = sys.modules['ltr']
108 |     sys.modules['dlframework.common'] = sys.modules['ltr']
109 |     for m in ('model_constructor', 'stats', 'settings', 'local'):
110 |         importlib.import_module('ltr.admin.' + m)
111 |         sys.modules['dlframework.common.utils.' + m] = sys.modules['ltr.admin.' + m]
112 |
113 |
114 | def _cleanup_legacy_env():
115 |     del_modules = []
116 |     for m in sys.modules.keys():
117 |         if m.startswith('dlframework'):
118 |             del_modules.append(m)
119 |     for m in del_modules:
120 |         del sys.modules[m]
121 |
--------------------------------------------------------------------------------
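Based on the docstring, `load_network` supports two calling conventions. A hypothetical usage sketch (the checkpoint directory below is illustrative, not part of the repository):

```python
from ltr.admin.loading import load_network

# Directory given: load the most recent '*.pth.tar' checkpoint found in it.
net, checkpoint_dict = load_network('/path/to/workspace/checkpoints/sbdt')

# Epoch number given: load the checkpoint matching '*_ep0040.pth.tar'.
net, checkpoint_dict = load_network('/path/to/workspace/checkpoints/sbdt', checkpoint=40)
```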
/ltr/admin/local.py:
--------------------------------------------------------------------------------
1 | class EnvironmentSettings:
2 |     def __init__(self):
3 |         self.workspace_dir = '/home/zhenglinyu2/SBDT/model/'    # Base directory for saving network checkpoints.
4 |         self.tensorboard_dir = self.workspace_dir + '/tensorboard/'    # Directory for tensorboard files.
5 |         self.lasot_dir = '/data/zhenglinyu/LaSOT/source/LaSOTBenchmark/'
6 |         self.got10k_dir = '/data/zhenglinyu/GOT/source/data/'
7 |         self.trackingnet_dir = '/data/zhenglinyu/TrackingNet/source/'
8 |         self.coco_dir = ''
9 |         self.imagenet_dir = ''
10 |         self.imagenetdet_dir = ''
11 |
--------------------------------------------------------------------------------
/ltr/admin/model_constructor.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | import importlib
3 |
4 |
5 | def model_constructor(f):
6 |     """ Wraps the function 'f' which returns the network. An extra field 'constructor' is added to the network returned
7 |     by 'f'. This field contains an instance of the 'NetConstructor' class, which contains the information needed to
8 |     re-construct the network, such as the name of the function 'f', the function arguments etc. Thus, the network can
9 |     be easily constructed from a saved checkpoint by calling the NetConstructor.get() function.
10 |     """
11 |     @wraps(f)
12 |     def f_wrapper(*args, **kwds):
13 |         net_constr = NetConstructor(f.__name__, f.__module__, args, kwds)
14 |         output = f(*args, **kwds)
15 |         if isinstance(output, (tuple, list)):
16 |             # Assume the first element is the network
17 |             output[0].constructor = net_constr
18 |         else:
19 |             output.constructor = net_constr
20 |         return output
21 |     return f_wrapper
22 |
23 |
24 | class NetConstructor:
25 |     """ Class to construct networks. Takes as input the function name (e.g. atom_resnet18), the name of the module
26 |     which contains the network function (e.g. ltr.models.bbreg.atom) and the arguments for the network
27 |     function. The class object can then be stored along with the network weights to re-construct the network."""
28 |     def __init__(self, fun_name, fun_module, args, kwds):
29 |         """
30 |         args:
31 |             fun_name - The function which returns the network
32 |             fun_module - the module which contains the network function
33 |             args - positional arguments which are passed to the network function
34 |             kwds - keyword arguments which are passed to the network function
35 |         """
36 |         self.fun_name = fun_name
37 |         self.fun_module = fun_module
38 |         self.args = args
39 |         self.kwds = kwds
40 |
41 |     def get(self):
42 |         """ Rebuild the network by calling the network function with the correct arguments. """
43 |         net_module = importlib.import_module(self.fun_module)
44 |         net_fun = getattr(net_module, self.fun_name)
45 |         return net_fun(*self.args, **self.kwds)
46 |
--------------------------------------------------------------------------------
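A toy example of how the decorator and `NetConstructor` fit together (the network function `toy_net` is made up for illustration, not part of the repository):

```python
import torch.nn as nn
from ltr.admin.model_constructor import model_constructor

@model_constructor
def toy_net(hidden_dim=16):
    return nn.Sequential(nn.Linear(4, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, 1))

net = toy_net(hidden_dim=32)
# The decorator attached a NetConstructor recording the function name, module
# and arguments, which load_network() later uses to rebuild the architecture
# before loading the saved weights:
rebuilt = net.constructor.get()
```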
/ltr/admin/settings.py:
--------------------------------------------------------------------------------
1 | from ltr.admin.environment import env_settings
2 |
3 |
4 | class Settings:
5 |     """ Training settings, e.g. the paths to datasets and networks."""
6 |     def __init__(self):
7 |         self.set_default()
8 |
9 |     def set_default(self):
10 |         self.env = env_settings()
11 |         self.use_gpu = True
12 |
13 |
14 |
--------------------------------------------------------------------------------
/ltr/admin/stats.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | class StatValue:
4 |     def __init__(self):
5 |         self.clear()
6 |
7 |     def reset(self):
8 |         self.val = 0
9 |
10 |     def clear(self):
11 |         self.reset()
12 |         self.history = []
13 |
14 |     def update(self, val):
15 |         self.val = val
16 |         self.history.append(self.val)
17 |
18 |
19 | class AverageMeter(object):
20 |     """Computes and stores the average and current value"""
21 |     def __init__(self):
22 |         self.clear()
23 |         self.has_new_data = False
24 |
25 |     def reset(self):
26 |         self.avg = 0
27 |         self.val = 0
28 |         self.sum = 0
29 |         self.count = 0
30 |
31 |     def clear(self):
32 |         self.reset()
33 |         self.history = []
34 |
35 |     def update(self, val, n=1):
36 |         self.val = val
37 |         self.sum += val * n
38 |         self.count += n
39 |         self.avg = self.sum / self.count
40 |
41 |     def new_epoch(self):
42 |         if self.count > 0:
43 |             self.history.append(self.avg)
44 |             self.reset()
45 |             self.has_new_data = True
46 |         else:
47 |             self.has_new_data = False
48 |
49 |
50 | def topk_accuracy(output, target, topk=(1,)):
51 |     """Computes the precision@k for the specified values of k"""
52 |     single_input = not isinstance(topk, (tuple, list))
53 |     if single_input:
54 |         topk = (topk,)
55 |
56 |     maxk = max(topk)
57 |     batch_size = target.size(0)
58 |
59 |     _, pred = output.topk(maxk, 1, True, True)
60 |     pred = pred.t()
61 |     correct = pred.eq(target.view(1, -1).expand_as(pred))
62 |
63 |     res = []
64 |     for k in topk:
65 |         correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0]
66 |         res.append(correct_k * 100.0 / batch_size)
67 |
68 |     if single_input:
69 |         return res[0]
70 |
71 |     return res
72 |
--------------------------------------------------------------------------------
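A minimal usage sketch for `AverageMeter` (the values are illustrative): `update` maintains a running weighted average within an epoch, and `new_epoch` archives it:

```python
from ltr.admin.stats import AverageMeter

meter = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter.update(batch_loss, n=batch_size)
print(meter.avg)   # 0.74: average weighted by the 80 samples seen so far
meter.new_epoch()  # appends the epoch average to meter.history and resets
```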
/ltr/admin/tensorboard.py:
--------------------------------------------------------------------------------
1 | import os
2 | from collections import OrderedDict
3 | from tensorboardX import SummaryWriter
4 |
5 |
6 | class TensorboardWriter:
7 |     def __init__(self, directory, loader_names):
8 |         self.directory = directory
9 |         self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names})
10 |
11 |     def write_info(self, module_name, script_name, description):
12 |         tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info'))
13 |         tb_info_writer.add_text('Module_name', module_name)
14 |         tb_info_writer.add_text('Script_name', script_name)
15 |         tb_info_writer.add_text('Description', description)
16 |         tb_info_writer.close()
17 |
18 |     def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1):
19 |         for loader_name, loader_stats in stats.items():
20 |             if loader_stats is None:
21 |                 continue
22 |             for var_name, val in loader_stats.items():
23 |                 if hasattr(val, 'history') and getattr(val, 'has_new_data', True):
24 |                     self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch)
--------------------------------------------------------------------------------
/ltr/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .loader import LTRLoader
--------------------------------------------------------------------------------
/ltr/data/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/data/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/data/__pycache__/image_loader.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/data/__pycache__/image_loader.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/data/__pycache__/loader.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/data/__pycache__/loader.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/data/__pycache__/processing.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/data/__pycache__/processing.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/data/__pycache__/processing_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/data/__pycache__/processing_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/data/__pycache__/sampler.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/data/__pycache__/sampler.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/data/__pycache__/transforms.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/data/__pycache__/transforms.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/data/image_loader.py:
--------------------------------------------------------------------------------
1 | import jpeg4py
2 | import cv2 as cv
3 |
4 |
5 | def default_image_loader(path):
6 |     """The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader,
7 |     but reverts to the opencv_loader if the former is not available."""
8 |     if default_image_loader.use_jpeg4py is None:
9 |         # Try using jpeg4py
10 |         im = jpeg4py_loader(path)
11 |         if im is None:
12 |             default_image_loader.use_jpeg4py = False
13 |             print('Using opencv_loader instead.')
14 |         else:
15 |             default_image_loader.use_jpeg4py = True
16 |             return im
17 |     if default_image_loader.use_jpeg4py:
18 |         return jpeg4py_loader(path)
19 |     return opencv_loader(path)
20 |
21 | default_image_loader.use_jpeg4py = None
22 |
23 |
24 | def jpeg4py_loader(path):
25 |     """ Image reading using jpeg4py (https://github.com/ajkxyz/jpeg4py)"""
26 |     try:
27 |         return jpeg4py.JPEG(path).decode()
28 |     except Exception as e:
29 |         print('ERROR: Could not read image "{}"'.format(path))
30 |         print(e)
31 |         return None
32 |
33 |
34 | def opencv_loader(path):
35 |     """ Read image using opencv's imread function and returns it in rgb format"""
36 |     try:
37 |         im = cv.imread(path, cv.IMREAD_COLOR)
38 |         # convert to rgb and return
39 |         return cv.cvtColor(im, cv.COLOR_BGR2RGB)
40 |     except Exception as e:
41 |         print('ERROR: Could not read image "{}"'.format(path))
42 |         print(e)
43 |         return None
44 |
--------------------------------------------------------------------------------
/ltr/data/transforms.py:
--------------------------------------------------------------------------------
1 | import random
2 | import numpy as np
3 | import math
4 | import cv2 as cv
5 | import torch
6 | import torch.nn.functional as F
7 |
8 |
9 | class Transform:
10 |     """ Class for applying various image transformations."""
11 |     def __call__(self, *args):
12 |         rand_params = self.roll()
13 |         if rand_params is None:
14 |             rand_params = ()
15 |         elif not isinstance(rand_params, tuple):
16 |             rand_params = (rand_params,)
17 |         output = [self.transform(img, *rand_params) for img in args]
18 |         if len(output) == 1:
19 |             return output[0]
20 |         return output
21 |
22 |     def roll(self):
23 |         return None
24 |
25 |     def transform(self, img, *args):
26 |         """Must be deterministic"""
27 |         raise NotImplementedError
28 |
29 |
30 | class Compose:
31 |     """Composes several transforms together.
32 |
33 |     Args:
34 |         transforms (list of ``Transform`` objects): list of transforms to compose.
35 |     """
36 |
37 |     def __init__(self, transforms):
38 |         self.transforms = transforms
39 |
40 |     def __call__(self, *args):
41 |         for t in self.transforms:
42 |             if not isinstance(args, tuple):
43 |                 args = (args,)
44 |             args = t(*args)
45 |         return args
46 |
47 |     def __repr__(self):
48 |         format_string = self.__class__.__name__ + '('
49 |         for t in self.transforms:
50 |             format_string += '\n'
51 |             format_string += '    {0}'.format(t)
52 |         format_string += '\n)'
53 |         return format_string
54 |
55 |
56 | class ToTensorAndJitter(Transform):
57 |     """ Convert to a Tensor and jitter brightness"""
58 |     def __init__(self, brightness_jitter=0.0):
59 |         self.brightness_jitter = brightness_jitter
60 |
61 |     def roll(self):
62 |         return np.random.uniform(max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter)
63 |
64 |     def transform(self, img, brightness_factor):
65 |         # handle numpy array
66 |         img = torch.from_numpy(img.transpose((2, 0, 1)))
67 |
68 |         # backward compatibility
69 |         return img.float().mul(brightness_factor / 255.0).clamp(0.0, 1.0)
70 |
71 |
72 | class ToGrayscale(Transform):
73 |     """Converts image to grayscale with a given probability"""
74 |     def __init__(self, probability=0.5):
75 |         self.probability = probability
76 |         self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32)
77 |
78 |     def roll(self):
79 |         return random.random() < self.probability
80 |
81 |     def transform(self, img, do_grayscale):
82 |         if do_grayscale:
83 |             if isinstance(img, torch.Tensor):
84 |                 raise NotImplementedError('Implement torch variant.')
85 |             img_gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
86 |             return np.stack([img_gray, img_gray, img_gray], axis=2)
87 |             # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2)
88 |         return img
89 |
90 |
91 | class RandomHorizontalFlip(Transform):
92 |     """Horizontally flip the given NumPy image randomly with a probability p."""
93 |     def __init__(self, probability=0.5):
94 |         self.probability = probability
95 |
96 |     def roll(self):
97 |         return random.random() < self.probability
98 |
99 |     def transform(self, img, do_flip):
100 |         if do_flip:
101 |             if isinstance(img, torch.Tensor):
102 |                 return img.flip((2,))
103 |             return np.fliplr(img).copy()
104 |         return img
105 |
106 |
107 | class Blur(Transform):
108 |     """ Blur the image by applying a gaussian kernel with given sigma"""
109 |     def __init__(self, sigma):
110 |         if isinstance(sigma, (float, int)):
111 |             sigma = (sigma, sigma)
112 |         self.sigma = sigma
113 |         self.filter_size = [math.ceil(2 * s) for s in self.sigma]
114 |         x_coord = [torch.arange(-sz, sz + 1, dtype=torch.float32) for sz in self.filter_size]
115 |         self.filter = [torch.exp(-(x**2) / (2 * s**2)) for x, s in zip(x_coord, self.sigma)]
116 |         self.filter[0] = self.filter[0].view(1, 1, -1, 1) / self.filter[0].sum()
117 |         self.filter[1] = self.filter[1].view(1, 1, 1, -1) / self.filter[1].sum()
118 |
119 |     def transform(self, img):
120 |         if isinstance(img, torch.Tensor):
121 |             sz = img.shape[2:]
122 |             im1 = F.conv2d(img.view(-1, 1, sz[0], sz[1]), self.filter[0], padding=(self.filter_size[0], 0))
123 |             return F.conv2d(im1, self.filter[1], padding=(0, self.filter_size[1])).view(-1, sz[0], sz[1])
124 |         else:
125 |             raise NotImplementedError
126 |
--------------------------------------------------------------------------------
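The split between `roll()` and `transform()` is what lets a single random draw be applied identically to every image passed in one call (e.g. the train and test frames of a pair). A hypothetical subclass illustrating the pattern (`RandomVerticalFlip` is not part of the repository):

```python
import random
import numpy as np
from ltr.data.transforms import Transform, Compose, ToTensorAndJitter

class RandomVerticalFlip(Transform):
    """Vertically flip the given NumPy image with probability p."""
    def __init__(self, probability=0.5):
        self.probability = probability

    def roll(self):
        # Drawn once per __call__, then reused for every image in the call.
        return random.random() < self.probability

    def transform(self, img, do_flip):
        # Deterministic given the rolled parameter.
        return np.flipud(img).copy() if do_flip else img

transform_train = Compose([RandomVerticalFlip(0.5), ToTensorAndJitter(0.2)])
```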
/ltr/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from .lasot import Lasot
2 | from .got10k import Got10k
3 | from .tracking_net import TrackingNet
4 | from .imagenetvid import ImagenetVID
5 | from .coco_seq import MSCOCOSeq
6 |
7 |
--------------------------------------------------------------------------------
/ltr/dataset/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/dataset/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/dataset/__pycache__/base_dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/dataset/__pycache__/base_dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/dataset/__pycache__/coco_seq.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/dataset/__pycache__/coco_seq.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/dataset/__pycache__/got10k.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/dataset/__pycache__/got10k.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/dataset/__pycache__/imagenetvid.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/dataset/__pycache__/imagenetvid.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/dataset/__pycache__/lasot.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/dataset/__pycache__/lasot.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/dataset/__pycache__/tracking_net.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/dataset/__pycache__/tracking_net.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/dataset/base_dataset.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data
2 | from ltr.data.image_loader import default_image_loader
3 |
4 |
5 | class BaseDataset(torch.utils.data.Dataset):
6 |     """ Base class for datasets """
7 |
8 |     def __init__(self, root, image_loader=default_image_loader):
9 |         """
10 |         args:
11 |             root - The root path to the dataset
12 |             image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
13 |                                             is used by default.
14 |         """
15 |         if root == '':
16 |             raise Exception('The dataset path is not set up. Check your "ltr/admin/local.py".')
17 |         self.root = root
18 |         self.image_loader = image_loader
19 |
20 |         self.sequence_list = []     # Contains the list of sequences.
21 |
22 |     def __len__(self):
23 |         """ Returns the size of the dataset
24 |         returns:
25 |             int - number of samples in the dataset
26 |         """
27 |         return self.get_num_sequences()
28 |
29 |     def __getitem__(self, index):
30 |         """ Not to be used! Check get_frames() instead.
31 |         """
32 |         return None
33 |
34 |     def is_video_sequence(self):
35 |         """ Returns whether the dataset is a video dataset or an image dataset
36 |
37 |         returns:
38 |             bool - True if a video dataset
39 |         """
40 |         return True
41 |
42 |     def get_name(self):
43 |         """ Name of the dataset
44 |
45 |         returns:
46 |             string - Name of the dataset
47 |         """
48 |         raise NotImplementedError
49 |
50 |     def get_num_sequences(self):
51 |         """ Number of sequences in a dataset
52 |
53 |         returns:
54 |             int - number of sequences in the dataset."""
55 |         return len(self.sequence_list)
56 |
57 |     def get_sequence_info(self, seq_id):
58 |         """ Returns information about a particular sequence.
59 |
60 |         args:
61 |             seq_id - index of the sequence
62 |
63 |         returns:
64 |             Tensor - Annotation for the sequence. A 2d tensor of shape (num_frames, 4).
65 |                      Format [top_left_x, top_left_y, width, height]
66 |             Tensor - 1d Tensor specifying whether target is present (=1) for each frame. shape (num_frames,)
67 |         """
68 |         raise NotImplementedError
69 |
70 |     def get_frames(self, seq_id, frame_ids, anno=None):
71 |         """ Get a set of frames from a particular sequence
72 |
73 |         args:
74 |             seq_id      - index of sequence
75 |             frame_ids   - a list of frame numbers
76 |             anno (None) - The annotation for the sequence (see get_sequence_info). If None, it will be loaded.
77 |
78 |         returns:
79 |             list - List of frames corresponding to frame_ids
80 |             list - List of annotations (tensor of shape (4,)) for each frame
81 |             dict - A dict containing meta information about the sequence, e.g. class of the target object.
82 |
83 |         """
84 |         raise NotImplementedError
85 |
86 |
--------------------------------------------------------------------------------
/ltr/dataset/coco_seq.py:
--------------------------------------------------------------------------------
1 | import os
2 | from .base_dataset import BaseDataset
3 | from ltr.data.image_loader import default_image_loader
4 | import torch
5 | from pycocotools.coco import COCO
6 | from collections import OrderedDict
7 | from ltr.admin.environment import env_settings
8 |
9 |
10 | class MSCOCOSeq(BaseDataset):
11 |     """ The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.
12 |
13 |     Publication:
14 |         Microsoft COCO: Common Objects in Context.
15 |         Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,
16 |         Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
17 |         ECCV, 2014
18 |         https://arxiv.org/pdf/1405.0312.pdf
19 |
20 |     Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
21 |     organized as follows.
22 |         - coco_root
23 |             - annotations
24 |                 - instances_train2014.json
25 |             - images
26 |                 - train2014
27 |
28 |     Note: You also have to install the COCO Python API from https://github.com/cocodataset/cocoapi.
29 |     """
30 |
31 |     def __init__(self, root=None, image_loader=default_image_loader):
32 |         root = env_settings().coco_dir if root is None else root
33 |         super().__init__(root, image_loader)
34 |
35 |         self.img_pth = os.path.join(root, 'train2014/')
36 |         self.anno_path = os.path.join(root, 'annotations/instances_train2014.json')
37 |
38 |         # Load the COCO set.
39 |         self.coco_set = COCO(self.anno_path)
40 |
41 |         self.cats = self.coco_set.cats
42 |         self.sequence_list = self._get_sequence_list()
43 |
44 |     def _get_sequence_list(self):
45 |         ann_list = list(self.coco_set.anns.keys())
46 |         seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]
47 |         seq_list = [a for a in seq_list if self.coco_set.anns[a]['area'] > 100]
48 |         seq_list = [a for a in seq_list if (self.coco_set.anns[a]['bbox'][2]/self.coco_set.anns[a]['bbox'][3] < 5 and self.coco_set.anns[a]['bbox'][3]/self.coco_set.anns[a]['bbox'][2] < 5)]
49 |
50 |         return seq_list
51 |
52 |     def is_video_sequence(self):
53 |         return False
54 |
55 |     def get_name(self):
56 |         return 'coco'
57 |
58 |     def get_num_sequences(self):
59 |         return len(self.sequence_list)
60 |
61 |     def get_sequence_info(self, seq_id):
62 |         anno = self._get_anno(seq_id)
63 |
64 |         return anno, torch.Tensor([1])
65 |
66 |     def _get_anno(self, seq_id):
67 |         anno = self.coco_set.anns[self.sequence_list[seq_id]]['bbox']
68 |         return torch.Tensor(anno).view(1, 4)
69 |
70 |     def _get_frames(self, seq_id):
71 |         path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']
72 |         img = self.image_loader(os.path.join(self.img_pth, path))
73 |         return img
74 |
75 |     def get_meta_info(self, seq_id):
76 |         try:
77 |             cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
78 |             object_meta = OrderedDict({'object_class': cat_dict_current['name'],
79 |                                        'motion_class': None,
80 |                                        'major_class': cat_dict_current['supercategory'],
81 |                                        'root_class': None,
82 |                                        'motion_adverb': None})
83 |         except:
84 |             object_meta = OrderedDict({'object_class': None,
85 |                                        'motion_class': None,
86 |                                        'major_class': None,
87 |                                        'root_class': None,
88 |                                        'motion_adverb': None})
89 |         return object_meta
90 |
91 |     def get_frames(self, seq_id=None, frame_ids=None, anno=None):
92 |         # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a
93 |         # list containing these replicated images.
94 |         frame = self._get_frames(seq_id)
95 |
96 |         frame_list = [frame.copy() for _ in frame_ids]
97 |
98 |         if anno is None:
99 |             anno = self._get_anno(seq_id)
100 |
101 |         anno_frames = [anno.clone()[0, :] for _ in frame_ids]
102 |
103 |         object_meta = self.get_meta_info(seq_id)
104 |
105 |         return frame_list, anno_frames, object_meta
106 |
--------------------------------------------------------------------------------
/ltr/dataset/tracking_net.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | import os.path
4 | import numpy as np
5 | import pandas
6 | from collections import OrderedDict
7 |
8 | from ltr.data.image_loader import default_image_loader
9 | from .base_dataset import BaseDataset
10 | from ltr.admin.environment import env_settings
11 |
12 |
13 | def list_sequences(root, set_ids):
14 |     """ Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name).
15 |
16 |     args:
17 |         root: Root directory of TrackingNet
18 |         set_ids: Sets (0-11) which are to be used
19 |
20 |     returns:
21 |         list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence
22 |     """
23 |     sequence_list = []
24 |
25 |     for s in set_ids:
26 |         anno_dir = os.path.join(root, "TRAIN_" + str(s), "anno")
27 |
28 |         sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]
29 |         sequence_list += sequences_cur_set
30 |
31 |     return sequence_list
32 |
33 |
34 | class TrackingNet(BaseDataset):
35 |     """ TrackingNet dataset.
36 |
37 |     Publication:
38 |         TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.
39 |         Matthias Mueller, Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem
40 |         ECCV, 2018
41 |         https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf
42 |
43 |     Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.
44 |     """
45 |     def __init__(self, root=None, image_loader=default_image_loader, set_ids=None):
46 |         """
47 |         args:
48 |             root - The path to the TrackingNet folder, containing the training sets.
49 |             image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
50 |                                             is used by default.
51 |             set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the
52 |                              sets (0 - 11) will be used.
53 |         """
54 |         root = env_settings().trackingnet_dir if root is None else root
55 |         super().__init__(root, image_loader)
56 |
57 |         if set_ids is None:
58 |             set_ids = [i for i in range(12)]
59 |
60 |         self.set_ids = set_ids
61 |
62 |         # Keep a list of all videos. Sequence list is a list of tuples (set_id, video_name) containing the set_id and
63 |         # video_name for each sequence
64 |         self.sequence_list = list_sequences(self.root, self.set_ids)
65 |
66 |     def get_name(self):
67 |         return 'trackingnet'
68 |
69 |     def _read_anno(self, seq_id):
70 |         set_id = self.sequence_list[seq_id][0]
71 |         vid_name = self.sequence_list[seq_id][1]
72 |         anno_file = os.path.join(self.root, "TRAIN_" + str(set_id), "anno", vid_name + ".txt")
73 |         gt = pandas.read_csv(anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values
74 |         return torch.tensor(gt)
75 |
76 |     def get_sequence_info(self, seq_id):
77 |         anno = self._read_anno(seq_id)
78 |         target_visible = (anno[:, 2] > 10) & (anno[:, 3] > 10) & (anno[:, 2]/anno[:, 3] < 5) & (anno[:, 3]/anno[:, 2] < 5)
79 |         return anno, target_visible
80 |
81 |     def _get_frame(self, seq_id, frame_id):
82 |         set_id = self.sequence_list[seq_id][0]
83 |         vid_name = self.sequence_list[seq_id][1]
84 |         frame_path = os.path.join(self.root, "TRAIN_" + str(set_id), "frames", vid_name, str(frame_id) + ".jpg")
85 |         return self.image_loader(frame_path)
86 |
87 |     def get_frames(self, seq_id, frame_ids, anno=None):
88 |         frame_list = [self._get_frame(seq_id, f) for f in frame_ids]
89 |
90 |         if anno is None:
91 |             anno = self._read_anno(seq_id)
92 |
93 |         # Return as list of tensors
94 |         anno_frames = [anno[f_id, :] for f_id in frame_ids]
95 |
96 |         object_meta = OrderedDict({'object_class': None,
97 |                                    'motion_class': None,
98 |                                    'major_class': None,
99 |                                    'root_class': None,
100 |                                    'motion_adverb': None})
101 |
102 |         return frame_list, anno_frames, object_meta
103 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Jiayuan Mao
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/README.md:
--------------------------------------------------------------------------------
1 | # PreciseRoIPooling
2 | This repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation).
3 |
4 | **Acquisition of Localization Confidence for Accurate Object Detection**
5 |
6 | _Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.)
7 |
8 | https://arxiv.org/abs/1807.11590
9 |
10 | ## Brief
11 |
12 | In short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is:
13 |
14 | - different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates.
15 | - different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous.
16 |
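Concretely, writing $f(x, y)$ for the bilinearly interpolated feature map, PrRoI Pooling computes each output bin as an exact integral average over the bin (this is the formulation from the paper):

$$\mathrm{PrPool}\big(\mathit{bin}, \mathcal{F}\big) = \frac{1}{(x_2 - x_1)(y_2 - y_1)} \int_{y_1}^{y_2} \int_{x_1}^{x_2} f(x, y)\, \mathrm{d}x\, \mathrm{d}y$$

where the bin is the rectangle $\{(x, y) \mid x_1 \le x \le x_2,\; y_1 \le y \le y_2\}$. Because $f$ is piecewise linear in the coordinates, both the integral and its derivatives w.r.t. $x_1, y_1, x_2, y_2$ have closed forms.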
17 | For a clearer comparison, we illustrate RoI Pooling, RoI Align and PrRoI Pooling in the following figure. More details, including the gradient computation, can be found in our paper.
18 |
19 | ![Comparison of RoI Pooling, RoI Align and PrRoI Pooling](_assets/prroi_visualization.png)
20 |
21 | ## Implementation
22 |
23 | PrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome.
24 |
25 | ## Usage (PyTorch)
26 |
27 | In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented). To use the PrRoI Pooling module, first go to `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). To use the module in your code, simply do:
28 |
29 | ```
30 | from prroi_pool import PrRoIPool2D
31 |
32 | avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)
33 | roi_features = avg_pool(features, rois)
34 |
35 | # for those who want to use the "functional"
36 |
37 | from prroi_pool.functional import prroi_pool2d
38 | roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)
39 | ```
40 |
41 | Here,
42 |
43 | - RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor.
44 | - The RoI coordinates are multiplied by `spatial_scale`. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`.
45 | - The RoI coordinates follow the [L, R) convention. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`.
46 |
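Putting the two conventions together, a minimal sketch of building a valid `rois` tensor (values are illustrative; the module itself is CUDA-only):

```
import torch

# Column 0 is the batch index; columns 1-4 are (x0, y0, x1, y1) under the
# [L, R) convention, so the first row is a 4x4 box on image 0 of the batch.
rois = torch.tensor([
    [0.,  0.,  0.,  4.,  4.],
    [2.,  8.,  8., 16., 16.],
]).cuda()
```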
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/_assets/prroi_visualization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/external/PreciseRoIPooling/_assets/prroi_visualization.png
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # File : __init__.py
4 | # Author : Jiayuan Mao, Tete Xiao
5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
6 | # Date : 07/13/2018
7 | #
8 | # This file is part of PreciseRoIPooling.
9 | # Distributed under terms of the MIT license.
10 | # Copyright (c) 2017 Megvii Technology Limited.
11 |
12 | from .prroi_pool import *
13 |
14 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__pycache__/functional.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__pycache__/functional.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__pycache__/prroi_pool.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__pycache__/prroi_pool.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/_prroi_pooling/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from torch.utils.ffi import _wrap_function
3 | from .__prroi_pooling import lib as _lib, ffi as _ffi
4 |
5 | __all__ = []
6 | def _import_symbols(locals):
7 | for symbol in dir(_lib):
8 | fn = getattr(_lib, symbol)
9 | if callable(fn):
10 | locals[symbol] = _wrap_function(fn, _ffi)
11 | else:
12 | locals[symbol] = fn
13 | __all__.append(symbol)
14 |
15 | _import_symbols(locals())
16 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/_prroi_pooling/__prroi_pooling.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/_prroi_pooling/__prroi_pooling.so
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/_prroi_pooling/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/_prroi_pooling/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/build.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # File : build.py
4 | # Author : Jiayuan Mao, Tete Xiao
5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
6 | # Date : 07/13/2018
7 | #
8 | # This file is part of PreciseRoIPooling.
9 | # Distributed under terms of the MIT license.
10 | # Copyright (c) 2017 Megvii Technology Limited.
11 |
12 | import os
13 | import torch
14 |
15 | from torch.utils.ffi import create_extension
16 |
17 | headers = []
18 | sources = []
19 | defines = []
20 | extra_objects = []
21 | with_cuda = False
22 |
23 | if torch.cuda.is_available():
24 | with_cuda = True
25 |
26 |     headers += ['src/prroi_pooling_gpu.h']
27 | sources += ['src/prroi_pooling_gpu.c']
28 | defines += [('WITH_CUDA', None)]
29 |
30 | this_file = os.path.dirname(os.path.realpath(__file__))
31 | extra_objects_cuda = ['src/prroi_pooling_gpu_impl.cu.o']
32 | extra_objects_cuda = [os.path.join(this_file, fname) for fname in extra_objects_cuda]
33 | extra_objects.extend(extra_objects_cuda)
34 | else:
35 | # TODO(Jiayuan Mao @ 07/13): remove this restriction after we support the cpu implementation.
36 |     raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implementations.')
37 |
38 | ffi = create_extension(
39 | '_prroi_pooling',
40 | headers=headers,
41 | sources=sources,
42 | define_macros=defines,
43 | relative_to=__file__,
44 | with_cuda=with_cuda,
45 | extra_objects=extra_objects
46 | )
47 |
48 | if __name__ == '__main__':
49 | ffi.build()
50 |
51 |
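Note that `torch.utils.ffi` is no longer available from PyTorch 1.0 onwards, so this build script is tied to the PyTorch 0.4 line. A hypothetical port to the newer JIT build API might look like the sketch below; the C/CUDA sources would also need rewriting against ATen instead of THC, so this is orientation only, not a drop-in replacement:

```
# Hypothetical sketch: JIT-compile the extension with torch.utils.cpp_extension,
# assuming the sources have been ported off the THC API.
from torch.utils.cpp_extension import load

_prroi_pooling = load(
    name='_prroi_pooling',
    sources=['src/prroi_pooling_gpu.c', 'src/prroi_pooling_gpu_impl.cu'],
    verbose=True,
)
```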
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # File : functional.py
4 | # Author : Jiayuan Mao, Tete Xiao
5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
6 | # Date : 07/13/2018
7 | #
8 | # This file is part of PreciseRoIPooling.
9 | # Distributed under terms of the MIT license.
10 | # Copyright (c) 2017 Megvii Technology Limited.
11 |
12 | import torch
13 | import torch.autograd as ag
14 |
15 | try:
16 | from . import _prroi_pooling
17 | except ImportError:
18 |     raise ImportError('Cannot find the compiled Precise RoI Pooling library. Run ./travis.sh in the directory first.')
19 |
20 | __all__ = ['prroi_pool2d']
21 |
22 |
23 | class PrRoIPool2DFunction(ag.Function):
24 | @staticmethod
25 | def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):
26 | assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \
27 | 'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type())
28 |
29 | features = features.contiguous()
30 | rois = rois.contiguous()
31 | pooled_height = int(pooled_height)
32 | pooled_width = int(pooled_width)
33 | spatial_scale = float(spatial_scale)
34 |
35 | params = (pooled_height, pooled_width, spatial_scale)
36 | batch_size, nr_channels, data_height, data_width = features.size()
37 | nr_rois = rois.size(0)
38 | output = torch.zeros(
39 | (nr_rois, nr_channels, pooled_height, pooled_width),
40 | dtype=features.dtype, device=features.device
41 | )
42 |
43 | if features.is_cuda:
44 | _prroi_pooling.prroi_pooling_forward_cuda(features, rois, output, *params)
45 | ctx.params = params
46 | # everything here is contiguous.
47 | ctx.save_for_backward(features, rois, output)
48 | else:
49 |             raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implementations.')
50 |
51 | return output
52 |
53 | @staticmethod
54 | def backward(ctx, grad_output):
55 | features, rois, output = ctx.saved_tensors
56 | grad_input = grad_coor = None
57 |
58 | if features.requires_grad:
59 | grad_output = grad_output.contiguous()
60 | grad_input = torch.zeros_like(features)
61 | _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, grad_input, *ctx.params)
62 | if rois.requires_grad:
63 | grad_output = grad_output.contiguous()
64 | grad_coor = torch.zeros_like(rois)
65 | _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, grad_coor, *ctx.params)
66 |
67 | return grad_input, grad_coor, None, None, None
68 |
69 |
70 | prroi_pool2d = PrRoIPool2DFunction.apply
71 |
72 |
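Since `backward` also returns `grad_coor`, losses can be backpropagated into the box coordinates themselves. A minimal sketch, assuming the extension has been compiled via `./travis.sh` and a CUDA device is available:

```
import torch
from prroi_pool.functional import prroi_pool2d

features = torch.rand(1, 16, 24, 32, device='cuda')
# (batch_index, x0, y0, x1, y1); gradients flow into the coordinates.
rois = torch.tensor([[0., 4., 4., 20., 20.]], device='cuda', requires_grad=True)

pooled = prroi_pool2d(features, rois, 7, 7, 1.0)
pooled.sum().backward()
print(rois.grad)  # continuous gradient w.r.t. the RoI coordinates
```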
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # File : prroi_pool.py
4 | # Author : Jiayuan Mao, Tete Xiao
5 | # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
6 | # Date : 07/13/2018
7 | #
8 | # This file is part of PreciseRoIPooling.
9 | # Distributed under terms of the MIT license.
10 | # Copyright (c) 2017 Megvii Technology Limited.
11 |
12 | import torch.nn as nn
13 |
14 | from .functional import prroi_pool2d
15 |
16 | __all__ = ['PrRoIPool2D']
17 |
18 |
19 | class PrRoIPool2D(nn.Module):
20 | def __init__(self, pooled_height, pooled_width, spatial_scale):
21 | super().__init__()
22 |
23 | self.pooled_height = int(pooled_height)
24 | self.pooled_width = int(pooled_width)
25 | self.spatial_scale = float(spatial_scale)
26 |
27 | def forward(self, features, rois):
28 | return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale)
29 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c:
--------------------------------------------------------------------------------
1 | /*
2 | * File : prroi_pooling_gpu.c
3 | * Author : Jiayuan Mao, Tete Xiao
4 | * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
5 | * Date : 07/13/2018
6 | *
7 | * Distributed under terms of the MIT license.
8 | * Copyright (c) 2017 Megvii Technology Limited.
9 | */
10 |
11 | #include <math.h>
12 | #include <THC/THC.h>
13 |
14 | #include "prroi_pooling_gpu_impl.cuh"
15 |
16 | extern THCState *state;
17 |
18 | int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale) {
19 | const float *data_ptr = THCudaTensor_data(state, features);
20 | const float *rois_ptr = THCudaTensor_data(state, rois);
21 | float *output_ptr = THCudaTensor_data(state, output);
22 |
23 | int nr_rois = THCudaTensor_size(state, rois, 0);
24 | int nr_channels = THCudaTensor_size(state, features, 1);
25 | int height = THCudaTensor_size(state, features, 2);
26 | int width = THCudaTensor_size(state, features, 3);
27 | int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
28 |
29 | cudaStream_t stream = THCState_getCurrentStream(state);
30 |
31 | PrRoIPoolingForwardGpu(
32 | stream, data_ptr, rois_ptr, output_ptr,
33 | nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
34 | top_count
35 | );
36 |
37 | return 1;
38 | }
39 |
40 | int prroi_pooling_backward_cuda(
41 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,
42 | int pooled_height, int pooled_width, float spatial_scale) {
43 |
44 | const float *data_ptr = THCudaTensor_data(state, features);
45 | const float *rois_ptr = THCudaTensor_data(state, rois);
46 | const float *output_ptr = THCudaTensor_data(state, output);
47 | const float *output_diff_ptr = THCudaTensor_data(state, output_diff);
48 | float *features_diff_ptr = THCudaTensor_data(state, features_diff);
49 |
50 | int nr_rois = THCudaTensor_size(state, rois, 0);
51 | int batch_size = THCudaTensor_size(state, features, 0);
52 | int nr_channels = THCudaTensor_size(state, features, 1);
53 | int height = THCudaTensor_size(state, features, 2);
54 | int width = THCudaTensor_size(state, features, 3);
55 | int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
56 | int bottom_count = batch_size * nr_channels * height * width;
57 |
58 | cudaStream_t stream = THCState_getCurrentStream(state);
59 |
60 | PrRoIPoolingBackwardGpu(
61 | stream, data_ptr, rois_ptr, output_ptr, output_diff_ptr, features_diff_ptr,
62 | nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
63 | top_count, bottom_count
64 | );
65 |
66 | return 1;
67 | }
68 |
69 | int prroi_pooling_coor_backward_cuda(
70 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *coor_diff,
71 | int pooled_height, int pooled_width, float spatial_scale) {
72 |
73 | const float *data_ptr = THCudaTensor_data(state, features);
74 | const float *rois_ptr = THCudaTensor_data(state, rois);
75 | const float *output_ptr = THCudaTensor_data(state, output);
76 | const float *output_diff_ptr = THCudaTensor_data(state, output_diff);
77 |     float *coor_diff_ptr = THCudaTensor_data(state, coor_diff);
78 |
79 | int nr_rois = THCudaTensor_size(state, rois, 0);
80 | int nr_channels = THCudaTensor_size(state, features, 1);
81 | int height = THCudaTensor_size(state, features, 2);
82 | int width = THCudaTensor_size(state, features, 3);
83 | int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
84 | int bottom_count = nr_rois * 5;
85 |
86 | cudaStream_t stream = THCState_getCurrentStream(state);
87 |
88 | PrRoIPoolingCoorBackwardGpu(
89 | stream, data_ptr, rois_ptr, output_ptr, output_diff_ptr, coor_diff_ptr,
90 | nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
91 | top_count, bottom_count
92 | );
93 |
94 | return 1;
95 | }
96 |
97 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h:
--------------------------------------------------------------------------------
1 | /*
2 | * File : prroi_pooling_gpu.h
3 | * Author : Jiayuan Mao, Tete Xiao
4 | * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
5 | * Date : 07/13/2018
6 | *
7 | * Distributed under terms of the MIT license.
8 | * Copyright (c) 2017 Megvii Technology Limited.
9 | */
10 |
11 | int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale);
12 |
13 | int prroi_pooling_backward_cuda(
14 | THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,
15 | int pooled_height, int pooled_width, float spatial_scale
16 | );
17 |
18 | int prroi_pooling_coor_backward_cuda(
19 |     THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *coor_diff,
20 |     int pooled_height, int pooled_width, float spatial_scale
21 | );
22 |
23 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu.o
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh:
--------------------------------------------------------------------------------
1 | /*
2 | * File : prroi_pooling_gpu_impl.cuh
3 | * Author : Tete Xiao, Jiayuan Mao
4 | * Email : jasonhsiao97@gmail.com
5 | *
6 | * Distributed under terms of the MIT license.
7 | * Copyright (c) 2017 Megvii Technology Limited.
8 | */
9 |
10 | #ifndef PRROI_POOLING_GPU_IMPL_CUH
11 | #define PRROI_POOLING_GPU_IMPL_CUH
12 |
13 | #ifdef __cplusplus
14 | extern "C" {
15 | #endif
16 |
17 | #define F_DEVPTR_IN const float *
18 | #define F_DEVPTR_OUT float *
19 |
20 | void PrRoIPoolingForwardGpu(
21 | cudaStream_t stream,
22 | F_DEVPTR_IN bottom_data,
23 | F_DEVPTR_IN bottom_rois,
24 | F_DEVPTR_OUT top_data,
25 | const int channels_, const int height_, const int width_,
26 | const int pooled_height_, const int pooled_width_,
27 | const float spatial_scale_,
28 | const int top_count);
29 |
30 | void PrRoIPoolingBackwardGpu(
31 | cudaStream_t stream,
32 | F_DEVPTR_IN bottom_data,
33 | F_DEVPTR_IN bottom_rois,
34 | F_DEVPTR_IN top_data,
35 | F_DEVPTR_IN top_diff,
36 | F_DEVPTR_OUT bottom_diff,
37 | const int channels_, const int height_, const int width_,
38 | const int pooled_height_, const int pooled_width_,
39 | const float spatial_scale_,
40 | const int top_count, const int bottom_count);
41 |
42 | void PrRoIPoolingCoorBackwardGpu(
43 | cudaStream_t stream,
44 | F_DEVPTR_IN bottom_data,
45 | F_DEVPTR_IN bottom_rois,
46 | F_DEVPTR_IN top_data,
47 | F_DEVPTR_IN top_diff,
48 | F_DEVPTR_OUT bottom_diff,
49 | const int channels_, const int height_, const int width_,
50 | const int pooled_height_, const int pooled_width_,
51 | const float spatial_scale_,
52 | const int top_count, const int bottom_count);
53 |
54 | #ifdef __cplusplus
55 | } /* !extern "C" */
56 | #endif
57 |
58 | #endif /* !PRROI_POOLING_GPU_IMPL_CUH */
59 |
60 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/travis.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 | # File : travis.sh
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | #
6 | # Distributed under terms of the MIT license.
7 | # Copyright (c) 2017 Megvii Technology Limited.
8 |
9 | cd src
10 | echo "Working directory: " `pwd`
11 | echo "Compiling prroi_pooling kernels by nvcc..."
12 | nvcc -c -o prroi_pooling_gpu_impl.cu.o prroi_pooling_gpu_impl.cu -x cu -Xcompiler -fPIC -arch=sm_52
13 |
14 | cd ../
15 | echo "Working directory: " `pwd`
16 | echo "Building python libraries..."
17 | python3 build.py
18 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File : test_prroi_pooling2d.py
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | # Date : 18/02/2018
6 | #
7 | # This file is part of Jacinle.
8 |
9 | import unittest
10 |
11 | import torch
12 | import torch.nn as nn
13 | import torch.nn.functional as F
14 |
15 | from jactorch.utils.unittest import TorchTestCase
16 |
17 | from prroi_pool import PrRoIPool2D
18 |
19 |
20 | class TestPrRoIPool2D(TorchTestCase):
21 | def test_forward(self):
22 | pool = PrRoIPool2D(7, 7, spatial_scale=0.5)
23 | features = torch.rand((4, 16, 24, 32)).cuda()
24 | rois = torch.tensor([
25 | [0, 0, 0, 14, 14],
26 | [1, 14, 14, 28, 28],
27 | ]).float().cuda()
28 |
29 | out = pool(features, rois)
30 | out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)
31 |
32 | self.assertTensorClose(out, torch.stack((
33 | out_gold[0, :, :7, :7],
34 | out_gold[1, :, 7:14, 7:14],
35 | ), dim=0))
36 |
37 | def test_backward_shapeonly(self):
38 | pool = PrRoIPool2D(2, 2, spatial_scale=0.5)
39 |
40 | features = torch.rand((4, 2, 24, 32)).cuda()
41 | rois = torch.tensor([
42 | [0, 0, 0, 4, 4],
43 | [1, 14, 14, 18, 18],
44 | ]).float().cuda()
45 | features.requires_grad = rois.requires_grad = True
46 | out = pool(features, rois)
47 |
48 | loss = out.sum()
49 | loss.backward()
50 |
51 | self.assertTupleEqual(features.size(), features.grad.size())
52 | self.assertTupleEqual(rois.size(), rois.grad.size())
53 |
54 |
55 | if __name__ == '__main__':
56 | unittest.main()
57 |
--------------------------------------------------------------------------------
/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh:
--------------------------------------------------------------------------------
1 | /*
2 | * File : prroi_pooling_gpu_impl.cuh
3 | * Author : Tete Xiao, Jiayuan Mao
4 | * Email : jasonhsiao97@gmail.com
5 | *
6 | * Distributed under terms of the MIT license.
7 | * Copyright (c) 2017 Megvii Technology Limited.
8 | */
9 |
10 | #ifndef PRROI_POOLING_GPU_IMPL_CUH
11 | #define PRROI_POOLING_GPU_IMPL_CUH
12 |
13 | #ifdef __cplusplus
14 | extern "C" {
15 | #endif
16 |
17 | #define F_DEVPTR_IN const float *
18 | #define F_DEVPTR_OUT float *
19 |
20 | void PrRoIPoolingForwardGpu(
21 | cudaStream_t stream,
22 | F_DEVPTR_IN bottom_data,
23 | F_DEVPTR_IN bottom_rois,
24 | F_DEVPTR_OUT top_data,
25 | const int channels_, const int height_, const int width_,
26 | const int pooled_height_, const int pooled_width_,
27 | const float spatial_scale_,
28 | const int top_count);
29 |
30 | void PrRoIPoolingBackwardGpu(
31 | cudaStream_t stream,
32 | F_DEVPTR_IN bottom_data,
33 | F_DEVPTR_IN bottom_rois,
34 | F_DEVPTR_IN top_data,
35 | F_DEVPTR_IN top_diff,
36 | F_DEVPTR_OUT bottom_diff,
37 | const int channels_, const int height_, const int width_,
38 | const int pooled_height_, const int pooled_width_,
39 | const float spatial_scale_,
40 | const int top_count, const int bottom_count);
41 |
42 | void PrRoIPoolingCoorBackwardGpu(
43 | cudaStream_t stream,
44 | F_DEVPTR_IN bottom_data,
45 | F_DEVPTR_IN bottom_rois,
46 | F_DEVPTR_IN top_data,
47 | F_DEVPTR_IN top_diff,
48 | F_DEVPTR_OUT bottom_diff,
49 | const int channels_, const int height_, const int width_,
50 | const int pooled_height_, const int pooled_width_,
51 | const float spatial_scale_,
52 | const int top_count, const int bottom_count);
53 |
54 | #ifdef __cplusplus
55 | } /* !extern "C" */
56 | #endif
57 |
58 | #endif /* !PRROI_POOLING_GPU_IMPL_CUH */
59 |
60 |
--------------------------------------------------------------------------------
/ltr/models/SBDT/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/ltr/models/SBDT/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/SBDT/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/SBDT/__pycache__/network.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/SBDT/__pycache__/network.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/ltr/models/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from .resnet import *
2 | from .resnet18_vggm import *
3 |
--------------------------------------------------------------------------------
/ltr/models/backbone/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/backbone/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/backbone/__pycache__/resnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/backbone/__pycache__/resnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/backbone/__pycache__/resnet18_vggm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/backbone/__pycache__/resnet18_vggm.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/backbone/resnet.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch.nn as nn
3 | from collections import OrderedDict
4 | import torch.utils.model_zoo as model_zoo
5 | from torchvision.models.resnet import BasicBlock, Bottleneck, model_urls
6 |
7 |
8 | class ResNet(nn.Module):
9 | """ ResNet network module. Allows extracting specific feature blocks."""
10 | def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64):
11 | self.inplanes = inplanes
12 | super(ResNet, self).__init__()
13 | self.output_layers = output_layers
14 | self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3,
15 | bias=False)
16 | self.bn1 = nn.BatchNorm2d(inplanes)
17 | self.relu = nn.ReLU(inplace=True)
18 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
19 | self.layer1 = self._make_layer(block, inplanes, layers[0])
20 | self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=2)
21 | self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=2)
22 | self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=2)
23 | # self.avgpool = nn.AvgPool2d(7, stride=1)
24 | self.avgpool = nn.AdaptiveAvgPool2d((1,1))
25 | self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes)
26 |
27 | for m in self.modules():
28 | if isinstance(m, nn.Conv2d):
29 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
30 | m.weight.data.normal_(0, math.sqrt(2. / n))
31 | elif isinstance(m, nn.BatchNorm2d):
32 | m.weight.data.fill_(1)
33 | m.bias.data.zero_()
34 |
35 | def _make_layer(self, block, planes, blocks, stride=1):
36 | downsample = None
37 | if stride != 1 or self.inplanes != planes * block.expansion:
38 | downsample = nn.Sequential(
39 | nn.Conv2d(self.inplanes, planes * block.expansion,
40 | kernel_size=1, stride=stride, bias=False),
41 | nn.BatchNorm2d(planes * block.expansion),
42 | )
43 |
44 | layers = []
45 | layers.append(block(self.inplanes, planes, stride, downsample))
46 | self.inplanes = planes * block.expansion
47 | for i in range(1, blocks):
48 | layers.append(block(self.inplanes, planes))
49 |
50 | return nn.Sequential(*layers)
51 |
52 | def _add_output_and_check(self, name, x, outputs, output_layers):
53 | if name in output_layers:
54 | outputs[name] = x
55 | return len(output_layers) == len(outputs)
56 |
57 | def forward(self, x, output_layers=None):
58 | """ Forward pass with input x. The output_layers specify the feature blocks which must be returned """
59 | outputs = OrderedDict()
60 |
61 | if output_layers is None:
62 | output_layers = self.output_layers
63 |
64 | x = self.conv1(x)
65 | x = self.bn1(x)
66 | x = self.relu(x)
67 |
68 | if self._add_output_and_check('conv1', x, outputs, output_layers):
69 | return outputs
70 |
71 | x = self.maxpool(x)
72 |
73 | x = self.layer1(x)
74 |
75 | if self._add_output_and_check('layer1', x, outputs, output_layers):
76 | return outputs
77 |
78 | x = self.layer2(x)
79 |
80 | if self._add_output_and_check('layer2', x, outputs, output_layers):
81 | return outputs
82 |
83 | x = self.layer3(x)
84 |
85 | if self._add_output_and_check('layer3', x, outputs, output_layers):
86 | return outputs
87 |
88 | x = self.layer4(x)
89 |
90 | if self._add_output_and_check('layer4', x, outputs, output_layers):
91 | return outputs
92 |
93 | x = self.avgpool(x)
94 | x = x.view(x.size(0), -1)
95 | x = self.fc(x)
96 |
97 | if self._add_output_and_check('fc', x, outputs, output_layers):
98 | return outputs
99 |
100 | if len(output_layers) == 1 and output_layers[0] == 'default':
101 | return x
102 |
103 |         raise ValueError('output_layers is invalid.')
104 |
105 |
106 | def resnet18(output_layers=None, pretrained=False):
107 | """Constructs a ResNet-18 model.
108 | """
109 |
110 | if output_layers is None:
111 | output_layers = ['default']
112 | else:
113 | for l in output_layers:
114 | if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
115 | raise ValueError('Unknown layer: {}'.format(l))
116 |
117 | model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers)
118 |
119 | if pretrained:
120 | model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
121 | return model
122 |
123 |
124 | def resnet50(output_layers=None, pretrained=False):
125 | """Constructs a ResNet-50 model.
126 | """
127 |
128 | if output_layers is None:
129 | output_layers = ['default']
130 | else:
131 | for l in output_layers:
132 | if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
133 | raise ValueError('Unknown layer: {}'.format(l))
134 |
135 | model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers)
136 | if pretrained:
137 | model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
138 | return model
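A usage sketch of the block-extraction interface above (shapes are for a 224x224 input):

```
import torch
from ltr.models.backbone import resnet18

net = resnet18(output_layers=['layer2', 'layer3'])
feats = net(torch.rand(1, 3, 224, 224))
print({k: v.shape for k, v in feats.items()})
# expected: layer2 -> (1, 128, 28, 28), layer3 -> (1, 256, 14, 14)
```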
--------------------------------------------------------------------------------
/ltr/models/bbreg/__init__.py:
--------------------------------------------------------------------------------
1 | from .atom_iou_net import AtomIoUNet
2 |
--------------------------------------------------------------------------------
/ltr/models/bbreg/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/bbreg/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/bbreg/__pycache__/atom.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/bbreg/__pycache__/atom.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/bbreg/__pycache__/atom_iou_net.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/bbreg/__pycache__/atom_iou_net.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/bbreg/atom.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import ltr.models.backbone as backbones
3 | import ltr.models.bbreg as bbmodels
4 | from ltr import model_constructor
5 |
6 |
7 | class ATOMnet(nn.Module):
8 | """ ATOM network module"""
9 | def __init__(self, feature_extractor, bb_regressor, bb_regressor_layer, extractor_grad=True):
10 | """
11 | args:
12 | feature_extractor - backbone feature extractor
13 | bb_regressor - IoU prediction module
14 | bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to
15 | bb_regressor
16 | extractor_grad - Bool indicating whether backbone feature extractor requires gradients
17 | """
18 | super(ATOMnet, self).__init__()
19 |
20 | self.feature_extractor = feature_extractor
21 | self.bb_regressor = bb_regressor
22 | self.bb_regressor_layer = bb_regressor_layer
23 |
24 | if not extractor_grad:
25 | for p in self.feature_extractor.parameters():
26 | p.requires_grad_(False)
27 |
28 | def forward(self, train_imgs, test_imgs, train_bb, test_proposals):
29 | """ Forward pass
30 |         Note: If training is done in sequence mode, that is, test_imgs.dim() == 5, then the sequence dimension
31 |         comes first; test_imgs is thus of the form [sequence, batch, feature, row, col].
32 | """
33 | num_sequences = train_imgs.shape[-4]
34 | num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1
35 | num_test_images = test_imgs.shape[0] if test_imgs.dim() == 5 else 1
36 |
37 | # Extract backbone features
38 | train_feat = self.extract_backbone_features(
39 | train_imgs.view(-1, train_imgs.shape[-3], train_imgs.shape[-2], train_imgs.shape[-1]))
40 | test_feat = self.extract_backbone_features(
41 | test_imgs.view(-1, test_imgs.shape[-3], test_imgs.shape[-2], test_imgs.shape[-1]))
42 |
43 | # For clarity, send the features to bb_regressor in sequence form, i.e. [sequence, batch, feature, row, col]
44 | train_feat_iou = [feat.view(num_train_images, num_sequences, feat.shape[-3], feat.shape[-2], feat.shape[-1])
45 | for feat in train_feat.values()]
46 | test_feat_iou = [feat.view(num_test_images, num_sequences, feat.shape[-3], feat.shape[-2], feat.shape[-1])
47 | for feat in test_feat.values()]
48 |
49 | # Obtain iou prediction
50 | iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou,
51 | train_bb.view(num_train_images, num_sequences, 4),
52 | test_proposals.view(num_train_images, num_sequences, -1, 4))
53 | return iou_pred
54 |
55 | def extract_backbone_features(self, im, layers=None):
56 | if layers is None:
57 | layers = self.bb_regressor_layer
58 | return self.feature_extractor(im, layers)
59 |
60 | def extract_features(self, im, layers):
61 | return self.feature_extractor(im, layers)
62 |
63 |
64 |
65 | @model_constructor
66 | def atom_resnet18(iou_input_dim=(256,256), iou_inter_dim=(256,256), backbone_pretrained=True):
67 | # backbone
68 | backbone_net = backbones.resnet18(pretrained=backbone_pretrained)
69 |
70 | # Bounding box regressor
71 | iou_predictor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)
72 |
73 | net = ATOMnet(feature_extractor=backbone_net, bb_regressor=iou_predictor, bb_regressor_layer=['layer2', 'layer3'],
74 | extractor_grad=False)
75 |
76 | return net
77 |
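For reference, a sketch of the tensor shapes the forward pass above expects in sequence mode (sizes are illustrative, not prescribed by the code):

```
import torch

n_train, n_test, batch = 1, 1, 2
train_imgs = torch.rand(n_train, batch, 3, 288, 288)  # [sequence, batch, C, H, W]
test_imgs = torch.rand(n_test, batch, 3, 288, 288)
train_bb = torch.rand(n_train, batch, 4)              # one box per train frame
test_proposals = torch.rand(n_test, batch, 16, 4)     # 16 proposals per test frame
# iou_pred = net(train_imgs, test_imgs, train_bb, test_proposals)
```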
--------------------------------------------------------------------------------
/ltr/models/layers/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/ltr/models/layers/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/layers/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/layers/__pycache__/blocks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/layers/__pycache__/blocks.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/layers/blocks.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | def conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True,
5 | batch_norm=True, relu=True):
6 | layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
7 | padding=padding, dilation=dilation, bias=bias)]
8 | if batch_norm:
9 | layers.append(nn.BatchNorm2d(out_planes))
10 | if relu:
11 | layers.append(nn.ReLU(inplace=True))
12 | return nn.Sequential(*layers)
13 |
14 |
15 | class LinearBlock(nn.Module):
16 | def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True):
17 | super().__init__()
18 | self.linear = nn.Linear(in_planes*input_sz*input_sz, out_planes, bias=bias)
19 | self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None
20 | self.relu = nn.ReLU(inplace=True) if relu else None
21 |
22 | def forward(self, x):
23 | x = self.linear(x.view(x.shape[0], -1))
24 | if self.bn is not None:
25 | x = self.bn(x.view(x.shape[0], x.shape[1], 1, 1))
26 | if self.relu is not None:
27 | x = self.relu(x)
28 | return x.view(x.shape[0], -1)
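A minimal sketch of how `LinearBlock` behaves: it flattens a square `(B, C, s, s)` RoI feature into a `(B, out_planes)` vector, re-viewing it as 4D only so `BatchNorm2d` can be applied:

```
import torch
from ltr.models.layers.blocks import LinearBlock

fb = LinearBlock(in_planes=256, out_planes=512, input_sz=8)
x = torch.rand(4, 256, 8, 8)   # e.g. PrRoI-pooled features
print(fb(x).shape)             # torch.Size([4, 512])
```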
--------------------------------------------------------------------------------
/ltr/models/locator/__init__.py:
--------------------------------------------------------------------------------
1 | from .onlineRR18 import OnlineRRNet as OnlineRRNet18
2 | from .onlineRR50 import OnlineRRNet as OnlineRRNet50
--------------------------------------------------------------------------------
/ltr/models/locator/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/locator/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/locator/__pycache__/onlineRR.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/locator/__pycache__/onlineRR.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/locator/__pycache__/onlineRR18.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/locator/__pycache__/onlineRR18.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/locator/__pycache__/onlineRR50.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/locator/__pycache__/onlineRR50.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/locator/__pycache__/onlineRR_net.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/models/locator/__pycache__/onlineRR_net.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/models/locator/onlineRR18.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | import torch.nn as nn
4 | from ltr.models.layers.blocks import LinearBlock
5 | from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D
6 |
7 |
8 | def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
9 | return nn.Sequential(
10 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True),
11 | nn.BatchNorm2d(out_planes),
12 | nn.ReLU(inplace=True))
13 |
14 |
15 | class OnlineRRNet(nn.Module):
16 | def __init__(self, input_dim=(128,256), pred_input_dim=(128,256)):
17 | super().__init__()
18 | self.conv3_1 = conv(input_dim[0], pred_input_dim[0], kernel_size=3, stride=1)
19 | self.conv3_2 = conv(pred_input_dim[0], pred_input_dim[0], kernel_size=3, stride=1)
20 | self.conv4_1 = conv(input_dim[1], pred_input_dim[1], kernel_size=3, stride=1)
21 | self.conv4_2 = conv(pred_input_dim[1], pred_input_dim[1], kernel_size=3, stride=1)
22 |
23 | self.prroi_pool3 = PrRoIPool2D(8, 8, 1/8)
24 | self.prroi_pool4 = PrRoIPool2D(4, 4, 1/16)
25 |
26 |         ## We apply L2 normalization to the features, so batch_norm is not needed.
27 |         ## When relu is True, the linear system easily becomes non-invertible.
28 | self.fc3 = LinearBlock(pred_input_dim[0], 512, 8, batch_norm=False, relu=False)
29 | self.fc4 = LinearBlock(pred_input_dim[1], 512, 4, batch_norm=False, relu=False)
30 |
31 | # Init weights
32 | for m in self.modules():
33 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):
34 | nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
35 | if m.bias is not None:
36 | m.bias.data.zero_()
37 |
38 |
39 | def forward(self, train_feat, test_feat, train_proposals, train_labels, test_proposals):
40 | assert(train_feat[0].shape[0]==1)
41 |
42 | batch_size = train_feat[0].shape[1]
43 |
44 | # transform
45 | train_feat = [f[0,...] for f in train_feat]
46 | test_feat = [f[0,...] for f in test_feat]
47 | train_labels = train_labels.view(batch_size, -1, 1)
48 | train_proposals = train_proposals.view(batch_size, -1, 4)
49 | test_proposals = test_proposals.view(batch_size, -1, 4)
50 |
51 | # Extract features
52 | train_feat_locator = self.get_locator_feat(train_feat, train_proposals)
53 | test_feat_locator = self.get_locator_feat(test_feat, test_proposals)
54 |
55 | # Train by solving the ridge regression problem
56 | train_XTY = torch.matmul(train_feat_locator.permute(0,2,1), train_labels)
57 | train_XTX = torch.matmul(train_feat_locator.permute(0,2,1), train_feat_locator)
58 | W, _ = torch.gesv(train_XTY, train_XTX + 0.1*torch.eye(train_feat_locator.shape[2]).to(train_XTX.device))
59 |
60 | # Evaluation
61 | prediction = torch.matmul(test_feat_locator, W)
62 | return prediction
63 |
64 |
65 | def get_locator_feat(self, feat, proposals):
66 | batch_size = feat[0].shape[0]
67 | num_proposals_per_batch = proposals.shape[1]
68 |
69 | # Convolution
70 | feat_layer_3 = self.conv3_2(self.conv3_1(feat[0]))
71 | feat_layer_4 = self.conv4_2(self.conv4_1(feat[1]))
72 |
73 | # Convert the xywh input proposals to x0y0x1y1 format
74 | proposals_xyxy = torch.cat((proposals[:, :, 0:2], proposals[:, :, 0:2] + proposals[:, :, 2:4]), dim=2)
75 |
76 | # Add batch index to rois
77 | batch_index = torch.Tensor([x for x in range(batch_size)]).view(batch_size, 1).to(feat_layer_3.device)
78 | rois = torch.cat((batch_index.view(batch_size, -1, 1).expand(-1, num_proposals_per_batch, -1), proposals_xyxy), dim=2)
79 | rois = rois.view(-1, 5).to(proposals_xyxy.device)
80 |
81 | # Extract features for each sample roi
82 | feat_layer_3_roi = self.prroi_pool3(feat_layer_3, rois)
83 | feat_layer_4_roi = self.prroi_pool4(feat_layer_4, rois)
84 |
85 | # Full connection
86 | feat_layer_3_fc = self.fc3(feat_layer_3_roi)
87 | feat_layer_4_fc = self.fc4(feat_layer_4_roi)
88 |
89 | # L2 norm
90 | feat_layer_3_fc = feat_layer_3_fc.view(batch_size, num_proposals_per_batch, -1)
91 | feat_layer_4_fc = feat_layer_4_fc.view(batch_size, num_proposals_per_batch, -1)
92 |
93 | feat_layer_3_fc_norm = feat_layer_3_fc / ((torch.sum(feat_layer_3_fc.abs()**2, dim=1, keepdim=True) / (feat_layer_3_fc.shape[1] + 1e-10))**(1/2))
94 | feat_layer_4_fc_norm = feat_layer_4_fc / ((torch.sum(feat_layer_4_fc.abs()**2, dim=1, keepdim=True) / (feat_layer_4_fc.shape[1] + 1e-10))**(1/2))
95 | return torch.cat((feat_layer_3_fc_norm, feat_layer_4_fc_norm), dim=2)
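The forward pass above fits the locator online by solving the regularized normal equations W = (XᵀX + λI)⁻¹XᵀY with λ = 0.1. `torch.gesv` was removed in later PyTorch releases; a minimal sketch of the same solve against the modern `torch.linalg` API (an assumed port, not this repository's code):

```
import torch

def ridge_regression(X, Y, lam=0.1):
    """Solve (X^T X + lam*I) W = X^T Y, batched over dim 0."""
    XTX = X.transpose(1, 2) @ X
    XTY = X.transpose(1, 2) @ Y
    I = torch.eye(X.shape[2], device=X.device, dtype=X.dtype)
    return torch.linalg.solve(XTX + lam * I, XTY)

# X: (batch, num_proposals, feat_dim), Y: (batch, num_proposals, 1)
X, Y = torch.randn(2, 128, 1024), torch.rand(2, 128, 1)
W = ridge_regression(X, Y)  # (2, 1024, 1)
pred = X @ W                # predicted confidence for each proposal
```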
--------------------------------------------------------------------------------
/ltr/models/locator/onlineRR50.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | import torch.nn as nn
4 | from ltr.models.layers.blocks import LinearBlock
5 | from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D
6 |
7 |
8 | def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
9 | return nn.Sequential(
10 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True),
11 | nn.BatchNorm2d(out_planes),
12 | nn.ReLU(inplace=True))
13 |
14 |
15 | class OnlineRRNet(nn.Module):
16 | def __init__(self, input_dim=(128,256), pred_input_dim=(128,256)):
17 | super().__init__()
18 | self.conv3_1 = conv(input_dim[0], pred_input_dim[0], kernel_size=3, stride=1)
19 | self.conv3_2 = conv(pred_input_dim[0], pred_input_dim[0], kernel_size=3, stride=1)
20 | self.conv3_3 = conv(pred_input_dim[0], pred_input_dim[0], kernel_size=3, stride=1)
21 | self.conv3_4 = conv(pred_input_dim[0], pred_input_dim[0], kernel_size=3, stride=1)
22 | self.conv4_1 = conv(input_dim[1], pred_input_dim[1], kernel_size=3, stride=1)
23 | self.conv4_2 = conv(pred_input_dim[1], pred_input_dim[1], kernel_size=3, stride=1)
24 | self.conv4_3 = conv(pred_input_dim[1], pred_input_dim[1], kernel_size=3, stride=1)
25 | self.conv4_4 = conv(pred_input_dim[1], pred_input_dim[1], kernel_size=3, stride=1)
26 |
27 | self.prroi_pool3 = PrRoIPool2D(8, 8, 1/8)
28 | self.prroi_pool4 = PrRoIPool2D(4, 4, 1/16)
29 |
30 | self.fc3 = LinearBlock(pred_input_dim[0], 512, 8, batch_norm=False, relu=False)
31 | self.fc4 = LinearBlock(pred_input_dim[1], 512, 4, batch_norm=False, relu=False)
32 |
33 | # Init weights
34 | for m in self.modules():
35 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):
36 | nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
37 | if m.bias is not None:
38 | m.bias.data.zero_()
39 |
40 |
41 | def forward(self, train_feat, test_feat, train_proposals, train_labels, test_proposals):
42 | assert(train_feat[0].shape[0]==1)
43 |
44 | batch_size = train_feat[0].shape[1]
45 |
46 | # transform
47 | train_feat = [f[0,...] for f in train_feat]
48 | test_feat = [f[0,...] for f in test_feat]
49 | train_labels = train_labels.view(batch_size, -1, 1)
50 | train_proposals = train_proposals.view(batch_size, -1, 4)
51 | test_proposals = test_proposals.view(batch_size, -1, 4)
52 |
53 | # Extract features
54 | train_feat_locator = self.get_locator_feat(train_feat, train_proposals)
55 | test_feat_locator = self.get_locator_feat(test_feat, test_proposals)
56 |
57 | # Train by solving the ridge regression problem
58 | train_XTY = torch.matmul(train_feat_locator.permute(0,2,1), train_labels)
59 | train_XTX = torch.matmul(train_feat_locator.permute(0,2,1), train_feat_locator)
60 | W, _ = torch.gesv(train_XTY, train_XTX + 0.1*torch.eye(train_feat_locator.shape[2]).to(train_XTX.device))
61 |
62 | # Evaluation
63 | prediction = torch.matmul(test_feat_locator, W)
64 |
65 | return prediction
66 |
67 |
68 | def get_locator_feat(self, feat, proposals):
69 | batch_size = feat[0].shape[0]
70 | num_proposals_per_batch = proposals.shape[1]
71 |
72 | # Convolution
73 | feat_layer_3 = self.conv3_4(self.conv3_3(self.conv3_2(self.conv3_1(feat[0]))))
74 | feat_layer_4 = self.conv4_4(self.conv4_3(self.conv4_2(self.conv4_1(feat[1]))))
75 |
76 | # Convert the xywh input proposals to x0y0x1y1 format
77 | proposals_xyxy = torch.cat((proposals[:, :, 0:2], proposals[:, :, 0:2] + proposals[:, :, 2:4]), dim=2)
78 |
79 | # Add batch index to rois
80 | batch_index = torch.Tensor([x for x in range(batch_size)]).view(batch_size, 1).to(feat_layer_3.device)
81 | rois = torch.cat((batch_index.view(batch_size, -1, 1).expand(-1, num_proposals_per_batch, -1), proposals_xyxy), dim=2)
82 | rois = rois.view(-1, 5).to(proposals_xyxy.device)
83 |
84 | # Extract features for each sample roi
85 | feat_layer_3_roi = self.prroi_pool3(feat_layer_3, rois)
86 | feat_layer_4_roi = self.prroi_pool4(feat_layer_4, rois)
87 |
88 | # Full connection
89 | feat_layer_3_fc = self.fc3(feat_layer_3_roi)
90 | feat_layer_4_fc = self.fc4(feat_layer_4_roi)
91 |
92 | feat_layer_3_fc = feat_layer_3_fc.view(batch_size, num_proposals_per_batch, -1)
93 | feat_layer_4_fc = feat_layer_4_fc.view(batch_size, num_proposals_per_batch, -1)
94 |
95 | # L2 norm
96 | layer_3_norm = (torch.sum(feat_layer_3_fc.abs()**2, dim=1, keepdim=True) / (feat_layer_3_fc.shape[1] + 1e-10))**(1/2)
97 | layer_4_norm = (torch.sum(feat_layer_4_fc.abs()**2, dim=1, keepdim=True) / (feat_layer_4_fc.shape[1] + 1e-10))**(1/2)
98 |
99 | feat_layer_3_fc_norm = feat_layer_3_fc / layer_3_norm
100 | feat_layer_4_fc_norm = feat_layer_4_fc / layer_4_norm
101 |
102 | return torch.cat((feat_layer_3_fc_norm, feat_layer_4_fc_norm), dim=2)
--------------------------------------------------------------------------------
/ltr/run_training.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | import importlib
5 | import multiprocessing
6 | import cv2 as cv
7 | import torch.backends.cudnn
8 |
9 | env_path = os.path.join(os.path.dirname(__file__), '..')
10 | if env_path not in sys.path:
11 | sys.path.append(env_path)
12 |
13 | import ltr.admin.settings as ws_settings
14 |
15 |
16 | def run_training(train_module, train_name, cudnn_benchmark=True):
17 | """Run a train scripts in train_settings.
18 | args:
19 | train_module: Name of module in the "train_settings/" folder.
20 | train_name: Name of the train settings file.
21 | cudnn_benchmark: Use cudnn benchmark or not (default is True).
22 | """
23 |
24 | # This is needed to avoid strange crashes related to opencv
25 | cv.setNumThreads(0)
26 |
27 | torch.backends.cudnn.benchmark = cudnn_benchmark
28 |
29 | print('Training: {} {}'.format(train_module, train_name))
30 |
31 | settings = ws_settings.Settings()
32 |
33 | if settings.env.workspace_dir == '':
34 |         raise Exception('Set up your workspace_dir in "ltr/admin/local.py".')
35 |
36 | settings.module_name = train_module
37 | settings.script_name = train_name
38 | settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)
39 |
40 | expr_module = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))
41 | expr_func = getattr(expr_module, 'run')
42 |
43 | expr_func(settings)
44 |
45 |
46 | def main():
47 |     parser = argparse.ArgumentParser(description='Run a training script from train_settings.')
48 | parser.add_argument('train_module', type=str, help='Name of module in the "train_settings/" folder.')
49 | parser.add_argument('train_name', type=str, help='Name of the train settings file.')
50 |     parser.add_argument('--cudnn_benchmark', type=int, choices=[0, 1], default=1, help='Set cudnn benchmark on (1) or off (0) (default is on).')
51 |
52 | args = parser.parse_args()
53 |
54 |     run_training(args.train_module, args.train_name, bool(args.cudnn_benchmark))
55 |
56 |
57 | if __name__ == '__main__':
58 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
59 | multiprocessing.set_start_method('spawn', force=True)
60 | main()
61 |
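For example, `python ltr/run_training.py SBDT default` launches SBDT training with the `default` settings file (assuming `workspace_dir` has been set in `ltr/admin/local.py`).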
--------------------------------------------------------------------------------
/ltr/train_settings/SBDT/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/ltr/train_settings/SBDT/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/train_settings/SBDT/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/train_settings/SBDT/__pycache__/atom_default.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/train_settings/SBDT/__pycache__/atom_default.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/train_settings/SBDT/__pycache__/default-res50.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/train_settings/SBDT/__pycache__/default-res50.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/train_settings/SBDT/__pycache__/default.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/train_settings/SBDT/__pycache__/default.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/train_settings/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/ltr/train_settings/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/train_settings/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/trainers/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_trainer import BaseTrainer
2 | from .ltr_trainer import LTRTrainer
--------------------------------------------------------------------------------
/ltr/trainers/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/trainers/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/trainers/__pycache__/base_trainer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/trainers/__pycache__/base_trainer.cpython-37.pyc
--------------------------------------------------------------------------------
/ltr/trainers/__pycache__/ltr_trainer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/ltr/trainers/__pycache__/ltr_trainer.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/__init__.py:
--------------------------------------------------------------------------------
1 | from pytracking.libs import TensorList, TensorDict
2 | import pytracking.libs.complex as complex
3 | import pytracking.libs.operation as operation
4 | import pytracking.libs.fourier as fourier
5 | import pytracking.libs.dcf as dcf
6 | import pytracking.libs.optimization as optimization
7 | from pytracking.run_tracker import run_tracker
8 | from pytracking.run_webcam import run_webcam
9 |
--------------------------------------------------------------------------------
/pytracking/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/__pycache__/run_tracker.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/__pycache__/run_tracker.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/__pycache__/run_webcam.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/__pycache__/run_webcam.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | from .otbdataset import OTBDataset
2 | from .nfsdataset import NFSDataset
3 | from .uavdataset import UAVDataset
4 | from .tpldataset import TPLDataset
5 | from .trackingnetdataset import TrackingNetDataset
6 | from .got10kdataset import GOT10KDatasetTest, GOT10KDatasetVal, GOT10KDatasetLTRVal
7 | from .lasotdataset import LaSOTDataset
8 | from .data import Sequence
9 | from .tracker import Tracker
10 | from .vot18dataset import VOT18Dataset
11 | from .vot19dataset import VOT19Dataset
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/data.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/data.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/environment.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/environment.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/got10kdataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/got10kdataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/lasotdataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/lasotdataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/local.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/local.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/nfsdataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/nfsdataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/nfstunedataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/nfstunedataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/otbdataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/otbdataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/otbtunedataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/otbtunedataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/running.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/running.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/tpldataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/tpldataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/tracker.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/tracker.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/trackingnetdataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/trackingnetdataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/trackingnettunedataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/trackingnettunedataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/uavdataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/uavdataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/vot18dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/vot18dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/vot19dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/vot19dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/__pycache__/votdataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/evaluation/__pycache__/votdataset.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/evaluation/data.py:
--------------------------------------------------------------------------------
1 | from pytracking.evaluation.environment import env_settings
2 |
3 |
4 | class BaseDataset:
5 | """Base class for all datasets."""
6 | def __init__(self):
7 | self.env_settings = env_settings()
8 |
9 | def __len__(self):
10 | """Overload this function in your dataset. This should return number of sequences in the dataset."""
11 | raise NotImplementedError
12 |
13 | def get_sequence_list(self):
14 | """Overload this in your dataset. Should return the list of sequences in the dataset."""
15 | raise NotImplementedError
16 |
17 |
18 | class Sequence:
19 | """Class for the sequence in an evaluation."""
20 | def __init__(self, name, frames, ground_truth_rect, object_class=None):
21 | self.name = name
22 | self.frames = frames
23 | self.ground_truth_rect = ground_truth_rect
24 | self.init_state = list(self.ground_truth_rect[0,:])
25 | self.object_class = object_class
26 |
27 |
28 | class SequenceList(list):
29 | """List of sequences. Supports the addition operator to concatenate sequence lists."""
30 | def __getitem__(self, item):
31 | if isinstance(item, str):
32 | for seq in self:
33 | if seq.name == item:
34 | return seq
35 | raise IndexError('Sequence name not in the dataset.')
36 | elif isinstance(item, int):
37 | return super(SequenceList, self).__getitem__(item)
38 | elif isinstance(item, (tuple, list)):
39 | return SequenceList([super(SequenceList, self).__getitem__(i) for i in item])
40 | else:
41 | return SequenceList(super(SequenceList, self).__getitem__(item))
42 |
43 | def __add__(self, other):
44 | return SequenceList(super(SequenceList, self).__add__(other))
45 |
46 | def copy(self):
47 | return SequenceList(super(SequenceList, self).copy())
--------------------------------------------------------------------------------
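The `SequenceList` above accepts string, integer, slice, and index-list keys. A minimal usage sketch (the sequence names, frame paths, and annotation values are made up for illustration):

    import numpy as np
    from pytracking.evaluation.data import Sequence, SequenceList

    gt = np.array([[10.0, 20.0, 30.0, 40.0]])  # one (x, y, w, h) box per frame
    seqs = SequenceList([Sequence('ball', ['0001.jpg'], gt),
                         Sequence('car', ['0001.jpg'], gt)])

    print(seqs['car'].name)         # lookup by sequence name
    print(seqs[0].init_state)       # [10.0, 20.0, 30.0, 40.0]
    print(len(seqs + seqs.copy()))  # 4 -- addition concatenates sequence lists

--------------------------------------------------------------------------------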
/pytracking/evaluation/environment.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
5 | class EnvSettings:
6 | def __init__(self):
7 | pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
8 |
9 | self.results_path = '{}/tracking_results/'.format(pytracking_path)
10 | self.network_path = '{}/networks/'.format(pytracking_path)
11 | self.otb_path = ''
12 | self.nfs_path = ''
13 | self.uav_path = ''
14 | self.tpl_path = ''
15 | self.vot_path = ''
16 | self.got10k_path = ''
17 | self.lasot_path = ''
18 | self.trackingnet_path = ''
19 |
20 |
21 | def create_default_local_file():
22 | comment = {'results_path': 'Where to store tracking results',
23 | 'network_path': 'Where tracking networks are stored.'}
24 |
25 | path = os.path.join(os.path.dirname(__file__), 'local.py')
26 | with open(path, 'w') as f:
27 | settings = EnvSettings()
28 |
29 | f.write('from pytracking.evaluation.environment import EnvSettings\n\n')
30 | f.write('def local_env_settings():\n')
31 | f.write(' settings = EnvSettings()\n\n')
32 | f.write(' # Set your local paths here.\n\n')
33 |
34 | for attr in dir(settings):
35 | comment_str = None
36 | if attr in comment:
37 | comment_str = comment[attr]
38 | attr_val = getattr(settings, attr)
39 | if not attr.startswith('__') and not callable(attr_val):
40 | if comment_str is None:
41 | f.write(' settings.{} = \'{}\'\n'.format(attr, attr_val))
42 | else:
43 | f.write(' settings.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str))
44 | f.write('\n return settings\n\n')
45 |
46 |
47 | def env_settings():
48 | env_module_name = 'pytracking.evaluation.local'
49 | try:
50 | env_module = importlib.import_module(env_module_name)
51 | return env_module.local_env_settings()
52 | except:
53 | env_file = os.path.join(os.path.dirname(__file__), 'local.py')
54 |
55 | # Create a default file
56 | create_default_local_file()
57 | raise RuntimeError('YOU HAVE NOT SET UP YOUR local.py!!!\n Go to "{}" and set all the paths you need. '
58 | 'Then try to run again.'.format(env_file))
59 |
--------------------------------------------------------------------------------
/pytracking/evaluation/got10kdataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
3 | import os
4 |
5 |
6 | def GOT10KDatasetTest():
7 | """ GOT-10k official test set"""
8 | return GOT10KDatasetClass('test').get_sequence_list()
9 |
10 |
11 | def GOT10KDatasetVal():
12 | """ GOT-10k official val set"""
13 | return GOT10KDatasetClass('val').get_sequence_list()
14 |
15 |
16 | def GOT10KDatasetLTRVal():
17 | """ GOT-10k val split from LTR (a subset of GOT-10k official train set)"""
18 | return GOT10KDatasetClass('ltrval').get_sequence_list()
19 |
20 |
21 | class GOT10KDatasetClass(BaseDataset):
22 | """ GOT-10k dataset.
23 |
24 | Publication:
25 | GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild
26 | Lianghua Huang, Xin Zhao, and Kaiqi Huang
27 | arXiv:1810.11981, 2018
28 | https://arxiv.org/pdf/1810.11981.pdf
29 |
30 | Download dataset from http://got-10k.aitestunion.com/downloads
31 | """
32 | def __init__(self, split):
33 | """
34 | args:
35 | split - Split to use. Can be i) 'test': official test set, ii) 'val': official val set, and iii) 'ltrval':
36 | a custom validation set, a subset of the official train set.
37 | """
38 | super().__init__()
39 | # Split can be test, val, or ltrval
40 | if split == 'test' or split == 'val':
41 | self.base_path = os.path.join(self.env_settings.got10k_path, split)
42 | else:
43 | self.base_path = os.path.join(self.env_settings.got10k_path, 'train')
44 |
45 | self.sequence_list = self._get_sequence_list(split)
46 | self.split = split
47 |
48 | def get_sequence_list(self):
49 | return SequenceList([self._construct_sequence(s) for s in self.sequence_list])
50 |
51 | def _construct_sequence(self, sequence_name):
52 | anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)
53 | try:
54 | ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)
55 | except ValueError: # comma-delimited annotations fail the first parse
56 | ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)
57 |
58 | frames_path = '{}/{}'.format(self.base_path, sequence_name)
59 | frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")]
60 | frame_list.sort(key=lambda f: int(f[:-4]))
61 | frames_list = [os.path.join(frames_path, frame) for frame in frame_list]
62 |
63 | return Sequence(sequence_name, frames_list, ground_truth_rect.reshape(-1, 4))
64 |
65 | def __len__(self):
66 | '''Returns the number of sequences in the dataset.'''
67 | return len(self.sequence_list)
68 |
69 | def _get_sequence_list(self, split):
70 | with open('{}/list.txt'.format(self.base_path)) as f:
71 | sequence_list = f.read().splitlines()
72 |
73 | if split == 'ltrval':
74 | with open('{}/got10k_val_split.txt'.format(self.env_settings.dataspec_path)) as f: # note: dataspec_path is not defined in the default EnvSettings; set it in local.py (pointing at ltr/data_specs) before using the 'ltrval' split
75 | seq_ids = f.read().splitlines()
76 |
77 | sequence_list = [sequence_list[int(x)] for x in seq_ids]
78 | return sequence_list
79 |
--------------------------------------------------------------------------------
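Assuming `got10k_path` is set in pytracking/evaluation/local.py, the constructors above return a ready-to-iterate `SequenceList`; a sketch:

    from pytracking.evaluation import GOT10KDatasetVal

    dataset = GOT10KDatasetVal()      # reads <got10k_path>/val/list.txt
    print(len(dataset))               # number of sequences in the official val set
    seq = dataset[0]
    print(seq.name, len(seq.frames))  # sequence name and its frame count

--------------------------------------------------------------------------------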
/pytracking/evaluation/local.py:
--------------------------------------------------------------------------------
1 | from pytracking.evaluation.environment import EnvSettings
2 |
3 | def local_env_settings():
4 | settings = EnvSettings()
5 |
6 | # Set your local paths here.
7 | settings.got10k_path = '/data/zhenglinyu/benchmarks/got10k/'
8 | settings.lasot_path = ''
9 | settings.network_path = '/home/zhenglinyu2/DCFST/pytracking/networks/' # Where tracking networks are stored.
10 | settings.nfs_path = '/data/zhenglinyu/benchmarks/nfs30/'
11 | settings.otb_path = '/data/zhenglinyu/benchmarks/otb100/'
12 | settings.results_path = '/home/zhenglinyu2/DCFST/pytracking/tracking_results/' # Where to store tracking results
13 | settings.tpl_path = ''
14 | settings.trackingnet_path = '/data/zhenglinyu/benchmarks/trackingnet/'
15 | settings.uav_path = ''
16 | settings.vot18_path = '/data/zhenglinyu/benchmarks/vot18/'
17 | settings.vot19_path = '/data/zhenglinyu/benchmarks/vot19/'
18 |
19 | return settings
20 |
21 |
--------------------------------------------------------------------------------
/pytracking/evaluation/running.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import multiprocessing
3 | import os
4 | from itertools import product
5 | from pytracking.evaluation import Sequence, Tracker
6 |
7 |
8 | def run_sequence(seq: Sequence, tracker: Tracker, debug=False):
9 | """Runs a tracker on a sequence."""
10 |
11 | base_results_path = '{}/{}'.format(tracker.results_dir, seq.name)
12 | results_path = '{}.txt'.format(base_results_path)
13 | times_path = '{}_time.txt'.format(base_results_path)
14 |
15 | if os.path.isfile(results_path) and not debug:
16 | return
17 |
18 | print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))
19 |
20 | if debug:
21 | tracked_bb, exec_times = tracker.run(seq, debug=debug)
22 | else:
23 | try:
24 | tracked_bb, exec_times = tracker.run(seq, debug=debug)
25 | except Exception as e:
26 | print(e)
27 | return
28 |
29 | tracked_bb = np.array(tracked_bb).astype(int)
30 | exec_times = np.array(exec_times).astype(float)
31 |
32 | print('FPS: {}'.format(len(exec_times) / exec_times.sum()))
33 | if not debug:
34 | np.savetxt(results_path, tracked_bb, delimiter='\t', fmt='%d')
35 | np.savetxt(times_path, exec_times, delimiter='\t', fmt='%f')
36 |
37 |
38 | def run_sequence_vot(seq: Sequence, tracker: Tracker, debug=False):
39 | """Runs a tracker on a sequence."""
40 |
41 | base_results_path = '{}/{}/{}/{}'.format(tracker.results_dir, 'baseline', seq.name, seq.name)
42 | results_path = '{}_001.txt'.format(base_results_path)
43 | times_path = '{}_time.txt'.format(base_results_path)
44 |
45 | if not os.path.exists('{}/{}'.format(tracker.results_dir, 'baseline')):
46 | os.mkdir('{}/{}'.format(tracker.results_dir, 'baseline'))
47 |
48 | if not os.path.exists('{}/{}/{}'.format(tracker.results_dir, 'baseline', seq.name)):
49 | os.mkdir('{}/{}/{}'.format(tracker.results_dir, 'baseline', seq.name))
50 |
51 | if os.path.isfile(results_path) and not debug:
52 | return
53 |
54 | print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))
55 |
56 | if debug:
57 | tracked_bb, exec_times = tracker.run(seq, debug=debug)
58 | else:
59 | try:
60 | tracked_bb, exec_times = tracker.run(seq, debug=debug)
61 | except Exception as e:
62 | print(e)
63 | return
64 |
65 | #tracked_bb = np.array(tracked_bb).astype(int)
66 | exec_times = np.array(exec_times).astype(float)
67 |
68 | with open(results_path, "w") as fin:
69 | for x in tracked_bb:
70 | if isinstance(x, int):
71 | fin.write("{:d}\n".format(x))
72 | else:
73 | p_bbox = x.copy()
74 | fin.write(','.join([str(i) for i in x]) + '\n')
75 |
76 | print('FPS: {}'.format(len(exec_times) / exec_times.sum()))
77 | if not debug:
78 | #np.savetxt(results_path, tracked_bb, delimiter='\t', fmt='%d')
79 | np.savetxt(times_path, exec_times, delimiter='\t', fmt='%f')
80 |
81 |
82 | def run_dataset(dataset, trackers, debug=False, threads=0):
83 | """Runs a list of trackers on a dataset.
84 | args:
85 | dataset: List of Sequence instances, forming a dataset.
86 | trackers: List of Tracker instances.
87 | debug: Debug level.
88 | threads: Number of threads to use (default 0).
89 | """
90 | if threads == 0:
91 | mode = 'sequential'
92 | else:
93 | mode = 'parallel'
94 |
95 | if mode == 'sequential':
96 | for seq in dataset:
97 | for tracker_info in trackers:
98 | run_sequence(seq, tracker_info, debug=debug) # all benchmarks except VOT
99 | #run_sequence_vot(seq, tracker_info, debug=debug) # uncomment for the VOT challenge
100 | elif mode == 'parallel':
101 | param_list = [(seq, tracker_info, debug) for seq, tracker_info in product(dataset, trackers)]
102 | with multiprocessing.Pool(processes=threads) as pool:
103 | pool.starmap(run_sequence, param_list)
104 | print('Done')
105 |
--------------------------------------------------------------------------------
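A sketch of driving `run_dataset`; the tracker and parameter names below are placeholders and must match a module under pytracking/tracker and a file under pytracking/parameter:

    from pytracking.evaluation import OTBDataset, Tracker
    from pytracking.evaluation.running import run_dataset

    trackers = [Tracker('sbdt', 'default')]   # hypothetical name/parameter pair
    dataset = OTBDataset()                    # requires otb_path in local.py
    run_dataset(dataset, trackers, debug=False, threads=0)  # threads=0 -> sequential

--------------------------------------------------------------------------------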
/pytracking/evaluation/tracker.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 | import pickle
4 | from pytracking.evaluation.environment import env_settings
5 |
6 |
7 | class Tracker:
8 | """Wraps the tracker for evaluation and running purposes.
9 | args:
10 | name: Name of tracking method.
11 | parameter_name: Name of parameter file.
12 | run_id: The run id.
13 | """
14 |
15 | def __init__(self, name: str, parameter_name: str, run_id: int = None):
16 | self.name = name
17 | self.parameter_name = parameter_name
18 | self.run_id = run_id
19 |
20 | env = env_settings()
21 | if self.run_id is None:
22 | self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name)
23 | else:
24 | self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id)
25 | if not os.path.exists(self.results_dir):
26 | os.makedirs(self.results_dir)
27 |
28 | tracker_module = importlib.import_module('pytracking.tracker.{}'.format(self.name))
29 |
30 | self.parameters = self.get_parameters()
31 | self.tracker_class = tracker_module.get_tracker_class()
32 |
33 | self.default_visualization = getattr(self.parameters, 'visualization', False)
34 | self.default_debug = getattr(self.parameters, 'debug', 0)
35 |
36 | def run(self, seq, visualization=None, debug=None):
37 | """Run tracker on sequence.
38 | args:
39 | seq: Sequence to run the tracker on.
40 | visualization: Set visualization flag (None means default value specified in the parameters).
41 | debug: Set debug level (None means default value specified in the parameters).
42 | """
43 | visualization_ = visualization
44 | debug_ = debug
45 | if debug is None:
46 | debug_ = self.default_debug
47 | if visualization is None:
48 | if debug is None:
49 | visualization_ = self.default_visualization
50 | else:
51 | visualization_ = True if debug else False
52 |
53 | self.parameters.visualization = visualization_
54 | self.parameters.debug = debug_
55 |
56 | tracker = self.tracker_class(self.parameters)
57 |
58 | output_bb, execution_times = tracker.track_sequence(seq) # all benchmarks except VOT
59 | #output_bb, execution_times = tracker.track_sequence_vot(seq) # uncomment for the VOT challenge
60 |
61 | self.parameters.free_memory()
62 |
63 | return output_bb, execution_times
64 |
65 | def run_webcam(self, debug=None):
66 | """Run the tracker with the webcam.
67 | args:
68 | debug: Debug level.
69 | """
70 |
71 | debug_ = debug
72 | if debug is None:
73 | debug_ = self.default_debug
74 | self.parameters.debug = debug_
75 |
76 | self.parameters.tracker_name = self.name
77 | self.parameters.param_name = self.parameter_name
78 | tracker = self.tracker_class(self.parameters)
79 |
80 | tracker.track_webcam()
81 |
82 | def get_parameters(self):
83 | """Get parameters."""
84 |
85 | parameter_file = '{}/parameters.pkl'.format(self.results_dir)
86 | if os.path.isfile(parameter_file):
87 | return pickle.load(open(parameter_file, 'rb'))
88 |
89 | param_module = importlib.import_module('pytracking.parameter.{}.{}'.format(self.name, self.parameter_name))
90 | params = param_module.parameters(self.run_id)
91 |
92 | '''
93 | if self.run_id is not None:
94 | pickle.dump(params, open(parameter_file, 'wb'))
95 | '''
96 |
97 | return params
98 |
99 |
100 |
--------------------------------------------------------------------------------
/pytracking/evaluation/vot18dataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
3 |
4 |
5 | def VOT18Dataset():
6 | return VOT18DatasetClass().get_sequence_list()
7 |
8 |
9 | class VOT18DatasetClass(BaseDataset):
10 | """VOT2018 dataset
11 |
12 | Publication:
13 | The sixth Visual Object Tracking VOT2018 challenge results.
14 | Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pflugfelder, Luka Cehovin Zajc, Tomas Vojir,
15 | Goutam Bhat, Alan Lukezic et al.
16 | ECCV, 2018
17 | https://prints.vicos.si/publications/365
18 |
19 | Download the dataset from http://www.votchallenge.net/vot2018/dataset.html"""
20 | def __init__(self):
21 | super().__init__()
22 | self.base_path = self.env_settings.vot18_path
23 | self.sequence_list = self._get_sequence_list()
24 |
25 | def get_sequence_list(self):
26 | return SequenceList([self._construct_sequence(s) for s in self.sequence_list])
27 |
28 | def _construct_sequence(self, sequence_name):
29 | sequence_path = sequence_name
30 | nz = 8
31 | ext = 'jpg'
32 | start_frame = 1
33 |
34 | anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)
35 | try:
36 | ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)
37 | except ValueError: # comma-delimited annotations fail the first parse
38 | ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)
39 |
40 | end_frame = ground_truth_rect.shape[0]
41 |
42 | frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
43 | sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext)
44 | for frame_num in range(start_frame, end_frame+1)]
45 |
46 | return Sequence(sequence_name, frames, ground_truth_rect)
47 |
48 | def __len__(self):
49 | return len(self.sequence_list)
50 |
51 | def _get_sequence_list(self):
52 | sequence_list = ['ants1',
53 | 'ants3',
54 | 'bag',
55 | 'ball1',
56 | 'ball2',
57 | 'basketball',
58 | 'birds1',
59 | 'blanket',
60 | 'bmx',
61 | 'bolt1',
62 | 'bolt2',
63 | 'book',
64 | 'butterfly',
65 | 'car1',
66 | 'conduction1',
67 | 'crabs1',
68 | 'crossing',
69 | 'dinosaur',
70 | 'drone_across',
71 | 'drone_flip',
72 | 'drone1',
73 | 'fernando',
74 | 'fish1',
75 | 'fish2',
76 | 'fish3',
77 | 'flamingo1',
78 | 'frisbee',
79 | 'girl',
80 | 'glove',
81 | 'godfather',
82 | 'graduate',
83 | 'gymnastics1',
84 | 'gymnastics2',
85 | 'gymnastics3',
86 | 'hand',
87 | 'handball1',
88 | 'handball2',
89 | 'helicopter',
90 | 'iceskater1',
91 | 'iceskater2',
92 | 'leaves',
93 | 'matrix',
94 | 'motocross1',
95 | 'motocross2',
96 | 'nature',
97 | 'pedestrian1',
98 | 'rabbit',
99 | 'racing',
100 | 'road',
101 | 'shaking',
102 | 'sheep',
103 | 'singer2',
104 | 'singer3',
105 | 'soccer1',
106 | 'soccer2',
107 | 'soldier',
108 | 'tiger',
109 | 'traffic',
110 | 'wiper',
111 | 'zebrafish1']
112 |
113 | return sequence_list
114 |
--------------------------------------------------------------------------------
/pytracking/evaluation/vot19dataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
3 |
4 |
5 | def VOT19Dataset():
6 | return VOT19DatasetClass().get_sequence_list()
7 |
8 |
9 | class VOT19DatasetClass(BaseDataset):
10 | """VOT2018 dataset
11 |
12 | Publication:
13 | The sixth Visual Object Tracking VOT2018 challenge results.
14 | Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder, Luka Cehovin Zajc, Tomas Vojir,
15 | Goutam Bhat, Alan Lukezic et al.
16 | ECCV, 2018
17 | https://prints.vicos.si/publications/365
18 |
19 | Download the dataset from http://www.votchallenge.net/vot2018/dataset.html"""
20 | def __init__(self):
21 | super().__init__()
22 | self.base_path = self.env_settings.vot19_path
23 | self.sequence_list = self._get_sequence_list()
24 |
25 | def get_sequence_list(self):
26 | return SequenceList([self._construct_sequence(s) for s in self.sequence_list])
27 |
28 | def _construct_sequence(self, sequence_name):
29 | sequence_path = sequence_name
30 | nz = 8
31 | ext = 'jpg'
32 | start_frame = 1
33 |
34 | anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)
35 | try:
36 | ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)
37 | except ValueError: # comma-delimited annotations fail the first parse
38 | ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)
39 |
40 | end_frame = ground_truth_rect.shape[0]
41 |
42 | frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
43 | sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext)
44 | for frame_num in range(start_frame, end_frame+1)]
45 |
46 | # Convert gt
47 | if ground_truth_rect.shape[1] > 4:
48 | gt_x_all = ground_truth_rect[:, [0, 2, 4, 6]]
49 | gt_y_all = ground_truth_rect[:, [1, 3, 5, 7]]
50 |
51 | '''
52 | x1 = np.amin(gt_x_all, 1).reshape(-1,1)
53 | y1 = np.amin(gt_y_all, 1).reshape(-1,1)
54 | x2 = np.amax(gt_x_all, 1).reshape(-1,1)
55 | y2 = np.amax(gt_y_all, 1).reshape(-1,1)
56 |
57 | ground_truth_rect = np.concatenate((x1, y1, x2-x1, y2-y1), 1)
58 | '''
59 | return Sequence(sequence_name, frames, ground_truth_rect)
60 |
61 | def __len__(self):
62 | return len(self.sequence_list)
63 |
64 | def _get_sequence_list(self):
65 | sequence_list = ['agility',
66 | 'book',
67 | 'zebrafish1',
68 | 'singer3',
69 | 'soccer1',
70 | 'road',
71 | 'shaking',
72 | 'tiger',
73 | 'gymnastics2',
74 | 'glove',
75 | 'dinosaur',
76 | 'helicopter',
77 | 'marathon',
78 | 'surfing',
79 | 'matrix',
80 | 'polo',
81 | 'fish1',
82 | 'crabs1',
83 | 'flamingo1',
84 | 'singer2',
85 | 'gymnastics3',
86 | 'godfather',
87 | 'hand2',
88 | 'handball1',
89 | 'fish2',
90 | 'pedestrian1',
91 | 'fernando',
92 | 'nature',
93 | 'dribble',
94 | 'car1',
95 | 'rowing',
96 | 'monkey',
97 | 'iceskater2',
98 | 'rabbit',
99 | 'butterfly',
100 | 'girl',
101 | 'graduate',
102 | 'wiper',
103 | 'motocross1',
104 | 'handball2',
105 | 'basketball',
106 | 'wheel',
107 | 'hand',
108 | 'frisbee',
109 | 'iceskater1',
110 | 'birds1',
111 | 'lamb',
112 | 'conduction1',
113 | 'soccer2',
114 | 'soldier',
115 | 'bolt1',
116 | 'ants1',
117 | 'leaves',
118 | 'rabbit2',
119 | 'ball2',
120 | 'drone_across',
121 | 'gymnastics1',
122 | 'drone_flip',
123 | 'drone1',
124 | 'ball3']
125 |
126 | return sequence_list
127 |
--------------------------------------------------------------------------------
/pytracking/features/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/pytracking/features/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/features/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/features/__pycache__/augmentation.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/features/__pycache__/augmentation.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/features/__pycache__/deep.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/features/__pycache__/deep.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/features/__pycache__/extractor.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/features/__pycache__/extractor.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/features/__pycache__/featurebase.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/features/__pycache__/featurebase.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/features/__pycache__/preprocessing.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/features/__pycache__/preprocessing.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/features/color.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from pytracking.features.featurebase import FeatureBase
3 |
4 |
5 | class RGB(FeatureBase):
6 | """RGB feature normalized to [-0.5, 0.5]."""
7 | def dim(self):
8 | return 3
9 |
10 | def stride(self):
11 | return self.pool_stride
12 |
13 | def extract(self, im: torch.Tensor):
14 | return im/255 - 0.5
15 |
16 |
17 | class Grayscale(FeatureBase):
18 | """Grayscale feature normalized to [-0.5, 0.5]."""
19 | def dim(self):
20 | return 1
21 |
22 | def stride(self):
23 | return self.pool_stride
24 |
25 | def extract(self, im: torch.Tensor):
26 | return torch.mean(im/255 - 0.5, 1, keepdim=True)
27 |
--------------------------------------------------------------------------------
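Both features go through the `FeatureBase.get_feature` path shown below in featurebase.py; a quick sketch on a random patch:

    import torch
    from pytracking.features.color import RGB, Grayscale

    im = torch.rand(1, 3, 8, 8) * 255         # image patch with values in [0, 255]
    print(RGB().get_feature(im).shape)        # torch.Size([1, 3, 8, 8]), values in [-0.5, 0.5]
    print(Grayscale().get_feature(im).shape)  # torch.Size([1, 1, 8, 8]), channel-averaged

--------------------------------------------------------------------------------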
/pytracking/features/extractor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from pytracking.features.preprocessing import sample_patch
3 | from pytracking import TensorList
4 |
5 |
6 | class ExtractorBase:
7 | """Base feature extractor class.
8 | args:
9 | features: List of features.
10 | """
11 | def __init__(self, features):
12 | self.features = features
13 |
14 | def initialize(self):
15 | for f in self.features:
16 | f.initialize()
17 |
18 | def free_memory(self):
19 | for f in self.features:
20 | f.free_memory()
21 |
22 |
23 | class SingleResolutionExtractor(ExtractorBase):
24 | """Single resolution feature extractor.
25 | args:
26 | features: List of features.
27 | """
28 | def __init__(self, features):
29 | super().__init__(features)
30 |
31 | self.feature_stride = self.features[0].stride()
32 | if isinstance(self.feature_stride, (list, TensorList)):
33 | self.feature_stride = self.feature_stride[0]
34 |
35 | def stride(self):
36 | return self.feature_stride
37 |
38 | def size(self, input_sz):
39 | return input_sz // self.stride()
40 |
41 | def extract(self, im, pos, scales, image_sz):
42 | if isinstance(scales, (int, float)):
43 | scales = [scales]
44 |
45 | # Get image patches
46 | im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])
47 |
48 | # Compute features
49 | feature_map = torch.cat(TensorList([f.get_feature(im_patches) for f in self.features]).unroll(), dim=1)
50 |
51 | return feature_map
52 |
53 |
54 | class MultiResolutionExtractor(ExtractorBase):
55 | """Multi-resolution feature extractor.
56 | args:
57 | features: List of features.
58 | """
59 | def __init__(self, features):
60 | super().__init__(features)
61 | self.is_color = None
62 |
63 | def stride(self):
64 | return torch.Tensor(TensorList([f.stride() for f in self.features if self._return_feature(f)]).unroll())
65 |
66 | def size(self, input_sz):
67 | return TensorList([f.size(input_sz) for f in self.features if self._return_feature(f)]).unroll()
68 |
69 | def dim(self):
70 | return TensorList([f.dim() for f in self.features if self._return_feature(f)]).unroll()
71 |
72 | def get_fparams(self, name: str = None):
73 | if name is None:
74 | return [f.fparams for f in self.features if self._return_feature(f)]
75 | return TensorList([getattr(f.fparams, name) for f in self.features if self._return_feature(f)]).unroll()
76 |
77 | def get_attribute(self, name: str, ignore_missing: bool = False):
78 | if ignore_missing:
79 | return TensorList([getattr(f, name) for f in self.features if self._return_feature(f) and hasattr(f, name)])
80 | else:
81 | return TensorList([getattr(f, name, None) for f in self.features if self._return_feature(f)])
82 |
83 | def get_unique_attribute(self, name: str):
84 | feat = None
85 | for f in self.features:
86 | if self._return_feature(f) and hasattr(f, name):
87 | if feat is not None:
88 | raise RuntimeError('The attribute was not unique.')
89 | feat = f
90 | if feat is None:
91 | raise RuntimeError('The attribute did not exist')
92 | return getattr(feat, name)
93 |
94 | def _return_feature(self, f):
95 | return self.is_color is None or self.is_color and f.use_for_color or not self.is_color and f.use_for_gray
96 |
97 | def set_is_color(self, is_color: bool):
98 | self.is_color = is_color
99 |
100 | def extract(self, im, pos, scales, image_sz):
101 | """Extract features.
102 | args:
103 | im: Image.
104 | pos: Center position for extraction.
105 | scales: Image scales to extract features from.
106 | image_sz: Size to resize the image samples to before extraction.
107 | """
108 | if isinstance(scales, (int, float)):
109 | scales = [scales]
110 |
111 | # Get image patches
112 | im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])
113 |
114 | # Compute features
115 | feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()
116 |
117 | return feature_map
118 |
119 | def extract_transformed(self, im, pos, scale, image_sz, transforms):
120 | """Extract features from a set of transformed image samples.
121 | args:
122 | im: Image.
123 | pos: Center position for extraction.
124 | scale: Image scale to extract features from.
125 | image_sz: Size to resize the image samples to before extraction.
126 | transforms: A set of image transforms to apply.
127 | """
128 |
129 | # Get image patch
130 | im_patch = sample_patch(im, pos, scale*image_sz, image_sz)
131 |
132 | # Apply transforms
133 | im_patches = torch.cat([T(im_patch) for T in transforms])
134 |
135 | # Compute features
136 | feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()
137 |
138 | return feature_map
139 |
--------------------------------------------------------------------------------
/pytracking/features/featurebase.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from pytracking import TensorList
4 |
5 |
6 | class FeatureBase:
7 | """Base feature class.
8 | args:
9 | fparams: Feature specific parameters.
10 | pool_stride: Amount of average pooling to apply to downsample the feature map.
11 | output_size: Alternatively, specify the output size of the feature map. Adaptive average pooling will be applied.
12 | normalize_power: The power exponent for the normalization. None means no normalization (default).
13 | use_for_color: Use this feature for color images.
14 | use_for_gray: Use this feature for grayscale images.
15 | """
16 | def __init__(self, fparams = None, pool_stride = None, output_size = None, normalize_power = None, use_for_color = True, use_for_gray = True):
17 | self.fparams = fparams
18 | self.pool_stride = 1 if pool_stride is None else pool_stride
19 | self.output_size = output_size
20 | self.normalize_power = normalize_power
21 | self.use_for_color = use_for_color
22 | self.use_for_gray = use_for_gray
23 |
24 | def initialize(self):
25 | pass
26 |
27 | def free_memory(self):
28 | pass
29 |
30 | def dim(self):
31 | raise NotImplementedError
32 |
33 | def stride(self):
34 | raise NotImplementedError
35 |
36 | def size(self, im_sz):
37 | if self.output_size is None:
38 | return im_sz // self.stride()
39 | if isinstance(im_sz, torch.Tensor):
40 | return torch.Tensor([self.output_size[0], self.output_size[1]])
41 | return self.output_size
42 |
43 | def extract(self, im):
44 | """Performs feature extraction."""
45 | raise NotImplementedError
46 |
47 | def get_feature(self, im: torch.Tensor):
48 | """Get the feature. Generally, call this function.
49 | args:
50 | im: image patch as a torch.Tensor.
51 | """
52 |
53 | # Return empty tensor if it should not be used
54 | is_color = im.shape[1] == 3
55 | if is_color and not self.use_for_color or not is_color and not self.use_for_gray:
56 | return torch.Tensor([])
57 |
58 | # Extract feature
59 | feat = self.extract(im)
60 |
61 | # Pool/downsample
62 | if self.output_size is not None:
63 | feat = F.adaptive_avg_pool2d(feat, self.output_size)
64 | elif self.pool_stride != 1:
65 | feat = F.avg_pool2d(feat, self.pool_stride, self.pool_stride)
66 |
67 | # Normalize
68 | if self.normalize_power is not None:
69 | feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /
70 | (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)
71 |
72 | return feat
73 |
74 |
75 | class MultiFeatureBase(FeatureBase):
76 | """Base class for features potentially having multiple feature blocks as output (like CNNs).
77 | See FeatureBase for more info.
78 | """
79 | def size(self, im_sz):
80 | if self.output_size is None:
81 | return TensorList([im_sz // s for s in self.stride()])
82 | if isinstance(im_sz, torch.Tensor):
83 | return TensorList([im_sz // s if sz is None else torch.Tensor([sz[0], sz[1]]) for sz, s in zip(self.output_size, self.stride())])
84 |
85 | def get_feature(self, im: torch.Tensor):
86 | """Get the feature. Generally, call this function.
87 | args:
88 | im: image patch as a torch.Tensor.
89 | """
90 |
91 | # Return empty tensor if it should not be used
92 | is_color = im.shape[1] == 3
93 | if is_color and not self.use_for_color or not is_color and not self.use_for_gray:
94 | return torch.Tensor([])
95 |
96 | feat_list = self.extract(im)
97 |
98 | output_sz = [None]*len(feat_list) if self.output_size is None else self.output_size
99 |
100 | # Pool/downsample
101 | for i, (sz, s) in enumerate(zip(output_sz, self.pool_stride)):
102 | if sz is not None:
103 | feat_list[i] = F.adaptive_avg_pool2d(feat_list[i], sz)
104 | elif s != 1:
105 | feat_list[i] = F.avg_pool2d(feat_list[i], s, s)
106 |
107 | # Normalize
108 | if self.normalize_power is not None:
109 | for feat in feat_list:
110 | feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /
111 | (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)
112 |
113 | return feat_list
--------------------------------------------------------------------------------
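The pooling and normalization options are handled entirely by the base class, so any subclass gets them for free; for example, with the RGB feature from color.py:

    import torch
    from pytracking.features.color import RGB

    feat = RGB(pool_stride=2, normalize_power=2)            # 2x average pooling + power-2 rescaling
    print(feat.get_feature(torch.rand(1, 3, 8, 8)).shape)   # torch.Size([1, 3, 4, 4])
    print(feat.stride())                                    # 2

--------------------------------------------------------------------------------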
/pytracking/features/preprocessing.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import numpy as np
4 |
5 |
6 | def numpy_to_torch(a: np.ndarray):
7 | return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)
8 |
9 |
10 | def torch_to_numpy(a: torch.Tensor):
11 | return a.squeeze(0).permute(1,2,0).numpy()
12 |
13 |
14 | def sample_patch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, output_sz: torch.Tensor = None):
15 | """Sample an image patch.
16 |
17 | args:
18 | im: Image
19 | pos: center position of crop
20 | sample_sz: size to crop
21 | output_sz: size to resize to
22 | """
23 |
24 | # copy and convert
25 | posl = pos.long().clone()
26 |
27 | # Compute pre-downsampling factor
28 | if output_sz is not None:
29 | resize_factor = torch.min(sample_sz.float() / output_sz.float()).item()
30 | df = int(max(int(resize_factor - 0.1), 1))
31 | else:
32 | df = int(1)
33 |
34 | sz = sample_sz.float() / df # new size
35 |
36 | # Do downsampling
37 | if df > 1:
38 | os = posl % df # offset
39 | posl = (posl - os) / df # new position
40 | im2 = im[..., os[0].item()::df, os[1].item()::df] # downsample
41 | else:
42 | im2 = im
43 |
44 | # compute size to crop
45 | szl = torch.max(sz.round(), torch.Tensor([2])).long()
46 |
47 | # Extract top and bottom coordinates
48 | tl = posl - (szl - 1)/2
49 | br = posl + szl/2
50 |
51 | # Get image patch
52 | im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3] + 1, -tl[0].item(), br[0].item() - im2.shape[2] + 1), 'replicate')
53 |
54 | if output_sz is None or (im_patch.shape[-2] == output_sz[0] and im_patch.shape[-1] == output_sz[1]):
55 | return im_patch
56 |
57 | # Resample
58 | im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='bilinear')
59 |
60 | return im_patch
--------------------------------------------------------------------------------
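A sketch of `sample_patch`; note the module targets the older PyTorch releases this repository was written for (e.g. integer tensor division and `F.pad` with negative padding to crop):

    import torch
    from pytracking.features.preprocessing import sample_patch

    im = torch.rand(1, 3, 480, 640)                # (batch, channels, height, width)
    patch = sample_patch(im,
                         torch.Tensor([240, 320]), # crop center (y, x)
                         torch.Tensor([128, 128]), # crop size
                         torch.Tensor([64, 64]))   # output size
    print(patch.shape)                             # torch.Size([1, 3, 64, 64])

--------------------------------------------------------------------------------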
/pytracking/features/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from pytracking.features.featurebase import FeatureBase
3 |
4 |
5 | class Concatenate(FeatureBase):
6 | """A feature that concatenates other features.
7 | args:
8 | features: List of features to concatenate.
9 | """
10 | def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True):
11 | super(Concatenate, self).__init__(pool_stride=pool_stride, normalize_power=normalize_power, use_for_color=use_for_color, use_for_gray=use_for_gray) # keyword arguments required: FeatureBase.__init__ takes (fparams, pool_stride, output_size, ...), so positional arguments would land in the wrong slots
12 | self.features = features
13 |
14 | self.input_stride = self.features[0].stride()
15 |
16 | for feat in self.features:
17 | if self.input_stride != feat.stride():
18 | raise ValueError('Strides for the features must be the same for a multiresolution feature.')
19 |
20 | def dim(self):
21 | return sum([f.dim() for f in self.features])
22 |
23 | def stride(self):
24 | return self.pool_stride * self.input_stride
25 |
26 | def extract(self, im: torch.Tensor):
27 | return torch.cat([f.get_feature(im) for f in self.features], 1)
28 |
--------------------------------------------------------------------------------
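A sketch of `Concatenate` with the two color features (both have stride 1, so the stride check passes):

    import torch
    from pytracking.features.color import RGB, Grayscale
    from pytracking.features.util import Concatenate

    feat = Concatenate([RGB(), Grayscale()])
    print(feat.dim())                                       # 4 = 3 (RGB) + 1 (grayscale)
    print(feat.get_feature(torch.rand(1, 3, 8, 8)).shape)   # torch.Size([1, 4, 8, 8])

--------------------------------------------------------------------------------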
/pytracking/libs/__init__.py:
--------------------------------------------------------------------------------
1 | from .tensorlist import TensorList
2 | from .tensordict import TensorDict
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/complex.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/complex.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/dcf.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/dcf.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/fourier.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/fourier.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/operation.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/operation.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/optimization.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/optimization.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/tensordict.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/tensordict.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/__pycache__/tensorlist.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/libs/__pycache__/tensorlist.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/libs/fourier.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from pytracking import complex, TensorList
4 | from pytracking.libs.tensorlist import tensor_operation
5 |
6 |
7 | @tensor_operation
8 | def rfftshift2(a: torch.Tensor):
9 | h = a.shape[2] + 2
10 | return torch.cat((a[:,:,(h-1)//2:,...], a[:,:,:h//2,...]), 2)
11 |
12 |
13 | @tensor_operation
14 | def irfftshift2(a: torch.Tensor):
15 | mid = int((a.shape[2]-1)/2)
16 | return torch.cat((a[:,:,mid:,...], a[:,:,:mid,...]), 2)
17 |
18 |
19 | @tensor_operation
20 | def cfft2(a):
21 | """Do FFT and center the low frequency component.
22 | Always produces odd (full) output sizes."""
23 |
24 | return rfftshift2(torch.rfft(a, 2))
25 |
26 |
27 | @tensor_operation
28 | def cifft2(a, signal_sizes=None):
29 | """Do inverse FFT corresponding to cfft2."""
30 |
31 | return torch.irfft(irfftshift2(a), 2, signal_sizes=signal_sizes)
32 |
33 |
34 | @tensor_operation
35 | def sample_fs(a: torch.Tensor, grid_sz: torch.Tensor = None, rescale = True):
36 | """Samples the Fourier series."""
37 |
38 | # Size of the fourier series
39 | sz = torch.Tensor([a.shape[2], 2*a.shape[3]-1]).float()
40 |
41 | # Default grid
42 | if grid_sz is None or sz[0] == grid_sz[0] and sz[1] == grid_sz[1]:
43 | if rescale:
44 | return sz.prod().item() * cifft2(a)
45 | return cifft2(a)
46 |
47 | if sz[0] > grid_sz[0] or sz[1] > grid_sz[1]:
48 | raise ValueError("Only grid sizes that are smaller than the Fourier series size are supported.")
49 |
50 | tot_pad = (grid_sz - sz).tolist()
51 | is_even = [s.item() % 2 == 0 for s in sz]
52 |
53 | # Compute paddings
54 | pad_top = int((tot_pad[0]+1)/2) if is_even[0] else int(tot_pad[0]/2)
55 | pad_bottom = int(tot_pad[0] - pad_top)
56 | pad_right = int((tot_pad[1]+1)/2)
57 |
58 | if rescale:
59 | return grid_sz.prod().item() * cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist())
60 | else:
61 | return cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist())
62 |
63 |
64 | def get_frequency_coord(sz, add_complex_dim = False, device='cpu'):
65 | """Frequency coordinates."""
66 |
67 | ky = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32, device=device).view(1,1,-1,1)
68 | kx = torch.arange(0, int(sz[1]/2+1), dtype=torch.float32, device=device).view(1,1,1,-1)
69 |
70 | if add_complex_dim:
71 | ky = ky.unsqueeze(-1)
72 | kx = kx.unsqueeze(-1)
73 |
74 | return ky, kx
75 |
76 |
77 | @tensor_operation
78 | def shift_fs(a: torch.Tensor, shift: torch.Tensor):
79 | """Shift a sample a in the Fourier domain.
80 | Params:
81 | a : The fourier coefficiens of the sample.
82 | shift : The shift to be performed normalized to the range [-pi, pi]."""
83 |
84 | if a.dim() != 5:
85 | raise ValueError('a must be the Fourier coefficients, a 5-dimensional tensor.')
86 |
87 | if shift[0] == 0 and shift[1] == 0:
88 | return a
89 |
90 | ky, kx = get_frequency_coord((a.shape[2], 2*a.shape[3]-1), device=a.device)
91 |
92 | return complex.mult(complex.mult(a, complex.exp_imag(shift[0].item()*ky)), complex.exp_imag(shift[1].item()*kx))
93 |
94 |
95 | def sum_fs(a: TensorList) -> torch.Tensor:
96 | """Sum a list of Fourier series expansions."""
97 |
98 | s = None
99 | mid = None
100 |
101 | for e in sorted(a, key=lambda elem: elem.shape[-3], reverse=True):
102 | if s is None:
103 | s = e.clone()
104 | mid = int((s.shape[-3] - 1) / 2)
105 | else:
106 | # Compute coordinates
107 | top = mid - int((e.shape[-3] - 1) / 2)
108 | bottom = mid + int(e.shape[-3] / 2) + 1
109 | right = e.shape[-2]
110 |
111 | # Add the data
112 | s[..., top:bottom, :right, :] += e
113 |
114 | return s
115 |
116 |
117 | def sum_fs12(a: TensorList) -> torch.Tensor:
118 | """Sum a list of Fourier series expansions."""
119 |
120 | s = None
121 | mid = None
122 |
123 | for e in sorted(a, key=lambda elem: elem.shape[0], reverse=True):
124 | if s is None:
125 | s = e.clone()
126 | mid = int((s.shape[0] - 1) / 2)
127 | else:
128 | # Compute coordinates
129 | top = mid - int((e.shape[0] - 1) / 2)
130 | bottom = mid + int(e.shape[0] / 2) + 1
131 | right = e.shape[1]
132 |
133 | # Add the data
134 | s[top:bottom, :right, ...] += e
135 |
136 | return s
137 |
138 |
139 | @tensor_operation
140 | def inner_prod_fs(a: torch.Tensor, b: torch.Tensor):
141 | if complex.is_complex(a) and complex.is_complex(b):
142 | return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0, :].reshape(-1) @ b[:, :, :, 0, :].reshape(-1)
143 | elif complex.is_real(a) and complex.is_real(b):
144 | return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0].reshape(-1) @ b[:, :, :, 0].reshape(-1)
145 | else:
146 | raise NotImplementedError('Not implemented for mixed real and complex.')
--------------------------------------------------------------------------------
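For reference, `shift_fs` is the Fourier shift theorem in the centered-coefficient convention used here: with a shift $s = (s_y, s_x)$ normalized to $[-\pi, \pi]$ (as stated in its docstring) and frequency coordinates $(k_y, k_x)$ from `get_frequency_coord`, each coefficient is scaled as

    \hat{a}'_{k_y, k_x} = \hat{a}_{k_y, k_x} \, e^{i (s_y k_y + s_x k_x)}

Note also that `cfft2`/`cifft2` rely on `torch.rfft`/`torch.irfft`, which were removed in PyTorch 1.8, so this module requires an older PyTorch release.
--------------------------------------------------------------------------------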
/pytracking/libs/operation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from pytracking.libs.tensorlist import tensor_operation, TensorList
4 |
5 |
6 | @tensor_operation
7 | def conv2d(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None, stride=1, padding=0, dilation=1, groups=1, mode=None):
8 | """Standard conv2d. Returns the input if weight=None."""
9 |
10 | if weight is None:
11 | return input
12 |
13 | ind = None
14 | if mode is not None:
15 | if padding != 0:
16 |             raise ValueError('Cannot specify both padding and mode.')
17 | if mode == 'same':
18 | padding = (weight.shape[2]//2, weight.shape[3]//2)
19 | if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0:
20 | ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None),
21 | slice(-1) if weight.shape[3] % 2 == 0 else slice(None))
22 | elif mode == 'valid':
23 | padding = (0, 0)
24 | elif mode == 'full':
25 | padding = (weight.shape[2]-1, weight.shape[3]-1)
26 | else:
27 | raise ValueError('Unknown mode for padding.')
28 |
29 | out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
30 | if ind is None:
31 | return out
32 | return out[:,:,ind[0],ind[1]]
33 |
34 |
35 | @tensor_operation
36 | def conv1x1(input: torch.Tensor, weight: torch.Tensor):
37 |     """Do a convolution with 1x1 kernel weights. Implemented with matmul, which can be faster than using conv."""
38 |
39 | if weight is None:
40 | return input
41 |
42 | return torch.matmul(weight.view(weight.shape[0], weight.shape[1]),
43 | input.view(input.shape[0], input.shape[1], -1)).view(input.shape[0], weight.shape[0], input.shape[2], input.shape[3])
44 |
--------------------------------------------------------------------------------
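Since conv1x1 above replaces the convolution with a batched matmul, a quick sanity check (a sketch, not part of the repo) is to compare it against F.conv2d with a 1x1 kernel:

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 5, 5)   # (batch, in_channels, H, W)
w = torch.randn(4, 8, 1, 1)   # (out_channels, in_channels, 1, 1)

out_conv = F.conv2d(x, w)
out_mm = torch.matmul(w.view(4, 8), x.view(2, 8, -1)).view(2, 4, 5, 5)

print(torch.allclose(out_conv, out_mm, atol=1e-5))   # expected: True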
/pytracking/libs/tensordict.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 | import torch
3 |
4 |
5 | class TensorDict(OrderedDict):
6 | """Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality."""
7 |
8 | def concat(self, other):
9 | """Concatenates two dicts without copying internal data."""
10 | return TensorDict(self, **other)
11 |
12 | def copy(self):
13 | return TensorDict(super(TensorDict, self).copy())
14 |
15 | def __getattr__(self, name):
16 | if not hasattr(torch.Tensor, name):
17 |             raise AttributeError('\'TensorDict\' object has no attribute \'{}\''.format(name))
18 |
19 | def apply_attr(*args, **kwargs):
20 | return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})
21 | return apply_attr
22 |
23 | def attribute(self, attr: str, *args):
24 | return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})
25 |
26 | def apply(self, fn, *args, **kwargs):
27 | return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})
28 |
29 | @staticmethod
30 | def _iterable(a):
31 | return isinstance(a, (TensorDict, list))
32 |
33 |
--------------------------------------------------------------------------------
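Typical use of this container (an illustrative sketch; the import path follows this repository's layout): since __getattr__ forwards unknown names to every tensor value, any torch.Tensor method can be broadcast over the dict.

import torch
from pytracking.libs.tensordict import TensorDict

d = TensorDict({'feat': torch.randn(1, 4), 'label': torch.zeros(3)})

d_half = d.half()                    # applies .half() to each tensor value
d_detached = d.detach()              # likewise for .detach()
print(d.apply(lambda t: t.shape))    # map an arbitrary function over values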
/pytracking/networks/network.txt:
--------------------------------------------------------------------------------
1 | place network
--------------------------------------------------------------------------------
/pytracking/parameter/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/pytracking/parameter/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/GOT10k.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # ------------------ CHANGED ------------------#
10 |
11 | # Output result images
12 | params.output_image = False
13 | params.output_image_path = './debug/result_image/'
14 |
15 | # Training parameters for locator
16 | params.regularization = 0.1 # Regularization term to train locator
17 | params.learning_rate = 0.016 # Learning rate to update locator features model
18 | params.train_skipping = 10 # How often to run training (every n-th frame)
19 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size
20 | params.target_not_found_threshold = 0.3 # Absolute score threshold to detect target missing
21 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
22 |
23 | # Hard negative samples mining
24 | params.hard_negative_mining = True # Perform hard negative samples mining
25 | params.hard_negative_threshold = 0.3 # Absolute threshold to find hard negative samples
26 | params.hard_negative_learning_rate = 0.15 # Learning rate if hard negative samples are detected
27 |     params.hard_negative_distance_ratio = 0.15  # Distance range, relative to image sample area, for detecting hard negative samples
28 |
29 | # Windowing
30 | params.window_output = True # Perform windowing to output scores
31 | params.window_sigma_factor = 1.3 # Standard deviation of Gaussian output window relative to target size
32 | params.window_min = 0.5 # Min value of the output window
33 |
34 | # Scale update
35 | params.scale_damp = 0.3 # Linear interpolation coefficient for target scale update
36 |
37 | # Setup the tracking model
38 | params.model = deep.SBDTNet18(net_path='DCFST-18.pth')
39 |
40 | # GPU
41 | params.use_gpu = True
42 | params.device = 'cuda'
43 |
44 | # Patch sampling
45 | params.search_area_scale = 5 # Scale relative to target size
46 | params.img_sample_area = 288**2 # Area of the image sample
47 |
48 | # Locator proposals
49 | params.num_proposals_locator = 31**2 # Number of proposals in locator
50 |
51 | # Data augmentation
52 | params.augmentation = {'fliplr': True,
53 | 'rotate': [5, -5, 10, -10, 20, -20],
54 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
55 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
56 | params.use_augmentation = True # Whether to use augmentation
57 |
58 | # IoUNet
59 | params.iounet_k = 3 # Top-k average to estimate final box
60 | params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction
61 | params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes
62 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes
63 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio
64 | params.box_refinement_iter = 5 # Number of iterations for refining the boxes
65 | params.box_refinement_step_length = 1 # Gradient step length in the bounding box refinement
66 | params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay)
67 |
68 | # Scale bounds
69 | params.min_scale_factor = 0.2 # Min value of the scale bound
70 | params.max_scale_factor = 5.0 # Max value of the scale bound
71 |
72 | return params
--------------------------------------------------------------------------------
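A parameter module like the one above is resolved by name at run time; the sketch below (illustrative, not the repo's own loading code) shows the minimal equivalent with importlib. It assumes the DCFST-18.pth checkpoint referenced by the module is available.

import importlib

param_module = importlib.import_module('pytracking.parameter.sbdt.GOT10k')
params = param_module.parameters()
print(params.learning_rate, params.search_area_scale)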
/pytracking/parameter/sbdt/NfS.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # ------------------ CHANGED ------------------#
10 |
11 | # Output result images
12 | params.output_image = False
13 | params.output_image_path = './debug/result_image/'
14 |
15 | # Training parameters for locator
16 | params.regularization = 0.1 # Regularization term to train locator
17 | params.learning_rate = 0.016 # Learning rate to update locator features model
18 | params.train_skipping = 10 # How often to run training (every n-th frame)
19 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size
20 | params.target_not_found_threshold = 0.25 # Absolute score threshold to detect target missing
21 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
22 |
23 | # Hard negative samples mining
24 | params.hard_negative_mining = True # Perform hard negative samples mining
25 | params.hard_negative_threshold = 0.3 # Absolute threshold to find hard negative samples
26 | params.hard_negative_learning_rate = 0.125 # Learning rate if hard negative samples are detected
27 |     params.hard_negative_distance_ratio = 0.15  # Distance range, relative to image sample area, for detecting hard negative samples
28 |
29 | # Windowing
30 | params.window_output = True # Perform windowing to output scores
31 | params.window_sigma_factor = 2.2 # Standard deviation of Gaussian output window relative to target size
32 | params.window_min = 0.8 # Min value of the output window
33 |
34 | # Scale update
35 | params.scale_damp = 0.3 # Linear interpolation coefficient for target scale update
36 |
37 | # Setup the tracking model
38 | params.model = deep.SBDTNet18(net_path='DCFST-18.pth')
39 |
40 | # GPU
41 | params.use_gpu = True
42 | params.device = 'cuda'
43 |
44 | # Patch sampling
45 | params.search_area_scale = 5 # Scale relative to target size
46 | params.img_sample_area = 288**2 # Area of the image sample
47 |
48 | # Locator proposals
49 | params.num_proposals_locator = 31**2 # Number of proposals in locator
50 |
51 | # Data augmentation
52 | params.augmentation = {'fliplr': True,
53 | 'rotate': [5, -5, 10, -10, 20, -20],
54 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
55 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
56 | params.use_augmentation = True # Whether to use augmentation
57 |
58 | # IoUNet
59 | params.iounet_k = 3 # Top-k average to estimate final box
60 | params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction
61 | params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes
62 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes
63 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio
64 | params.box_refinement_iter = 5 # Number of iterations for refining the boxes
65 | params.box_refinement_step_length = 1 # Gradient step length in the bounding box refinement
66 | params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay)
67 |
68 | # Scale bounds
69 | params.min_scale_factor = 0.2 # Min value of the scale bound
70 | params.max_scale_factor = 5.0 # Max value of the scale bound
71 |
72 | return params
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/OTB2015.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # ------------------ CHANGED ------------------#
10 |
11 | # Output result images
12 | params.output_image = False
13 | params.output_image_path = './debug/result_image/'
14 |
15 | # Training parameters for locator
16 | params.regularization = 0.1 # Regularization term to train locator
17 | params.learning_rate = 0.013 # Learning rate to update locator features model
18 | params.train_skipping = 10 # How often to run training (every n-th frame)
19 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size
20 | params.target_not_found_threshold = 0.25 # Absolute score threshold to detect target missing
21 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
22 |
23 | # Hard negative samples mining
24 | params.hard_negative_mining = True # Perform hard negative samples mining
25 | params.hard_negative_threshold = 0.3 # Absolute threshold to find hard negative samples
26 | params.hard_negative_learning_rate = 0.125 # Learning rate if hard negative samples are detected
27 |     params.hard_negative_distance_ratio = 0.15  # Distance range, relative to image sample area, for detecting hard negative samples
28 |
29 | # Windowing
30 | params.window_output = True # Perform windowing to output scores
31 | params.window_sigma_factor = 1.05 # Standard deviation of Gaussian output window relative to target size
32 | params.window_min = 0.5 # Min value of the output window
33 |
34 | # Scale update
35 | params.scale_damp = 0.7 # Linear interpolation coefficient for target scale update
36 |
37 | # Setup the tracking model
38 | params.model = deep.SBDTNet18(net_path='DCFST-18.pth')
39 |
40 | # GPU
41 | params.use_gpu = True
42 | params.device = 'cuda'
43 |
44 | # Patch sampling
45 | params.search_area_scale = 5 # Scale relative to target size
46 | params.img_sample_area = 288**2 # Area of the image sample
47 |
48 | # Locator proposals
49 | params.num_proposals_locator = 31**2 # Number of proposals in locator
50 |
51 | # Data augmentation
52 | params.augmentation = {'fliplr': True,
53 | 'rotate': [5, -5, 10, -10, 20, -20],
54 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
55 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
56 | params.use_augmentation = True # Whether to use augmentation
57 |
58 | # IoUNet
59 | params.iounet_k = 3 # Top-k average to estimate final box
60 | params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction
61 | params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes
62 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes
63 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio
64 | params.box_refinement_iter = 5 # Number of iterations for refining the boxes
65 | params.box_refinement_step_length = 1 # Gradient step length in the bounding box refinement
66 | params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay)
67 |
68 | # Scale bounds
69 | params.min_scale_factor = 0.2 # Min value of the scale bound
70 | params.max_scale_factor = 5.0 # Max value of the scale bound
71 |
72 | return params
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/TrackingNet.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # ------------------ CHANGED ------------------#
10 |
11 | # Output result images
12 | params.output_image = False
13 | params.output_image_path = './debug/result_image/'
14 |
15 | # Training parameters for locator
16 | params.regularization = 0.1 # Regularization term to train locator
17 | params.learning_rate = 0.01 # Learning rate to update locator features model
18 | params.train_skipping = 10 # How often to run training (every n-th frame)
19 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size
20 | params.target_not_found_threshold = 0.3 # Absolute score threshold to detect target missing
21 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
22 |
23 | # Hard negative samples mining
24 | params.hard_negative_mining = True # Perform hard negative samples mining
25 | params.hard_negative_threshold = 0.3 # Absolute threshold to find hard negative samples
26 | params.hard_negative_learning_rate = 0.1 # Learning rate if hard negative samples are detected
27 |     params.hard_negative_distance_ratio = 0.15  # Distance range, relative to image sample area, for detecting hard negative samples
28 |
29 | # Windowing
30 | params.window_output = True # Perform windowing to output scores
31 | params.window_sigma_factor = 0.8 # Standard deviation of Gaussian output window relative to target size
32 | params.window_min = 0.6 # Min value of the output window
33 |
34 | # Scale update
35 | params.scale_damp = 0.5 # Linear interpolation coefficient for target scale update
36 |
37 | # Setup the tracking model
38 | params.model = deep.SBDTNet18(net_path='DCFST-18.pth')
39 |
40 | # GPU
41 | params.use_gpu = True
42 | params.device = 'cuda'
43 |
44 | # Patch sampling
45 | params.search_area_scale = 5 # Scale relative to target size
46 | params.img_sample_area = 288**2 # Area of the image sample
47 |
48 | # Locator proposals
49 | params.num_proposals_locator = 31**2 # Number of proposals in locator
50 |
51 | # Data augmentation
52 | params.augmentation = {'fliplr': True,
53 | 'rotate': [5, -5, 10, -10, 20, -20],
54 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
55 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
56 | params.use_augmentation = True # Whether to use augmentation
57 |
58 | # IoUNet
59 | params.iounet_k = 3 # Top-k average to estimate final box
60 | params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction
61 | params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes
62 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes
63 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio
64 | params.box_refinement_iter = 5 # Number of iterations for refining the boxes
65 | params.box_refinement_step_length = 1 # Gradient step length in the bounding box refinement
66 | params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay)
67 |
68 | # Scale bounds
69 | params.min_scale_factor = 0.2 # Min value of the scale bound
70 | params.max_scale_factor = 5.0 # Max value of the scale bound
71 |
72 | return params
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/VOT19.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # ------------------ CHANGED ------------------#
10 |
11 | # Output result images
12 | params.output_image = False
13 | params.output_image_path = './debug/result_image/'
14 |
15 | # Training parameters for locator
16 | params.regularization = 0.1 # Regularization term to train locator
17 | params.learning_rate = 0.018 # Learning rate to update locator features model
18 | params.train_skipping = 10 # How often to run training (every n-th frame)
19 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size
20 | params.target_not_found_threshold = 0.1 # Absolute score threshold to detect target missing
21 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
22 |
23 | # Hard negative samples mining
24 | params.hard_negative_mining = True # Perform hard negative samples mining
25 | params.hard_negative_threshold = 0.15 # Absolute threshold to find hard negative samples
26 | params.hard_negative_learning_rate = 0.16 # Learning rate if hard negative samples are detected
27 |     params.hard_negative_distance_ratio = 0.15  # Distance range, relative to image sample area, for detecting hard negative samples
28 |
29 | # Windowing
30 | params.window_output = True # Perform windowing to output scores
31 | params.window_sigma_factor = 0.8 # Standard deviation of Gaussian output window relative to target size
32 | params.window_min = 0.4 # Min value of the output window
33 |
34 | # Scale update
35 | params.scale_damp = 0.5 # Linear interpolation coefficient for target scale update
36 |
37 | # Setup the tracking model
38 | params.model = deep.SBDTNet18(net_path='DCFST-18.pth')
39 |
40 | # GPU
41 | params.use_gpu = True
42 | params.device = 'cuda'
43 |
44 | # Patch sampling
45 | params.search_area_scale = 5 # Scale relative to target size
46 | params.img_sample_area = 288**2 # Area of the image sample
47 |
48 | # Locator proposals
49 | params.num_proposals_locator = 31**2 # Number of proposals in locator
50 |
51 | # Data augmentation
52 | params.augmentation = {'fliplr': True,
53 | 'rotate': [5, -5, 10, -10, 20, -20],
54 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
55 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
56 | params.use_augmentation = True # Whether to use augmentation
57 |
58 | # IoUNet
59 | params.iounet_k = 3 # Top-k average to estimate final box
60 | params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction
61 | params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes
62 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes
63 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio
64 | params.box_refinement_iter = 5 # Number of iterations for refining the boxes
65 | params.box_refinement_step_length = 1 # Gradient step length in the bounding box refinement
66 | params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay)
67 |
68 | # Scale bounds
69 | params.min_scale_factor = 0.166 # Min value of the scale bound
70 | params.max_scale_factor = 6.000 # Max value of the scale bound
71 |
72 | return params
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/GOT10k.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/GOT10k.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/NfS.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/NfS.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/OTB2015.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/OTB2015.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/TrackingNet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/TrackingNet.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/VOT19.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/VOT19.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/default.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/default.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/default2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/default2.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/defaultGOT.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/defaultGOT.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/defaultNFS.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/defaultNFS.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/defaultNFS2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/defaultNFS2.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt/__pycache__/norm_default2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt/__pycache__/norm_default2.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/GOT10k.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # Parameters for debugging
10 | params.output_image = False
11 | params.output_image_path = './debug/result_image/'
12 |
13 | # Parameters for device and tracking model
14 | params.use_gpu = True
15 | params.device = 'cuda'
16 | params.model = deep.SBDTNet50(net_path='DCFST-50.pth')
17 |
18 | # Parameters for sampling search region
19 | params.search_padding = 5.0 # Sampling size relative to target size
20 | params.img_sample_area = 288**2 # Area of the search region image
21 |
22 | # Parameters for training locator
23 | params.regularization = 0.1 # Regularization term to train locator (train with 0.1)
24 | params.learning_rate = 0.02 # Learning rate to update locator
25 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size (train with 1/4)
26 | params.proposals_num = 31**2 # Number of uniform proposals in locator (train with 31**2)
27 | params.train_skipping = 10 # How often to run locator training (common: 10)
28 | params.target_not_found = 0.2 # Absolute score threshold to detect target missing
29 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
30 |
31 | # Parameters for hard negative samples mining
32 | params.hard_negative_mining = True # Whether to perform hard negative samples mining
33 | params.hard_negative_threshold = 0.5 # Relative threshold to find hard negative samples (common: 0.5)
34 | params.hard_negative_learning_rate = 0.1 # Learning rate if hard negative samples are detected (small)
35 |     params.hard_negative_distance_ratio = 0.75  # Distance range, relative to target size, within which hard negative detections are ignored
36 |
37 | # Parameters for window
38 | params.window_output = True # Whether to perform window
39 | params.window_sigma_factor = 1.0 # Standard deviation of Gaussian window relative to target size (large)
40 | params.window_min_value = 0.5 # Min value of the output window (large)
41 |
42 | # Parameters for iounet refinement
43 | params.num_init_random_boxes = 19 # Number of random boxes for scale refinement (ATOM: 9)
44 | params.box_jitter_pos = 0.2 # How much to jitter the translation for random boxes (ATOM: 0.1)
45 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes (ATOM: 0.5)
46 | params.box_refinement_iter = 5 # Number of iterations for box refinement (ATOM: 5)
47 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio (ATOM: 6)
48 | params.iounet_k = 5 # Top-k average to estimate final box (ATOM: 3)
49 | params.scale_damp = 0.4 # Linear interpolation coefficient for target scale update (small)
50 |
51 | # Parameters for data augmentation
52 | params.augmentation = True # Whether to perform data augmentation
53 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
54 | params.augmentation_method = {'fliplr': True,
55 | 'rotate': [5, -5, 10, -10, 20, -20],
56 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
57 |
58 | return params
--------------------------------------------------------------------------------
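The sbdt50 parameter files differ only in a handful of values, so a small helper (hypothetical, not in the repo; it assumes the referenced DCFST-50.pth checkpoint is available, since parameters() constructs the model) can surface the differences:

import importlib

def diff_params(mod_a, mod_b):
    # Print attributes that differ between two parameter modules.
    a = importlib.import_module(mod_a).parameters()
    b = importlib.import_module(mod_b).parameters()
    for k in sorted(set(vars(a)) | set(vars(b))):
        va, vb = getattr(a, k, None), getattr(b, k, None)
        if va != vb:
            print(f'{k}: {va!r} vs {vb!r}')

diff_params('pytracking.parameter.sbdt50.GOT10k',
            'pytracking.parameter.sbdt50.TrackingNet')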
/pytracking/parameter/sbdt50/TrackingNet.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # Parameters for debugging
10 | params.output_image = False
11 | params.output_image_path = './debug/result_image/'
12 |
13 | # Parameters for device and tracking model
14 | params.use_gpu = True
15 | params.device = 'cuda'
16 | params.model = deep.SBDTNet50(net_path='DCFST-50.pth')
17 |
18 | # Parameters for sampling search region
19 | params.search_padding = 5.0 # Sampling size relative to target size
20 | params.img_sample_area = 288**2 # Area of the search region image
21 |
22 | # Parameters for training locator
23 | params.regularization = 0.1 # Regularization term to train locator (train with 0.1)
24 | params.learning_rate = 0.016 # Learning rate to update locator
25 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size (train with 1/4)
26 | params.proposals_num = 31**2 # Number of uniform proposals in locator (train with 31**2)
27 | params.train_skipping = 10 # How often to run locator training (common: 10)
28 | params.target_not_found = 0.2 # Absolute score threshold to detect target missing (small)
29 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
30 |
31 | # Parameters for hard negative samples mining
32 | params.hard_negative_mining = True # Whether to perform hard negative samples mining
33 | params.hard_negative_threshold = 0.5 # Relative threshold to find hard negative samples (common: 0.5)
34 | params.hard_negative_learning_rate = 0.01 # Learning rate if hard negative samples are detected (small)
35 |     params.hard_negative_distance_ratio = 0.75  # Distance range, relative to target size, within which hard negative detections are ignored
36 |
37 | # Parameters for window
38 | params.window_output = True # Whether to perform window
39 | params.window_sigma_factor = 1.2 # Standard deviation of Gaussian window relative to target size (large)
40 | params.window_min_value = 0.5 # Min value of the output window (large)
41 |
42 | # Parameters for iounet refinement
43 | params.num_init_random_boxes = 19 # Number of random boxes for scale refinement (ATOM: 9)
44 | params.box_jitter_pos = 0.2 # How much to jitter the translation for random boxes (ATOM: 0.1)
45 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes (ATOM: 0.5)
46 | params.box_refinement_iter = 5 # Number of iterations for box refinement (ATOM: 5)
47 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio (ATOM: 6)
48 | params.iounet_k = 5 # Top-k average to estimate final box (ATOM: 3)
49 | params.scale_damp = 0.3 # Linear interpolation coefficient for target scale update (small)
50 |
51 | # Parameters for data augmentation
52 | params.augmentation = True # Whether to perform data augmentation
53 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
54 | params.augmentation_method = {'fliplr': True,
55 | 'rotate': [5, -5, 10, -10, 20, -20],
56 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
57 |
58 | return params
59 |
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/VOT18.py:
--------------------------------------------------------------------------------
1 | from pytracking.features import deep
2 | from pytracking.utils import TrackerParams
3 |
4 | def parameters(ID=None):
5 |
6 | # Tracker specific parameters
7 | params = TrackerParams()
8 |
9 | # Parameters for debugging
10 | params.output_image = False
11 | params.output_image_path = './debug/result_image/'
12 |
13 | # Parameters for device and tracking model
14 | params.use_gpu = True
15 | params.device = 'cuda'
16 | params.model = deep.SBDTNet50(net_path='DCFST-50.pth')
17 |
18 | # Parameters for sampling search region
19 | params.search_padding = 5.0 # Sampling size relative to target size
20 | params.img_sample_area = 288**2 # Area of the search region image
21 |
22 | # Parameters for training locator
23 | params.regularization = 0.1 # Regularization term to train locator (train with 0.1)
24 | params.learning_rate = 0.018 # Learning rate to update locator
25 | params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size (train with 1/4)
26 | params.proposals_num = 31**2 # Number of uniform proposals in locator (train with 31**2)
27 | params.train_skipping = 10 # How often to run locator training (common: 10)
28 | params.target_not_found = 0.0 # Absolute score threshold to detect target missing (small)
29 | params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples
30 |
31 | # Parameters for hard negative samples mining
32 | params.hard_negative_mining = True # Whether to perform hard negative samples mining
33 | params.hard_negative_threshold = 0.5 # Relative threshold to find hard negative samples (common: 0.5)
34 | params.hard_negative_learning_rate = 0.22 # Learning rate if hard negative samples are detected (small)
35 |     params.hard_negative_distance_ratio = 0.75  # Distance range, relative to target size, within which hard negative detections are ignored
36 |
37 | # Parameters for window
38 | params.window_output = True # Whether to perform window
39 | params.window_sigma_factor = 0.9 # Standard deviation of Gaussian window relative to target size (large)
40 | params.window_min_value = 0.3 # Min value of the output window (large)
41 |
42 | # Parameters for iounet refinement
43 | params.num_init_random_boxes = 19 # Number of random boxes for scale refinement (ATOM: 9)
44 | params.box_jitter_pos = 0.2 # How much to jitter the translation for random boxes (ATOM: 0.1)
45 | params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes (ATOM: 0.5)
46 | params.box_refinement_iter = 5 # Number of iterations for box refinement (ATOM: 5)
47 | params.maximal_aspect_ratio = 6 # Limit on the aspect ratio (ATOM: 6)
48 | params.iounet_k = 5 # Top-k average to estimate final box (ATOM: 3)
49 | params.scale_damp = 0.6 # Linear interpolation coefficient for target scale update (small)
50 |
51 | # Parameters for data augmentation
52 | params.augmentation = True # Whether to perform data augmentation
53 | params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation
54 | params.augmentation_method = {'fliplr': True,
55 | 'rotate': [5, -5, 10, -10, 20, -20],
56 | 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)]}
57 |
58 | return params
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/GOT10k.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/GOT10k.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/TrackingNet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/TrackingNet.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/VOT18.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/VOT18.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/default.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/default.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/default2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/default2.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/defaultGOT.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/defaultGOT.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/defaultNFS.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/defaultNFS.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/defaultNFS2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/defaultNFS2.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/defaultTrackingNet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/defaultTrackingNet.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/parameter/sbdt50/__pycache__/norm_default2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/parameter/sbdt50/__pycache__/norm_default2.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/run_tracker.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 |
5 | env_path = os.path.join(os.path.dirname(__file__), '..')
6 | if env_path not in sys.path:
7 | sys.path.append(env_path)
8 |
9 | from pytracking.evaluation.otbdataset import OTBDataset
10 | from pytracking.evaluation.nfsdataset import NFSDataset
11 | from pytracking.evaluation.uavdataset import UAVDataset
12 | from pytracking.evaluation.tpldataset import TPLDataset
13 | from pytracking.evaluation.lasotdataset import LaSOTDataset
14 | from pytracking.evaluation.trackingnetdataset import TrackingNetDataset
15 | from pytracking.evaluation.got10kdataset import GOT10KDatasetTest, GOT10KDatasetVal, GOT10KDatasetLTRVal
16 | from pytracking.evaluation.running import run_dataset
17 | from pytracking.evaluation import Tracker
18 | from pytracking.evaluation.vot18dataset import VOT18Dataset
19 | from pytracking.evaluation.vot19dataset import VOT19Dataset
20 |
21 | def run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0):
22 | """Run tracker on sequence or dataset.
23 | args:
24 | tracker_name: Name of tracking method.
25 | tracker_param: Name of parameter file.
26 | run_id: The run id.
27 |         dataset_name: Name of dataset (otb, nfs, uav, tpl, vot18, vot19, tn, gott, gotv, gotlv, lasot).
28 | sequence: Sequence number or name.
29 | debug: Debug level.
30 | threads: Number of threads.
31 | """
32 | if dataset_name == 'otb':
33 | dataset = OTBDataset()
34 | elif dataset_name == 'nfs':
35 | dataset = NFSDataset()
36 | elif dataset_name == 'uav':
37 | dataset = UAVDataset()
38 | elif dataset_name == 'tpl':
39 | dataset = TPLDataset()
40 | elif dataset_name == 'vot18':
41 | dataset = VOT18Dataset()
42 | elif dataset_name == 'vot19':
43 | dataset = VOT19Dataset()
44 | elif dataset_name == 'tn':
45 | dataset = TrackingNetDataset()
46 | elif dataset_name == 'gott':
47 | dataset = GOT10KDatasetTest()
48 | elif dataset_name == 'gotv':
49 | dataset = GOT10KDatasetVal()
50 | elif dataset_name == 'gotlv':
51 | dataset = GOT10KDatasetLTRVal()
52 | elif dataset_name == 'lasot':
53 | dataset = LaSOTDataset()
54 | else:
55 | raise ValueError('Unknown dataset name')
56 |
57 | if sequence is not None:
58 | dataset = [dataset[sequence]]
59 |
60 | trackers = [Tracker(tracker_name, tracker_param, run_id)]
61 |
62 | run_dataset(dataset, trackers, debug, threads)
63 |
64 |
65 | def main():
66 | parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')
67 |
68 | parser.add_argument('tracker_name', type=str, help='Name of tracking method.')
69 | parser.add_argument('tracker_param', type=str, help='Name of parameter file.')
70 | parser.add_argument('--runid', type=int, default=None, help='The run id.')
71 |     parser.add_argument('--dataset', type=str, default='otb', help='Name of dataset (otb, nfs, uav, tpl, vot18, vot19, tn, gott, gotv, gotlv, lasot).')
72 | parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.')
73 | parser.add_argument('--debug', type=int, default=0, help='Debug level.')
74 | parser.add_argument('--threads', type=int, default=0, help='Number of threads.')
75 |
76 | args = parser.parse_args()
77 |
78 | run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset, args.sequence, args.debug, args.threads)
79 |
80 |
81 | if __name__ == '__main__':
82 | main()
83 |
--------------------------------------------------------------------------------
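Given the tracker and parameter module names elsewhere in this repo, typical invocations look like the following (a sketch; the sequence name is illustrative):

# From the shell:
#   python pytracking/run_tracker.py sbdt OTB2015 --dataset otb
#   python pytracking/run_tracker.py sbdt50 VOT18 --dataset vot18 --debug 1

# Or programmatically:
from pytracking.run_tracker import run_tracker

run_tracker('sbdt', 'OTB2015', dataset_name='otb', sequence='Basketball')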
/pytracking/run_webcam.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 |
5 | env_path = os.path.join(os.path.dirname(__file__), '..')
6 | if env_path not in sys.path:
7 | sys.path.append(env_path)
8 |
9 | from pytracking.evaluation import Tracker
10 |
11 |
12 | def run_webcam(tracker_name, tracker_param, debug=None):
13 | """Run the tracker on your webcam.
14 | args:
15 | tracker_name: Name of tracking method.
16 | tracker_param: Name of parameter file.
17 | debug: Debug level.
18 | """
19 | tracker = Tracker(tracker_name, tracker_param)
20 | tracker.run_webcam(debug)
21 |
22 |
23 | def main():
24 | parser = argparse.ArgumentParser(description='Run the tracker on your webcam.')
25 | parser.add_argument('tracker_name', type=str, help='Name of tracking method.')
26 | parser.add_argument('tracker_param', type=str, help='Name of parameter file.')
27 | parser.add_argument('--debug', type=int, default=0, help='Debug level.')
28 |
29 | args = parser.parse_args()
30 |
31 | run_webcam(args.tracker_name, args.tracker_param, args.debug)
32 |
33 |
34 | if __name__ == '__main__':
35 | main()
36 |
--------------------------------------------------------------------------------
/pytracking/tracker/__init__.py:
--------------------------------------------------------------------------------
1 | """ empty """
--------------------------------------------------------------------------------
/pytracking/tracker/base/__init__.py:
--------------------------------------------------------------------------------
1 | from .basetracker import BaseTracker
--------------------------------------------------------------------------------
/pytracking/tracker/base/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/tracker/base/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/tracker/base/__pycache__/basetracker.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/tracker/base/__pycache__/basetracker.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/tracker/sbdt/__init__.py:
--------------------------------------------------------------------------------
1 | from .sbdt import SBDT
2 |
3 | def get_tracker_class():
4 | return SBDT
5 |
--------------------------------------------------------------------------------
/pytracking/tracker/sbdt/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/tracker/sbdt/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/tracker/sbdt/__pycache__/sbdt.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/tracker/sbdt/__pycache__/sbdt.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/tracker/sbdt50/__init__.py:
--------------------------------------------------------------------------------
1 | from .sbdt import SBDT
2 |
3 | def get_tracker_class():
4 | return SBDT
5 |
--------------------------------------------------------------------------------
/pytracking/tracker/sbdt50/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/tracker/sbdt50/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/tracker/sbdt50/__pycache__/sbdt.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/tracker/sbdt50/__pycache__/sbdt.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/tracking_results/save.txt:
--------------------------------------------------------------------------------
1 | save tracking results
--------------------------------------------------------------------------------
/pytracking/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # from .evaluation import *
2 | from .params import *
--------------------------------------------------------------------------------
/pytracking/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/utils/__pycache__/params.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/utils/__pycache__/params.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/utils/__pycache__/plotting.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CASIA-IVA-Lab/DCFST/ca881ba3aae1ce00e4a7a6db01d99e5f6efff68b/pytracking/utils/__pycache__/plotting.cpython-37.pyc
--------------------------------------------------------------------------------
/pytracking/utils/gdrive_download:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # The script taken from https://www.matthuisman.nz/2019/01/download-google-drive-files-wget-curl.html
4 |
5 | url=$1
6 | filename=$2
7 |
8 | [ -z "$url" ] && echo A URL or ID is required as the first argument && exit 1
9 |
10 | fileid=""
11 | declare -a patterns=("s/.*\/file\/d\/\(.*\)\/.*/\1/p" "s/.*id\=\(.*\)/\1/p" "s/\(.*\)/\1/p")
12 | for i in "${patterns[@]}"
13 | do
14 | fileid=$(echo $url | sed -n $i)
15 | [ ! -z "$fileid" ] && break
16 | done
17 |
18 | [ -z "$fileid" ] && echo Could not find Google ID && exit 1
19 |
20 | echo File ID: $fileid
21 |
22 | tmp_file="$filename.$$.file"
23 | tmp_cookies="$filename.$$.cookies"
24 | tmp_headers="$filename.$$.headers"
25 |
26 | url='https://docs.google.com/uc?export=download&id='$fileid
27 | echo Downloading: "$url > $tmp_file"
28 | wget --save-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file"
29 |
30 | if [[ ! $(find "$tmp_file" -type f -size +10000c 2>/dev/null) ]]; then
31 | confirm=$(cat "$tmp_file" | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1/p')
32 | fi
33 |
34 | if [ ! -z "$confirm" ]; then
35 | url='https://docs.google.com/uc?export=download&id='$fileid'&confirm='$confirm
36 | echo Downloading: "$url > $tmp_file"
37 | wget --load-cookies "$tmp_cookies" -q -S -O - $url 2> "$tmp_headers" 1> "$tmp_file"
38 | fi
39 |
40 | [ -z "$filename" ] && filename=$(cat "$tmp_headers" | sed -rn 's/.*filename=\"(.*)\".*/\1/p')
41 | [ -z "$filename" ] && filename="google_drive.file"
42 |
43 | echo Moving: "$tmp_file > $filename"
44 |
45 | mv "$tmp_file" "$filename"
46 |
47 | rm -f "$tmp_cookies" "$tmp_headers"
48 |
49 | echo Saved: "$filename"
50 | echo DONE!
51 |
52 | exit 0
53 |
--------------------------------------------------------------------------------
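As the argument handling above implies, the script is invoked as ./gdrive_download <google-drive-url-or-id> [output-filename]; when no filename is given, it falls back to the name found in the response headers, or to google_drive.file.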
/pytracking/utils/params.py:
--------------------------------------------------------------------------------
1 | from pytracking import TensorList
2 | import random
3 |
4 |
5 | class TrackerParams:
6 | """Class for tracker parameters."""
7 | def free_memory(self):
8 | for a in dir(self):
9 | if not a.startswith('__') and hasattr(getattr(self, a), 'free_memory'):
10 | getattr(self, a).free_memory()
11 |
12 |
13 | class FeatureParams:
14 | """Class for feature specific parameters"""
15 | def __init__(self, *args, **kwargs):
16 | if len(args) > 0:
17 | raise ValueError
18 |
19 | for name, val in kwargs.items():
20 | if isinstance(val, list):
21 | setattr(self, name, TensorList(val))
22 | else:
23 | setattr(self, name, val)
24 |
25 |
26 | def Choice(*args):
27 | """Can be used to sample random parameter values."""
28 | return random.choice(args)
29 |
--------------------------------------------------------------------------------
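Choice draws one value per call, so a parameters() function can sample a different setting on each construction. A short illustrative sketch:

import random
from pytracking.utils.params import TrackerParams, Choice

random.seed(0)                                       # for reproducibility
params = TrackerParams()
params.learning_rate = Choice(0.013, 0.016, 0.018)   # sampled here, once
print(params.learning_rate)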
/pytracking/utils/plotting.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | matplotlib.use('TkAgg')
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import torch
6 |
7 |
8 | def show_tensor(a: torch.Tensor, fig_num = None, title = None):
9 | """Display a 2D tensor.
10 | args:
11 | fig_num: Figure number.
12 | title: Title of figure.
13 | """
14 | a_np = a.squeeze().cpu().clone().detach().numpy()
15 | if a_np.ndim == 3:
16 | a_np = np.transpose(a_np, (1, 2, 0))
17 | plt.figure(fig_num)
18 | plt.tight_layout()
19 | plt.cla()
20 | plt.imshow(a_np)
21 | plt.axis('off')
22 | plt.axis('equal')
23 | if title is not None:
24 | plt.title(title)
25 | plt.draw()
26 | plt.pause(0.001)
27 |
28 |
29 | def plot_graph(a: torch.Tensor, fig_num = None, title = None):
30 | """Plot graph. Data is a 1D tensor.
31 | args:
32 | fig_num: Figure number.
33 | title: Title of figure.
34 | """
35 | a_np = a.squeeze().cpu().clone().detach().numpy()
36 | if a_np.ndim > 1:
37 | raise ValueError
38 | plt.figure(fig_num)
39 | # plt.tight_layout()
40 | plt.cla()
41 | plt.plot(a_np)
42 | if title is not None:
43 | plt.title(title)
44 | plt.draw()
45 | plt.pause(0.001)
46 |
--------------------------------------------------------------------------------
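Illustrative usage of the two helpers above (requires a display, since the module forces the TkAgg backend):

import torch
from pytracking.utils.plotting import show_tensor, plot_graph

show_tensor(torch.rand(3, 64, 64), fig_num=1, title='random RGB patch')
plot_graph(torch.linspace(0, 1, 50) ** 2, fig_num=2, title='scores')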