├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── examples
│   ├── quick_examples.ipynb
│   └── quick_examples.py
├── got10k
│   ├── __init__.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── dtb70.py
│   │   ├── got10k.py
│   │   ├── lasot.json
│   │   ├── lasot.py
│   │   ├── nfs.py
│   │   ├── otb.py
│   │   ├── tcolor128.py
│   │   ├── trackingnet.py
│   │   ├── uav123.json
│   │   ├── uav123.py
│   │   ├── vid.py
│   │   └── vot.py
│   ├── experiments
│   │   ├── __init__.py
│   │   ├── dtb70.py
│   │   ├── got10k.py
│   │   ├── lasot.py
│   │   ├── nfs.py
│   │   ├── otb.py
│   │   ├── tcolor128.py
│   │   ├── trackingnet.py
│   │   ├── uav123.py
│   │   └── vot.py
│   ├── trackers
│   │   ├── __init__.py
│   │   └── identity_tracker.py
│   └── utils
│       ├── __init__.py
│       ├── ioutils.py
│       ├── metrics.py
│       └── viz.py
├── requirements.txt
├── resources
│   └── sample_batch_run.jpg
├── setup.cfg
├── setup.py
└── tests
    ├── test_datasets.py
    ├── test_experiments.py
    ├── test_trackers.py
    └── test_utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .*
2 | *.pyc
3 | __pycache__/
4 | data
5 | data/
6 | cache/
7 | results/
8 | reports/
9 | profiles/
10 | venv/
11 | build/
12 | dist/
13 | *.egg-info
14 | !.gitignore
15 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include got10k/datasets/uav123.json
2 | include got10k/datasets/lasot.json
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GOT-10k Python Toolkit
2 |
3 | > UPDATE:
4 | > All common tracking datasets (GOT-10k, OTB, VOT, UAV, TColor, DTB, NfS, LaSOT and TrackingNet) are supported.
5 | > Support VOT2019 (ST/LT/RGBD/RGBT) downloading.
6 | > Fix the randomness in ImageNet-VID ([issue #13](https://github.com/got-10k/toolkit/issues/13)).
7 |
8 | _Run experiments over common tracking benchmarks (code from [siamfc](https://github.com/got-10k/siamfc/blob/master/test.py)):_
9 |
10 |
11 |
12 | This repository contains the official python toolkit for running experiments and evaluating performance on the [GOT-10k](http://got-10k.aitestunion.com/) benchmark. The code is written in pure python and is compile-free. Although we support both python2 and python3, we recommend python3 for better performance.
13 |
14 | For convenience, the toolkit also provides unofficial implementations of dataset interfaces and tracking pipelines for the [OTB (2013/2015)](http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html), [VOT (2013~2018)](http://votchallenge.net), [DTB70](https://github.com/flyers/drone-tracking), [TColor128](http://www.dabi.temple.edu/~hbling/data/TColor-128/TColor-128.html), [NfS (30/240 fps)](http://ci2cv.net/nfs/index.html), [UAV (123/20L)](https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx), [LaSOT](https://cis.temple.edu/lasot/) and [TrackingNet](https://tracking-net.org/) benchmarks. It also offers interfaces for the [ILSVRC VID](https://image-net.org/challenges/LSVRC/2015/#vid) and [YouTube-BoundingBox](https://research.google.com/youtube-bb/) (coming soon!) datasets.
15 |
16 | [GOT-10k](http://got-10k.aitestunion.com/) is a large, high-diversity, one-shot database for training and evaluating general-purpose visual trackers. If you use the GOT-10k database or toolkit in a research publication, please consider citing:
17 |
18 | ```Bibtex
19 | @ARTICLE{8922619,
20 | author={Huang, Lianghua and Zhao, Xin and Huang, Kaiqi},
21 | journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
22 | title={GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild},
23 | year={2021},
24 | volume={43},
25 | number={5},
26 | pages={1562-1577},
27 | doi={10.1109/TPAMI.2019.2957464}}
28 | ```
29 |
30 | \[[Project](http://got-10k.aitestunion.com/)\]\[[PDF](https://arxiv.org/abs/1810.11981)\]\[[Bibtex](http://got-10k.aitestunion.com/bibtex)\]
31 |
32 | ## Table of Contents
33 |
34 | * [Installation](#installation)
35 | * [Quick Start: A Concise Example](#quick-start-a-concise-example)
36 | * [Quick Start: Jupyter Notebook for Off-the-Shelf Usage](#quick-start-jupyter-notebook-for-off-the-shelf-usage)
37 | * [How to Define a Tracker?](#how-to-define-a-tracker)
38 | * [How to Run Experiments on GOT-10k?](#how-to-run-experiments-on-got-10k)
39 | * [How to Evaluate Performance?](#how-to-evaluate-performance)
40 | * [How to Plot Success Curves?](#how-to-plot-success-curves)
41 | * [How to Loop Over GOT-10k Dataset?](#how-to-loop-over-got-10k-dataset)
42 | * [Issues](#issues)
43 | * [Contributors](#contributors)
44 |
45 | ### Installation
46 |
47 | Install the toolkit using `pip` (recommended):
48 |
49 | ```bash
50 | pip install --upgrade got10k
51 | ```
52 |
53 | Stay up-to-date:
54 |
55 | ```bash
56 | pip install --upgrade git+https://github.com/got-10k/toolkit.git@master
57 | ```
58 |
59 | Alternatively, clone the repository and install dependencies:
60 |
61 | ```bash
62 | git clone https://github.com/got-10k/toolkit.git
63 | cd toolkit
64 | pip install -r requirements.txt
65 | ```
66 |
67 | Then copy the `got10k` folder directly into your workspace to use it.
68 |
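After installing (or copying the folder), a quick sanity check that the package is importable — a minimal sketch using names documented elsewhere in this README:

```Python
# minimal import check; these names are part of the toolkit's public API
from got10k.datasets import GOT10k
from got10k.experiments import ExperimentGOT10k
from got10k.trackers import Tracker

print('got10k toolkit imported successfully')
```
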
69 | ### Quick Start: A Concise Example
70 |
71 | Here is a simple example of how to use the toolkit to define a tracker, run experiments on GOT-10k and evaluate performance.
72 |
73 | ```Python
74 | from got10k.trackers import Tracker
75 | from got10k.experiments import ExperimentGOT10k
76 |
77 | class IdentityTracker(Tracker):
78 | def __init__(self):
79 | super(IdentityTracker, self).__init__(name='IdentityTracker')
80 |
81 | def init(self, image, box):
82 | self.box = box
83 |
84 | def update(self, image):
85 | return self.box
86 |
87 | if __name__ == '__main__':
88 | # setup tracker
89 | tracker = IdentityTracker()
90 |
91 | # run experiments on GOT-10k (validation subset)
92 | experiment = ExperimentGOT10k('data/GOT-10k', subset='val')
93 | experiment.run(tracker, visualize=True)
94 |
95 | # report performance
96 | experiment.report([tracker.name])
97 | ```
98 |
99 | To run experiments on [OTB](http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html), [VOT](http://votchallenge.net) or other benchmarks, simply change `ExperimentGOT10k` to, e.g., `ExperimentOTB` or `ExperimentVOT`, and set `root_dir` to the corresponding dataset path, as shown in the sketch below.
100 |
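A minimal sketch of the same pipeline on OTB-2015 (the `data/OTB` path is an assumption; point the first argument at wherever the dataset is extracted):

```Python
from got10k.experiments import ExperimentOTB

# ... IdentityTracker definition as above ...

if __name__ == '__main__':
    tracker = IdentityTracker()

    # same pipeline; only the experiment class and the dataset path change
    # (the 'data/OTB' path is an assumption; adjust it to your setup)
    experiment = ExperimentOTB('data/OTB', version=2015)
    experiment.run(tracker, visualize=True)
    experiment.report([tracker.name])
```
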
101 | ### Quick Start: Jupyter Notebook for Off-the-Shelf Usage
102 |
103 | Open [quick_examples.ipynb](https://github.com/got-10k/toolkit/tree/master/examples/quick_examples.ipynb) in [Jupyter Notebook](http://jupyter.org/) to see more examples of toolkit usage.
104 |
105 | ### How to Define a Tracker?
106 |
107 | To define a tracker using the toolkit, simply inherit from the `Tracker` class and override its `init` and `update` methods. Here is a simple example:
108 |
109 | ```Python
110 | from got10k.trackers import Tracker
111 |
112 | class IdentityTracker(Tracker):
113 | def __init__(self):
114 | super(IdentityTracker, self).__init__(
115 | name='IdentityTracker', # tracker name
116 | is_deterministic=True # stochastic (False) or deterministic (True)
117 | )
118 |
119 | def init(self, image, box):
120 | self.box = box
121 |
122 | def update(self, image):
123 | return self.box
124 | ```
125 |
126 | ### How to Run Experiments on GOT-10k?
127 |
128 | Instantiate an `ExperimentGOT10k` object, and leave the entire experiment pipeline to its `run` method:
129 |
130 | ```Python
131 | from got10k.experiments import ExperimentGOT10k
132 |
133 | # ... tracker definition ...
134 |
135 | # instantiate a tracker
136 | tracker = IdentityTracker()
137 |
138 | # setup experiment (validation subset)
139 | experiment = ExperimentGOT10k(
140 | root_dir='data/GOT-10k', # GOT-10k's root directory
141 | subset='val', # 'train' | 'val' | 'test'
142 | result_dir='results', # where to store tracking results
143 | report_dir='reports' # where to store evaluation reports
144 | )
145 | experiment.run(tracker, visualize=True)
146 | ```
147 |
148 | The tracking results will be stored in `result_dir`.
149 |
150 | ### How to Evaluate Performance?
151 |
152 | Use the `report` method of `ExperimentGOT10k` for this purpose:
153 |
154 | ```Python
155 | # ... run experiments on GOT-10k ...
156 |
157 | # report tracking performance
158 | experiment.report([tracker.name])
159 | ```
160 |
161 | When evaluated on the __validation subset__, the scores and curves will be directly generated in `report_dir`.
162 |
163 | However, when evaluated on the __test subset__, since all ground truths are withheld, you will have to submit your results to the [evaluation server](http://got-10k.aitestunion.com/submit_instructions) for evaluation. The `report` function generates a `.zip` file which can be directly uploaded for submission. For more details, see the [submission instructions](http://got-10k.aitestunion.com/submit_instructions).
164 |
165 | See public evaluation results on [GOT-10k's leaderboard](http://got-10k.aitestunion.com/leaderboard).
166 |
167 | ### How to Plot Success Curves?
168 |
169 | Assuming that a list of performance files (JSON files) is stored in `report_files`, here is an example showing how to plot success curves:
170 |
171 | ```Python
172 | from got10k.experiments import ExperimentGOT10k
173 |
174 | report_files = ['reports/GOT-10k/performance_25_entries.json']
175 | tracker_names = ['SiamFCv2', 'GOTURN', 'CCOT', 'MDNet']
176 |
177 | # setup experiment and plot curves
178 | experiment = ExperimentGOT10k('data/GOT-10k', subset='test')
179 | experiment.plot_curves(report_files, tracker_names)
180 | ```
181 |
182 | The report file of 25 baseline entries can be downloaded from the [Downloads page](http://got-10k.aitestunion.com/downloads). You can also download a single report file for each entry from the [Leaderboard page](http://got-10k.aitestunion.com/leaderboard).
183 |
184 | ### How to Loop Over GOT-10k Dataset?
185 |
186 | The `got10k.datasets.GOT10k` class provides an iterable and indexable interface for GOT-10k's sequences. Here is an example:
187 |
188 | ```Python
189 | from PIL import Image
190 | from got10k.datasets import GOT10k
191 | from got10k.utils.viz import show_frame
192 |
193 | dataset = GOT10k(root_dir='data/GOT-10k', subset='train')
194 |
195 | # indexing
196 | img_file, anno = dataset[10]
197 |
198 | # for-loop
199 | for s, (img_files, anno) in enumerate(dataset):
200 | seq_name = dataset.seq_names[s]
201 | print('Sequence:', seq_name)
202 |
203 | # show all frames
204 | for f, img_file in enumerate(img_files):
205 | image = Image.open(img_file)
206 | show_frame(image, anno[f, :])
207 | ```
208 |
209 | To loop over the `OTB` or `VOT` datasets, simply change `GOT10k` to `OTB` or `VOT`, as in the sketch below.
210 |
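A minimal sketch for OTB (the `data/OTB` path and the `version=2015` argument are assumptions; adjust them to your local copy):

```Python
from got10k.datasets import OTB

# the OTB constructor also accepts a version argument (2013, 2015, 'tb50' or 'tb100')
dataset = OTB(root_dir='data/OTB', version=2015, download=False)

for s, (img_files, anno) in enumerate(dataset):
    seq_name = dataset.seq_names[s]
    print('Sequence:', seq_name, '| frames:', len(img_files))
```
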
211 | ### Issues
212 |
213 | Please report any problems or suggestions on the [Issues](https://github.com/got-10k/toolkit/issues) page.
214 |
215 | ### Contributors
216 |
217 | - [Lianghua Huang](https://github.com/huanglianghua)
218 |
--------------------------------------------------------------------------------
/examples/quick_examples.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import fire
4 | from PIL import Image
5 |
6 | from got10k.trackers import Tracker, IdentityTracker
7 | from got10k.experiments import ExperimentGOT10k
8 | from got10k.datasets import GOT10k
9 | from got10k.utils.viz import show_frame
10 |
11 |
12 | ROOT_DIR = 'data/GOT-10k'
13 |
14 |
15 | def example_track_val_set():
16 | # setup tracker
17 | tracker = IdentityTracker()
18 |
19 | # run experiment on validation set
20 | experiment = ExperimentGOT10k(
21 | root_dir=ROOT_DIR,
22 | subset='val',
23 | result_dir='results',
24 | report_dir='reports')
25 | experiment.run(tracker, visualize=False)
26 |
27 | # report performance
28 | experiment.report([tracker.name])
29 |
30 |
31 | def example_track_test_set():
32 | # setup tracker
33 | tracker = IdentityTracker()
34 |
35 | # run experiment on test set
36 | experiment = ExperimentGOT10k(
37 | root_dir=ROOT_DIR,
38 | subset='test',
39 | result_dir='results',
40 | report_dir='reports')
41 | experiment.run(tracker, visualize=False)
42 |
43 | # a ".zip" file will be generated ready for submission
44 | # follow the guide to submit your results to
45 | # http://got-10k.aitestunion.com/
46 | experiment.report([tracker.name])
47 |
48 |
49 | def example_plot_curves():
50 | # reports of 25 baseline entries can be downloaded from
51 | # http://got-10k.aitestunion.com/downloads
52 | report_files = [
53 | 'reports/GOT-10k/performance_25_entries.json']
54 | tracker_names = [
55 | 'SiamFCv2', 'GOTURN', 'CCOT', 'MDNet']
56 |
57 | # setup experiment and plot curves
58 | experiment = ExperimentGOT10k('data/GOT-10k', subset='test')
59 | experiment.plot_curves(report_files, tracker_names)
60 |
61 |
62 | def example_loop_dataset():
63 | # setup dataset
64 | dataset = GOT10k(ROOT_DIR, subset='val')
65 |
66 | # loop over the complete dataset
67 | for s, (img_files, anno) in enumerate(dataset):
68 | seq_name = dataset.seq_names[s]
69 | print('Sequence:', seq_name)
70 |
71 | for f, img_file in enumerate(img_files):
72 | image = Image.open(img_file)
73 | box = anno[f, :] # (left, top, width, height)
74 | show_frame(image, box, colors='w')
75 |
76 |
77 | def example_show():
78 | # setup experiment
79 | experiment = ExperimentGOT10k(
80 | root_dir=ROOT_DIR,
81 | subset='test',
82 | result_dir='results',
83 | report_dir='reports')
84 |
85 | # visualize tracking results
86 | tracker_names = [
87 | 'SiamFCv2', 'GOTURN', 'CCOT', 'MDNet']
88 | experiment.show(tracker_names)
89 |
90 |
91 | if __name__ == '__main__':
92 | # choose an example function to execute, e.g.,
93 |     # > python quick_examples.py example_loop_dataset
94 | fire.Fire()
95 |
--------------------------------------------------------------------------------
/got10k/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/got-10k/toolkit/956e7286fdf209cbb125adac9a46376bd8297ffb/got10k/__init__.py
--------------------------------------------------------------------------------
/got10k/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .got10k import GOT10k
4 | from .otb import OTB
5 | from .vot import VOT
6 | from .dtb70 import DTB70
7 | from .tcolor128 import TColor128
8 | from .uav123 import UAV123
9 | from .nfs import NfS
10 | from .lasot import LaSOT
11 | from .trackingnet import TrackingNet
12 | from .vid import ImageNetVID
13 |
--------------------------------------------------------------------------------
/got10k/datasets/dtb70.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import glob
5 | import numpy as np
6 | import six
7 |
8 |
9 | class DTB70(object):
10 | """`DTB70 `_ Dataset.
11 |
12 | Publication:
13 |         ``Visual object tracking for unmanned aerial vehicles: A benchmark and new motion models``,
14 |         S. Li and D.-Y. Yeung, AAAI 2017.
15 |
16 | Args:
17 | root_dir (string): Root directory of dataset where sequence
18 | folders exist.
19 | """
20 | def __init__(self, root_dir):
21 | super(DTB70, self).__init__()
22 | self.root_dir = root_dir
23 | self._check_integrity(root_dir)
24 |
25 | self.anno_files = sorted(glob.glob(
26 | os.path.join(root_dir, '*/groundtruth_rect.txt')))
27 | self.seq_dirs = [os.path.dirname(f) for f in self.anno_files]
28 | self.seq_names = [os.path.basename(d) for d in self.seq_dirs]
29 |
30 | def __getitem__(self, index):
31 | r"""
32 | Args:
33 | index (integer or string): Index or name of a sequence.
34 |
35 | Returns:
36 | tuple: (img_files, anno), where ``img_files`` is a list of
37 | file names and ``anno`` is a N x 4 (rectangles) numpy array.
38 | """
39 | if isinstance(index, six.string_types):
40 | if not index in self.seq_names:
41 | raise Exception('Sequence {} not found.'.format(index))
42 | index = self.seq_names.index(index)
43 |
44 | img_files = sorted(glob.glob(
45 | os.path.join(self.seq_dirs[index], 'img/*.jpg')))
46 | anno = np.loadtxt(self.anno_files[index], delimiter=',')
47 | assert len(img_files) == len(anno)
48 | assert anno.shape[1] == 4
49 |
50 | return img_files, anno
51 |
52 | def __len__(self):
53 | return len(self.seq_names)
54 |
55 | def _check_integrity(self, root_dir):
56 | seq_names = os.listdir(root_dir)
57 | seq_names = [n for n in seq_names if not n[0] == '.']
58 |
59 | if os.path.isdir(root_dir) and len(seq_names) > 0:
60 | # check each sequence folder
61 | for seq_name in seq_names:
62 | seq_dir = os.path.join(root_dir, seq_name)
63 | if not os.path.isdir(seq_dir):
64 |                     print('Warning: sequence %s does not exist.' % seq_name)
65 | else:
66 |             # dataset does not exist
67 | raise Exception('Dataset not found or corrupted.')
68 |
--------------------------------------------------------------------------------
/got10k/datasets/got10k.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import glob
5 | import numpy as np
6 | import six
7 |
8 |
9 | class GOT10k(object):
10 | r"""`GOT-10K `_ Dataset.
11 |
12 | Publication:
13 | ``GOT-10k: A Large High-Diversity Benchmark for Generic Object
14 | Tracking in the Wild``, L. Huang, X. Zhao and K. Huang, ArXiv 2018.
15 |
16 | Args:
17 | root_dir (string): Root directory of dataset where ``train``,
18 | ``val`` and ``test`` folders exist.
19 | subset (string, optional): Specify ``train``, ``val`` or ``test``
20 | subset of GOT-10k.
21 |         return_meta (boolean, optional): If True, returns ``meta``
22 | of each sequence in ``__getitem__`` function, otherwise
23 | only returns ``img_files`` and ``anno``.
24 | list_file (string, optional): If provided, only read sequences
25 | specified by the file instead of all sequences in the subset.
26 | """
27 | def __init__(self, root_dir, subset='test', return_meta=False,
28 | list_file=None, check_integrity=True):
29 | super(GOT10k, self).__init__()
30 | assert subset in ['train', 'val', 'test'], 'Unknown subset.'
31 | self.root_dir = root_dir
32 | self.subset = subset
33 | self.return_meta = False if subset == 'test' else return_meta
34 |
35 | if list_file is None:
36 | list_file = os.path.join(root_dir, subset, 'list.txt')
37 | if check_integrity:
38 | self._check_integrity(root_dir, subset, list_file)
39 |
40 | with open(list_file, 'r') as f:
41 | self.seq_names = f.read().strip().split('\n')
42 | self.seq_dirs = [os.path.join(root_dir, subset, s)
43 | for s in self.seq_names]
44 | self.anno_files = [os.path.join(d, 'groundtruth.txt')
45 | for d in self.seq_dirs]
46 |
47 | def __getitem__(self, index):
48 | r"""
49 | Args:
50 | index (integer or string): Index or name of a sequence.
51 |
52 | Returns:
53 | tuple: (img_files, anno) if ``return_meta`` is False, otherwise
54 | (img_files, anno, meta), where ``img_files`` is a list of
55 | file names, ``anno`` is a N x 4 (rectangles) numpy array, while
56 |                 ``meta`` is a dict containing meta information about the sequence.
57 | """
58 | if isinstance(index, six.string_types):
59 | if not index in self.seq_names:
60 | raise Exception('Sequence {} not found.'.format(index))
61 | index = self.seq_names.index(index)
62 |
63 | img_files = sorted(glob.glob(os.path.join(
64 | self.seq_dirs[index], '*.jpg')))
65 | anno = np.loadtxt(self.anno_files[index], delimiter=',')
66 |
67 | if self.subset == 'test' and anno.ndim == 1:
68 | assert len(anno) == 4
69 | anno = anno[np.newaxis, :]
70 | else:
71 | assert len(img_files) == len(anno)
72 |
73 | if self.return_meta:
74 | meta = self._fetch_meta(self.seq_dirs[index])
75 | return img_files, anno, meta
76 | else:
77 | return img_files, anno
78 |
79 | def __len__(self):
80 | return len(self.seq_names)
81 |
82 | def _check_integrity(self, root_dir, subset, list_file=None):
83 | assert subset in ['train', 'val', 'test']
84 | if list_file is None:
85 | list_file = os.path.join(root_dir, subset, 'list.txt')
86 |
87 | if os.path.isfile(list_file):
88 | with open(list_file, 'r') as f:
89 | seq_names = f.read().strip().split('\n')
90 |
91 | # check each sequence folder
92 | for seq_name in seq_names:
93 | seq_dir = os.path.join(root_dir, subset, seq_name)
94 | if not os.path.isdir(seq_dir):
95 |                     print('Warning: sequence %s does not exist.' % seq_name)
96 | else:
97 |             # dataset does not exist
98 | raise Exception('Dataset not found or corrupted.')
99 |
100 | def _fetch_meta(self, seq_dir):
101 | # meta information
102 | meta_file = os.path.join(seq_dir, 'meta_info.ini')
103 | with open(meta_file) as f:
104 | meta = f.read().strip().split('\n')[1:]
105 | meta = [line.split(': ') for line in meta]
106 | meta = {line[0]: line[1] for line in meta}
107 |
108 | # attributes
109 | attributes = ['cover', 'absence', 'cut_by_image']
110 | for att in attributes:
111 | meta[att] = np.loadtxt(os.path.join(seq_dir, att + '.label'))
112 |
113 | return meta
114 |
--------------------------------------------------------------------------------
/got10k/datasets/lasot.json:
--------------------------------------------------------------------------------
1 | {
2 | "train": [
3 | "airplane-10",
4 | "airplane-11",
5 | "airplane-12",
6 | "airplane-14",
7 | "airplane-16",
8 | "airplane-17",
9 | "airplane-18",
10 | "airplane-19",
11 | "airplane-2",
12 | "airplane-20",
13 | "airplane-3",
14 | "airplane-4",
15 | "airplane-5",
16 | "airplane-6",
17 | "airplane-7",
18 | "airplane-8",
19 | "basketball-10",
20 | "basketball-12",
21 | "basketball-13",
22 | "basketball-14",
23 | "basketball-15",
24 | "basketball-16",
25 | "basketball-17",
26 | "basketball-18",
27 | "basketball-19",
28 | "basketball-2",
29 | "basketball-20",
30 | "basketball-3",
31 | "basketball-4",
32 | "basketball-5",
33 | "basketball-8",
34 | "basketball-9",
35 | "bear-1",
36 | "bear-10",
37 | "bear-11",
38 | "bear-12",
39 | "bear-13",
40 | "bear-14",
41 | "bear-15",
42 | "bear-16",
43 | "bear-18",
44 | "bear-19",
45 | "bear-20",
46 | "bear-3",
47 | "bear-5",
48 | "bear-7",
49 | "bear-8",
50 | "bear-9",
51 | "bicycle-1",
52 | "bicycle-10",
53 | "bicycle-11",
54 | "bicycle-12",
55 | "bicycle-13",
56 | "bicycle-14",
57 | "bicycle-15",
58 | "bicycle-16",
59 | "bicycle-17",
60 | "bicycle-19",
61 | "bicycle-20",
62 | "bicycle-3",
63 | "bicycle-4",
64 | "bicycle-5",
65 | "bicycle-6",
66 | "bicycle-8",
67 | "bird-1",
68 | "bird-10",
69 | "bird-11",
70 | "bird-12",
71 | "bird-13",
72 | "bird-14",
73 | "bird-16",
74 | "bird-18",
75 | "bird-19",
76 | "bird-20",
77 | "bird-4",
78 | "bird-5",
79 | "bird-6",
80 | "bird-7",
81 | "bird-8",
82 | "bird-9",
83 | "boat-1",
84 | "boat-10",
85 | "boat-11",
86 | "boat-13",
87 | "boat-14",
88 | "boat-15",
89 | "boat-16",
90 | "boat-18",
91 | "boat-19",
92 | "boat-2",
93 | "boat-20",
94 | "boat-5",
95 | "boat-6",
96 | "boat-7",
97 | "boat-8",
98 | "boat-9",
99 | "book-1",
100 | "book-12",
101 | "book-13",
102 | "book-14",
103 | "book-15",
104 | "book-16",
105 | "book-17",
106 | "book-18",
107 | "book-2",
108 | "book-20",
109 | "book-4",
110 | "book-5",
111 | "book-6",
112 | "book-7",
113 | "book-8",
114 | "book-9",
115 | "bottle-10",
116 | "bottle-11",
117 | "bottle-13",
118 | "bottle-15",
119 | "bottle-16",
120 | "bottle-17",
121 | "bottle-19",
122 | "bottle-2",
123 | "bottle-20",
124 | "bottle-3",
125 | "bottle-4",
126 | "bottle-5",
127 | "bottle-6",
128 | "bottle-7",
129 | "bottle-8",
130 | "bottle-9",
131 | "bus-1",
132 | "bus-10",
133 | "bus-11",
134 | "bus-12",
135 | "bus-13",
136 | "bus-14",
137 | "bus-15",
138 | "bus-16",
139 | "bus-18",
140 | "bus-20",
141 | "bus-3",
142 | "bus-4",
143 | "bus-6",
144 | "bus-7",
145 | "bus-8",
146 | "bus-9",
147 | "car-1",
148 | "car-10",
149 | "car-11",
150 | "car-12",
151 | "car-13",
152 | "car-14",
153 | "car-15",
154 | "car-16",
155 | "car-18",
156 | "car-19",
157 | "car-20",
158 | "car-3",
159 | "car-4",
160 | "car-5",
161 | "car-7",
162 | "car-8",
163 | "cat-10",
164 | "cat-11",
165 | "cat-12",
166 | "cat-13",
167 | "cat-14",
168 | "cat-15",
169 | "cat-16",
170 | "cat-17",
171 | "cat-19",
172 | "cat-2",
173 | "cat-4",
174 | "cat-5",
175 | "cat-6",
176 | "cat-7",
177 | "cat-8",
178 | "cat-9",
179 | "cattle-1",
180 | "cattle-10",
181 | "cattle-11",
182 | "cattle-14",
183 | "cattle-15",
184 | "cattle-16",
185 | "cattle-17",
186 | "cattle-18",
187 | "cattle-19",
188 | "cattle-20",
189 | "cattle-3",
190 | "cattle-4",
191 | "cattle-5",
192 | "cattle-6",
193 | "cattle-8",
194 | "cattle-9",
195 | "chameleon-1",
196 | "chameleon-10",
197 | "chameleon-12",
198 | "chameleon-13",
199 | "chameleon-14",
200 | "chameleon-15",
201 | "chameleon-16",
202 | "chameleon-17",
203 | "chameleon-18",
204 | "chameleon-19",
205 | "chameleon-2",
206 | "chameleon-4",
207 | "chameleon-5",
208 | "chameleon-7",
209 | "chameleon-8",
210 | "chameleon-9",
211 | "coin-1",
212 | "coin-10",
213 | "coin-11",
214 | "coin-12",
215 | "coin-13",
216 | "coin-14",
217 | "coin-15",
218 | "coin-16",
219 | "coin-17",
220 | "coin-19",
221 | "coin-2",
222 | "coin-20",
223 | "coin-4",
224 | "coin-5",
225 | "coin-8",
226 | "coin-9",
227 | "crab-1",
228 | "crab-10",
229 | "crab-11",
230 | "crab-13",
231 | "crab-14",
232 | "crab-15",
233 | "crab-16",
234 | "crab-17",
235 | "crab-19",
236 | "crab-2",
237 | "crab-20",
238 | "crab-4",
239 | "crab-5",
240 | "crab-7",
241 | "crab-8",
242 | "crab-9",
243 | "crocodile-1",
244 | "crocodile-11",
245 | "crocodile-12",
246 | "crocodile-13",
247 | "crocodile-15",
248 | "crocodile-16",
249 | "crocodile-17",
250 | "crocodile-18",
251 | "crocodile-19",
252 | "crocodile-2",
253 | "crocodile-20",
254 | "crocodile-5",
255 | "crocodile-6",
256 | "crocodile-7",
257 | "crocodile-8",
258 | "crocodile-9",
259 | "cup-10",
260 | "cup-11",
261 | "cup-12",
262 | "cup-13",
263 | "cup-14",
264 | "cup-15",
265 | "cup-16",
266 | "cup-18",
267 | "cup-19",
268 | "cup-2",
269 | "cup-20",
270 | "cup-3",
271 | "cup-5",
272 | "cup-6",
273 | "cup-8",
274 | "cup-9",
275 | "deer-1",
276 | "deer-11",
277 | "deer-12",
278 | "deer-13",
279 | "deer-15",
280 | "deer-16",
281 | "deer-17",
282 | "deer-18",
283 | "deer-19",
284 | "deer-2",
285 | "deer-20",
286 | "deer-3",
287 | "deer-5",
288 | "deer-6",
289 | "deer-7",
290 | "deer-9",
291 | "dog-10",
292 | "dog-11",
293 | "dog-12",
294 | "dog-13",
295 | "dog-14",
296 | "dog-16",
297 | "dog-17",
298 | "dog-18",
299 | "dog-2",
300 | "dog-20",
301 | "dog-3",
302 | "dog-4",
303 | "dog-5",
304 | "dog-6",
305 | "dog-8",
306 | "dog-9",
307 | "drone-1",
308 | "drone-10",
309 | "drone-11",
310 | "drone-12",
311 | "drone-14",
312 | "drone-16",
313 | "drone-17",
314 | "drone-18",
315 | "drone-19",
316 | "drone-20",
317 | "drone-3",
318 | "drone-4",
319 | "drone-5",
320 | "drone-6",
321 | "drone-8",
322 | "drone-9",
323 | "electricfan-11",
324 | "electricfan-12",
325 | "electricfan-13",
326 | "electricfan-14",
327 | "electricfan-15",
328 | "electricfan-16",
329 | "electricfan-17",
330 | "electricfan-19",
331 | "electricfan-2",
332 | "electricfan-3",
333 | "electricfan-4",
334 | "electricfan-5",
335 | "electricfan-6",
336 | "electricfan-7",
337 | "electricfan-8",
338 | "electricfan-9",
339 | "elephant-10",
340 | "elephant-11",
341 | "elephant-13",
342 | "elephant-14",
343 | "elephant-15",
344 | "elephant-17",
345 | "elephant-19",
346 | "elephant-2",
347 | "elephant-20",
348 | "elephant-3",
349 | "elephant-4",
350 | "elephant-5",
351 | "elephant-6",
352 | "elephant-7",
353 | "elephant-8",
354 | "elephant-9",
355 | "flag-1",
356 | "flag-10",
357 | "flag-11",
358 | "flag-12",
359 | "flag-13",
360 | "flag-14",
361 | "flag-15",
362 | "flag-16",
363 | "flag-17",
364 | "flag-18",
365 | "flag-19",
366 | "flag-20",
367 | "flag-4",
368 | "flag-6",
369 | "flag-7",
370 | "flag-8",
371 | "fox-1",
372 | "fox-10",
373 | "fox-11",
374 | "fox-12",
375 | "fox-13",
376 | "fox-14",
377 | "fox-15",
378 | "fox-16",
379 | "fox-17",
380 | "fox-18",
381 | "fox-19",
382 | "fox-4",
383 | "fox-6",
384 | "fox-7",
385 | "fox-8",
386 | "fox-9",
387 | "frog-1",
388 | "frog-10",
389 | "frog-11",
390 | "frog-12",
391 | "frog-13",
392 | "frog-14",
393 | "frog-15",
394 | "frog-16",
395 | "frog-17",
396 | "frog-18",
397 | "frog-19",
398 | "frog-2",
399 | "frog-5",
400 | "frog-6",
401 | "frog-7",
402 | "frog-8",
403 | "gametarget-10",
404 | "gametarget-11",
405 | "gametarget-12",
406 | "gametarget-14",
407 | "gametarget-15",
408 | "gametarget-16",
409 | "gametarget-17",
410 | "gametarget-18",
411 | "gametarget-19",
412 | "gametarget-20",
413 | "gametarget-3",
414 | "gametarget-4",
415 | "gametarget-5",
416 | "gametarget-6",
417 | "gametarget-8",
418 | "gametarget-9",
419 | "gecko-10",
420 | "gecko-11",
421 | "gecko-12",
422 | "gecko-13",
423 | "gecko-14",
424 | "gecko-15",
425 | "gecko-17",
426 | "gecko-18",
427 | "gecko-2",
428 | "gecko-20",
429 | "gecko-3",
430 | "gecko-4",
431 | "gecko-6",
432 | "gecko-7",
433 | "gecko-8",
434 | "gecko-9",
435 | "giraffe-1",
436 | "giraffe-11",
437 | "giraffe-12",
438 | "giraffe-14",
439 | "giraffe-16",
440 | "giraffe-17",
441 | "giraffe-18",
442 | "giraffe-19",
443 | "giraffe-20",
444 | "giraffe-3",
445 | "giraffe-4",
446 | "giraffe-5",
447 | "giraffe-6",
448 | "giraffe-7",
449 | "giraffe-8",
450 | "giraffe-9",
451 | "goldfish-1",
452 | "goldfish-11",
453 | "goldfish-12",
454 | "goldfish-13",
455 | "goldfish-14",
456 | "goldfish-15",
457 | "goldfish-16",
458 | "goldfish-17",
459 | "goldfish-18",
460 | "goldfish-19",
461 | "goldfish-2",
462 | "goldfish-20",
463 | "goldfish-4",
464 | "goldfish-5",
465 | "goldfish-6",
466 | "goldfish-9",
467 | "gorilla-1",
468 | "gorilla-10",
469 | "gorilla-11",
470 | "gorilla-12",
471 | "gorilla-14",
472 | "gorilla-15",
473 | "gorilla-16",
474 | "gorilla-17",
475 | "gorilla-18",
476 | "gorilla-19",
477 | "gorilla-2",
478 | "gorilla-20",
479 | "gorilla-3",
480 | "gorilla-5",
481 | "gorilla-7",
482 | "gorilla-8",
483 | "guitar-1",
484 | "guitar-11",
485 | "guitar-12",
486 | "guitar-13",
487 | "guitar-14",
488 | "guitar-15",
489 | "guitar-17",
490 | "guitar-18",
491 | "guitar-19",
492 | "guitar-2",
493 | "guitar-20",
494 | "guitar-4",
495 | "guitar-5",
496 | "guitar-6",
497 | "guitar-7",
498 | "guitar-9",
499 | "hand-1",
500 | "hand-10",
501 | "hand-11",
502 | "hand-12",
503 | "hand-13",
504 | "hand-14",
505 | "hand-15",
506 | "hand-17",
507 | "hand-18",
508 | "hand-19",
509 | "hand-20",
510 | "hand-4",
511 | "hand-5",
512 | "hand-6",
513 | "hand-7",
514 | "hand-8",
515 | "hat-10",
516 | "hat-11",
517 | "hat-12",
518 | "hat-13",
519 | "hat-14",
520 | "hat-15",
521 | "hat-16",
522 | "hat-17",
523 | "hat-19",
524 | "hat-20",
525 | "hat-3",
526 | "hat-4",
527 | "hat-6",
528 | "hat-7",
529 | "hat-8",
530 | "hat-9",
531 | "helmet-1",
532 | "helmet-10",
533 | "helmet-12",
534 | "helmet-14",
535 | "helmet-15",
536 | "helmet-16",
537 | "helmet-17",
538 | "helmet-18",
539 | "helmet-2",
540 | "helmet-20",
541 | "helmet-3",
542 | "helmet-4",
543 | "helmet-6",
544 | "helmet-7",
545 | "helmet-8",
546 | "helmet-9",
547 | "hippo-10",
548 | "hippo-11",
549 | "hippo-12",
550 | "hippo-13",
551 | "hippo-14",
552 | "hippo-15",
553 | "hippo-16",
554 | "hippo-17",
555 | "hippo-18",
556 | "hippo-19",
557 | "hippo-2",
558 | "hippo-3",
559 | "hippo-4",
560 | "hippo-5",
561 | "hippo-6",
562 | "hippo-8",
563 | "horse-10",
564 | "horse-11",
565 | "horse-13",
566 | "horse-14",
567 | "horse-16",
568 | "horse-17",
569 | "horse-18",
570 | "horse-19",
571 | "horse-2",
572 | "horse-20",
573 | "horse-3",
574 | "horse-5",
575 | "horse-6",
576 | "horse-7",
577 | "horse-8",
578 | "horse-9",
579 | "kangaroo-1",
580 | "kangaroo-10",
581 | "kangaroo-12",
582 | "kangaroo-13",
583 | "kangaroo-15",
584 | "kangaroo-16",
585 | "kangaroo-17",
586 | "kangaroo-18",
587 | "kangaroo-19",
588 | "kangaroo-20",
589 | "kangaroo-3",
590 | "kangaroo-4",
591 | "kangaroo-6",
592 | "kangaroo-7",
593 | "kangaroo-8",
594 | "kangaroo-9",
595 | "kite-1",
596 | "kite-11",
597 | "kite-12",
598 | "kite-13",
599 | "kite-14",
600 | "kite-16",
601 | "kite-17",
602 | "kite-18",
603 | "kite-19",
604 | "kite-2",
605 | "kite-20",
606 | "kite-3",
607 | "kite-5",
608 | "kite-7",
609 | "kite-8",
610 | "kite-9",
611 | "leopard-10",
612 | "leopard-11",
613 | "leopard-12",
614 | "leopard-13",
615 | "leopard-14",
616 | "leopard-15",
617 | "leopard-17",
618 | "leopard-18",
619 | "leopard-19",
620 | "leopard-2",
621 | "leopard-3",
622 | "leopard-4",
623 | "leopard-5",
624 | "leopard-6",
625 | "leopard-8",
626 | "leopard-9",
627 | "licenseplate-1",
628 | "licenseplate-10",
629 | "licenseplate-11",
630 | "licenseplate-14",
631 | "licenseplate-16",
632 | "licenseplate-17",
633 | "licenseplate-18",
634 | "licenseplate-19",
635 | "licenseplate-2",
636 | "licenseplate-20",
637 | "licenseplate-3",
638 | "licenseplate-4",
639 | "licenseplate-5",
640 | "licenseplate-7",
641 | "licenseplate-8",
642 | "licenseplate-9",
643 | "lion-10",
644 | "lion-11",
645 | "lion-13",
646 | "lion-14",
647 | "lion-15",
648 | "lion-16",
649 | "lion-17",
650 | "lion-18",
651 | "lion-19",
652 | "lion-2",
653 | "lion-3",
654 | "lion-4",
655 | "lion-6",
656 | "lion-7",
657 | "lion-8",
658 | "lion-9",
659 | "lizard-10",
660 | "lizard-11",
661 | "lizard-12",
662 | "lizard-14",
663 | "lizard-15",
664 | "lizard-16",
665 | "lizard-17",
666 | "lizard-18",
667 | "lizard-19",
668 | "lizard-2",
669 | "lizard-20",
670 | "lizard-4",
671 | "lizard-5",
672 | "lizard-7",
673 | "lizard-8",
674 | "lizard-9",
675 | "microphone-1",
676 | "microphone-10",
677 | "microphone-11",
678 | "microphone-12",
679 | "microphone-13",
680 | "microphone-15",
681 | "microphone-17",
682 | "microphone-18",
683 | "microphone-19",
684 | "microphone-20",
685 | "microphone-3",
686 | "microphone-4",
687 | "microphone-5",
688 | "microphone-7",
689 | "microphone-8",
690 | "microphone-9",
691 | "monkey-1",
692 | "monkey-10",
693 | "monkey-11",
694 | "monkey-12",
695 | "monkey-13",
696 | "monkey-14",
697 | "monkey-15",
698 | "monkey-16",
699 | "monkey-18",
700 | "monkey-19",
701 | "monkey-2",
702 | "monkey-20",
703 | "monkey-5",
704 | "monkey-6",
705 | "monkey-7",
706 | "monkey-8",
707 | "motorcycle-10",
708 | "motorcycle-11",
709 | "motorcycle-12",
710 | "motorcycle-13",
711 | "motorcycle-14",
712 | "motorcycle-15",
713 | "motorcycle-16",
714 | "motorcycle-17",
715 | "motorcycle-19",
716 | "motorcycle-2",
717 | "motorcycle-20",
718 | "motorcycle-4",
719 | "motorcycle-5",
720 | "motorcycle-6",
721 | "motorcycle-7",
722 | "motorcycle-8",
723 | "mouse-10",
724 | "mouse-11",
725 | "mouse-12",
726 | "mouse-13",
727 | "mouse-14",
728 | "mouse-15",
729 | "mouse-16",
730 | "mouse-18",
731 | "mouse-19",
732 | "mouse-2",
733 | "mouse-20",
734 | "mouse-3",
735 | "mouse-4",
736 | "mouse-5",
737 | "mouse-6",
738 | "mouse-7",
739 | "person-11",
740 | "person-13",
741 | "person-14",
742 | "person-15",
743 | "person-16",
744 | "person-17",
745 | "person-18",
746 | "person-19",
747 | "person-2",
748 | "person-20",
749 | "person-3",
750 | "person-4",
751 | "person-6",
752 | "person-7",
753 | "person-8",
754 | "person-9",
755 | "pig-1",
756 | "pig-11",
757 | "pig-12",
758 | "pig-14",
759 | "pig-15",
760 | "pig-16",
761 | "pig-17",
762 | "pig-19",
763 | "pig-20",
764 | "pig-3",
765 | "pig-4",
766 | "pig-5",
767 | "pig-6",
768 | "pig-7",
769 | "pig-8",
770 | "pig-9",
771 | "pool-1",
772 | "pool-10",
773 | "pool-11",
774 | "pool-13",
775 | "pool-14",
776 | "pool-16",
777 | "pool-17",
778 | "pool-18",
779 | "pool-19",
780 | "pool-2",
781 | "pool-20",
782 | "pool-4",
783 | "pool-5",
784 | "pool-6",
785 | "pool-8",
786 | "pool-9",
787 | "rabbit-1",
788 | "rabbit-11",
789 | "rabbit-12",
790 | "rabbit-14",
791 | "rabbit-15",
792 | "rabbit-16",
793 | "rabbit-18",
794 | "rabbit-2",
795 | "rabbit-20",
796 | "rabbit-3",
797 | "rabbit-4",
798 | "rabbit-5",
799 | "rabbit-6",
800 | "rabbit-7",
801 | "rabbit-8",
802 | "rabbit-9",
803 | "racing-1",
804 | "racing-11",
805 | "racing-12",
806 | "racing-13",
807 | "racing-14",
808 | "racing-17",
809 | "racing-18",
810 | "racing-19",
811 | "racing-2",
812 | "racing-3",
813 | "racing-4",
814 | "racing-5",
815 | "racing-6",
816 | "racing-7",
817 | "racing-8",
818 | "racing-9",
819 | "robot-10",
820 | "robot-11",
821 | "robot-12",
822 | "robot-13",
823 | "robot-14",
824 | "robot-15",
825 | "robot-16",
826 | "robot-17",
827 | "robot-18",
828 | "robot-2",
829 | "robot-20",
830 | "robot-3",
831 | "robot-4",
832 | "robot-6",
833 | "robot-7",
834 | "robot-9",
835 | "rubicCube-10",
836 | "rubicCube-11",
837 | "rubicCube-12",
838 | "rubicCube-13",
839 | "rubicCube-15",
840 | "rubicCube-16",
841 | "rubicCube-17",
842 | "rubicCube-18",
843 | "rubicCube-2",
844 | "rubicCube-20",
845 | "rubicCube-3",
846 | "rubicCube-4",
847 | "rubicCube-5",
848 | "rubicCube-7",
849 | "rubicCube-8",
850 | "rubicCube-9",
851 | "sepia-1",
852 | "sepia-10",
853 | "sepia-11",
854 | "sepia-12",
855 | "sepia-14",
856 | "sepia-15",
857 | "sepia-17",
858 | "sepia-18",
859 | "sepia-19",
860 | "sepia-2",
861 | "sepia-20",
862 | "sepia-3",
863 | "sepia-4",
864 | "sepia-5",
865 | "sepia-7",
866 | "sepia-9",
867 | "shark-1",
868 | "shark-10",
869 | "shark-11",
870 | "shark-12",
871 | "shark-13",
872 | "shark-14",
873 | "shark-15",
874 | "shark-16",
875 | "shark-17",
876 | "shark-18",
877 | "shark-19",
878 | "shark-20",
879 | "shark-4",
880 | "shark-7",
881 | "shark-8",
882 | "shark-9",
883 | "sheep-1",
884 | "sheep-10",
885 | "sheep-11",
886 | "sheep-12",
887 | "sheep-13",
888 | "sheep-14",
889 | "sheep-15",
890 | "sheep-16",
891 | "sheep-17",
892 | "sheep-18",
893 | "sheep-19",
894 | "sheep-2",
895 | "sheep-20",
896 | "sheep-4",
897 | "sheep-6",
898 | "sheep-8",
899 | "skateboard-1",
900 | "skateboard-10",
901 | "skateboard-11",
902 | "skateboard-12",
903 | "skateboard-13",
904 | "skateboard-14",
905 | "skateboard-15",
906 | "skateboard-17",
907 | "skateboard-18",
908 | "skateboard-2",
909 | "skateboard-20",
910 | "skateboard-4",
911 | "skateboard-5",
912 | "skateboard-6",
913 | "skateboard-7",
914 | "skateboard-9",
915 | "spider-1",
916 | "spider-10",
917 | "spider-11",
918 | "spider-12",
919 | "spider-13",
920 | "spider-15",
921 | "spider-17",
922 | "spider-19",
923 | "spider-2",
924 | "spider-3",
925 | "spider-4",
926 | "spider-5",
927 | "spider-6",
928 | "spider-7",
929 | "spider-8",
930 | "spider-9",
931 | "squirrel-1",
932 | "squirrel-10",
933 | "squirrel-12",
934 | "squirrel-14",
935 | "squirrel-15",
936 | "squirrel-16",
937 | "squirrel-17",
938 | "squirrel-18",
939 | "squirrel-2",
940 | "squirrel-20",
941 | "squirrel-3",
942 | "squirrel-4",
943 | "squirrel-5",
944 | "squirrel-6",
945 | "squirrel-7",
946 | "squirrel-9",
947 | "surfboard-1",
948 | "surfboard-10",
949 | "surfboard-11",
950 | "surfboard-13",
951 | "surfboard-14",
952 | "surfboard-15",
953 | "surfboard-16",
954 | "surfboard-17",
955 | "surfboard-18",
956 | "surfboard-19",
957 | "surfboard-2",
958 | "surfboard-20",
959 | "surfboard-3",
960 | "surfboard-6",
961 | "surfboard-7",
962 | "surfboard-9",
963 | "swing-1",
964 | "swing-11",
965 | "swing-12",
966 | "swing-13",
967 | "swing-15",
968 | "swing-16",
969 | "swing-18",
970 | "swing-19",
971 | "swing-2",
972 | "swing-3",
973 | "swing-4",
974 | "swing-5",
975 | "swing-6",
976 | "swing-7",
977 | "swing-8",
978 | "swing-9",
979 | "tank-1",
980 | "tank-10",
981 | "tank-11",
982 | "tank-12",
983 | "tank-13",
984 | "tank-15",
985 | "tank-17",
986 | "tank-18",
987 | "tank-19",
988 | "tank-2",
989 | "tank-20",
990 | "tank-3",
991 | "tank-4",
992 | "tank-5",
993 | "tank-7",
994 | "tank-8",
995 | "tiger-1",
996 | "tiger-10",
997 | "tiger-11",
998 | "tiger-13",
999 | "tiger-14",
1000 | "tiger-15",
1001 | "tiger-16",
1002 | "tiger-17",
1003 | "tiger-19",
1004 | "tiger-2",
1005 | "tiger-20",
1006 | "tiger-3",
1007 | "tiger-5",
1008 | "tiger-7",
1009 | "tiger-8",
1010 | "tiger-9",
1011 | "train-10",
1012 | "train-12",
1013 | "train-13",
1014 | "train-14",
1015 | "train-15",
1016 | "train-16",
1017 | "train-17",
1018 | "train-18",
1019 | "train-19",
1020 | "train-2",
1021 | "train-3",
1022 | "train-4",
1023 | "train-5",
1024 | "train-6",
1025 | "train-8",
1026 | "train-9",
1027 | "truck-1",
1028 | "truck-10",
1029 | "truck-11",
1030 | "truck-12",
1031 | "truck-13",
1032 | "truck-14",
1033 | "truck-15",
1034 | "truck-17",
1035 | "truck-18",
1036 | "truck-19",
1037 | "truck-2",
1038 | "truck-20",
1039 | "truck-4",
1040 | "truck-5",
1041 | "truck-8",
1042 | "truck-9",
1043 | "turtle-1",
1044 | "turtle-10",
1045 | "turtle-11",
1046 | "turtle-12",
1047 | "turtle-13",
1048 | "turtle-14",
1049 | "turtle-15",
1050 | "turtle-17",
1051 | "turtle-18",
1052 | "turtle-19",
1053 | "turtle-2",
1054 | "turtle-20",
1055 | "turtle-3",
1056 | "turtle-4",
1057 | "turtle-6",
1058 | "turtle-7",
1059 | "umbrella-1",
1060 | "umbrella-10",
1061 | "umbrella-11",
1062 | "umbrella-12",
1063 | "umbrella-13",
1064 | "umbrella-14",
1065 | "umbrella-15",
1066 | "umbrella-16",
1067 | "umbrella-18",
1068 | "umbrella-20",
1069 | "umbrella-3",
1070 | "umbrella-4",
1071 | "umbrella-5",
1072 | "umbrella-6",
1073 | "umbrella-7",
1074 | "umbrella-8",
1075 | "volleyball-10",
1076 | "volleyball-11",
1077 | "volleyball-12",
1078 | "volleyball-14",
1079 | "volleyball-15",
1080 | "volleyball-16",
1081 | "volleyball-17",
1082 | "volleyball-2",
1083 | "volleyball-20",
1084 | "volleyball-3",
1085 | "volleyball-4",
1086 | "volleyball-5",
1087 | "volleyball-6",
1088 | "volleyball-7",
1089 | "volleyball-8",
1090 | "volleyball-9",
1091 | "yoyo-1",
1092 | "yoyo-10",
1093 | "yoyo-11",
1094 | "yoyo-12",
1095 | "yoyo-13",
1096 | "yoyo-14",
1097 | "yoyo-16",
1098 | "yoyo-18",
1099 | "yoyo-2",
1100 | "yoyo-20",
1101 | "yoyo-3",
1102 | "yoyo-4",
1103 | "yoyo-5",
1104 | "yoyo-6",
1105 | "yoyo-8",
1106 | "yoyo-9",
1107 | "zebra-1",
1108 | "zebra-11",
1109 | "zebra-12",
1110 | "zebra-13",
1111 | "zebra-15",
1112 | "zebra-18",
1113 | "zebra-19",
1114 | "zebra-2",
1115 | "zebra-20",
1116 | "zebra-3",
1117 | "zebra-4",
1118 | "zebra-5",
1119 | "zebra-6",
1120 | "zebra-7",
1121 | "zebra-8",
1122 | "zebra-9"
1123 | ],
1124 | "test": [
1125 | "airplane-1",
1126 | "airplane-9",
1127 | "airplane-13",
1128 | "airplane-15",
1129 | "basketball-1",
1130 | "basketball-6",
1131 | "basketball-7",
1132 | "basketball-11",
1133 | "bear-2",
1134 | "bear-4",
1135 | "bear-6",
1136 | "bear-17",
1137 | "bicycle-2",
1138 | "bicycle-7",
1139 | "bicycle-9",
1140 | "bicycle-18",
1141 | "bird-2",
1142 | "bird-3",
1143 | "bird-15",
1144 | "bird-17",
1145 | "boat-3",
1146 | "boat-4",
1147 | "boat-12",
1148 | "boat-17",
1149 | "book-3",
1150 | "book-10",
1151 | "book-11",
1152 | "book-19",
1153 | "bottle-1",
1154 | "bottle-12",
1155 | "bottle-14",
1156 | "bottle-18",
1157 | "bus-2",
1158 | "bus-5",
1159 | "bus-17",
1160 | "bus-19",
1161 | "car-2",
1162 | "car-6",
1163 | "car-9",
1164 | "car-17",
1165 | "cat-1",
1166 | "cat-3",
1167 | "cat-18",
1168 | "cat-20",
1169 | "cattle-2",
1170 | "cattle-7",
1171 | "cattle-12",
1172 | "cattle-13",
1173 | "spider-14",
1174 | "spider-16",
1175 | "spider-18",
1176 | "spider-20",
1177 | "coin-3",
1178 | "coin-6",
1179 | "coin-7",
1180 | "coin-18",
1181 | "crab-3",
1182 | "crab-6",
1183 | "crab-12",
1184 | "crab-18",
1185 | "surfboard-12",
1186 | "surfboard-4",
1187 | "surfboard-5",
1188 | "surfboard-8",
1189 | "cup-1",
1190 | "cup-4",
1191 | "cup-7",
1192 | "cup-17",
1193 | "deer-4",
1194 | "deer-8",
1195 | "deer-10",
1196 | "deer-14",
1197 | "dog-1",
1198 | "dog-7",
1199 | "dog-15",
1200 | "dog-19",
1201 | "guitar-3",
1202 | "guitar-8",
1203 | "guitar-10",
1204 | "guitar-16",
1205 | "person-1",
1206 | "person-5",
1207 | "person-10",
1208 | "person-12",
1209 | "pig-2",
1210 | "pig-10",
1211 | "pig-13",
1212 | "pig-18",
1213 | "rubicCube-1",
1214 | "rubicCube-6",
1215 | "rubicCube-14",
1216 | "rubicCube-19",
1217 | "swing-10",
1218 | "swing-14",
1219 | "swing-17",
1220 | "swing-20",
1221 | "drone-13",
1222 | "drone-15",
1223 | "drone-2",
1224 | "drone-7",
1225 | "pool-12",
1226 | "pool-15",
1227 | "pool-3",
1228 | "pool-7",
1229 | "rabbit-10",
1230 | "rabbit-13",
1231 | "rabbit-17",
1232 | "rabbit-19",
1233 | "racing-10",
1234 | "racing-15",
1235 | "racing-16",
1236 | "racing-20",
1237 | "robot-1",
1238 | "robot-19",
1239 | "robot-5",
1240 | "robot-8",
1241 | "sepia-13",
1242 | "sepia-16",
1243 | "sepia-6",
1244 | "sepia-8",
1245 | "sheep-3",
1246 | "sheep-5",
1247 | "sheep-7",
1248 | "sheep-9",
1249 | "skateboard-16",
1250 | "skateboard-19",
1251 | "skateboard-3",
1252 | "skateboard-8",
1253 | "tank-14",
1254 | "tank-16",
1255 | "tank-6",
1256 | "tank-9",
1257 | "tiger-12",
1258 | "tiger-18",
1259 | "tiger-4",
1260 | "tiger-6",
1261 | "train-1",
1262 | "train-11",
1263 | "train-20",
1264 | "train-7",
1265 | "truck-16",
1266 | "truck-3",
1267 | "truck-6",
1268 | "truck-7",
1269 | "turtle-16",
1270 | "turtle-5",
1271 | "turtle-8",
1272 | "turtle-9",
1273 | "umbrella-17",
1274 | "umbrella-19",
1275 | "umbrella-2",
1276 | "umbrella-9",
1277 | "yoyo-15",
1278 | "yoyo-17",
1279 | "yoyo-19",
1280 | "yoyo-7",
1281 | "zebra-10",
1282 | "zebra-14",
1283 | "zebra-16",
1284 | "zebra-17",
1285 | "elephant-1",
1286 | "elephant-12",
1287 | "elephant-16",
1288 | "elephant-18",
1289 | "goldfish-3",
1290 | "goldfish-7",
1291 | "goldfish-8",
1292 | "goldfish-10",
1293 | "hat-1",
1294 | "hat-2",
1295 | "hat-5",
1296 | "hat-18",
1297 | "kite-4",
1298 | "kite-6",
1299 | "kite-10",
1300 | "kite-15",
1301 | "motorcycle-1",
1302 | "motorcycle-3",
1303 | "motorcycle-9",
1304 | "motorcycle-18",
1305 | "mouse-1",
1306 | "mouse-8",
1307 | "mouse-9",
1308 | "mouse-17",
1309 | "flag-3",
1310 | "flag-9",
1311 | "flag-5",
1312 | "flag-2",
1313 | "frog-3",
1314 | "frog-4",
1315 | "frog-20",
1316 | "frog-9",
1317 | "gametarget-1",
1318 | "gametarget-2",
1319 | "gametarget-7",
1320 | "gametarget-13",
1321 | "hand-2",
1322 | "hand-3",
1323 | "hand-9",
1324 | "hand-16",
1325 | "helmet-5",
1326 | "helmet-11",
1327 | "helmet-19",
1328 | "helmet-13",
1329 | "licenseplate-6",
1330 | "licenseplate-12",
1331 | "licenseplate-13",
1332 | "licenseplate-15",
1333 | "electricfan-1",
1334 | "electricfan-10",
1335 | "electricfan-18",
1336 | "electricfan-20",
1337 | "chameleon-3",
1338 | "chameleon-6",
1339 | "chameleon-11",
1340 | "chameleon-20",
1341 | "crocodile-3",
1342 | "crocodile-4",
1343 | "crocodile-10",
1344 | "crocodile-14",
1345 | "gecko-1",
1346 | "gecko-5",
1347 | "gecko-16",
1348 | "gecko-19",
1349 | "fox-2",
1350 | "fox-3",
1351 | "fox-5",
1352 | "fox-20",
1353 | "giraffe-2",
1354 | "giraffe-10",
1355 | "giraffe-13",
1356 | "giraffe-15",
1357 | "gorilla-4",
1358 | "gorilla-6",
1359 | "gorilla-9",
1360 | "gorilla-13",
1361 | "hippo-1",
1362 | "hippo-7",
1363 | "hippo-9",
1364 | "hippo-20",
1365 | "horse-1",
1366 | "horse-4",
1367 | "horse-12",
1368 | "horse-15",
1369 | "kangaroo-2",
1370 | "kangaroo-5",
1371 | "kangaroo-11",
1372 | "kangaroo-14",
1373 | "leopard-1",
1374 | "leopard-7",
1375 | "leopard-16",
1376 | "leopard-20",
1377 | "lion-1",
1378 | "lion-5",
1379 | "lion-12",
1380 | "lion-20",
1381 | "lizard-1",
1382 | "lizard-3",
1383 | "lizard-6",
1384 | "lizard-13",
1385 | "microphone-2",
1386 | "microphone-6",
1387 | "microphone-14",
1388 | "microphone-16",
1389 | "monkey-3",
1390 | "monkey-4",
1391 | "monkey-9",
1392 | "monkey-17",
1393 | "shark-2",
1394 | "shark-3",
1395 | "shark-5",
1396 | "shark-6",
1397 | "squirrel-8",
1398 | "squirrel-11",
1399 | "squirrel-13",
1400 | "squirrel-19",
1401 | "volleyball-1",
1402 | "volleyball-13",
1403 | "volleyball-18",
1404 | "volleyball-19"
1405 | ]
1406 | }
--------------------------------------------------------------------------------
/got10k/datasets/lasot.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import glob
5 | import json
6 | import numpy as np
7 | import six
8 |
9 |
10 | class LaSOT(object):
11 | r"""`LaSOT `_ Datasets.
12 |
13 | Publication:
14 | ``LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking``,
15 | H. Fan, L. Lin, F. Yang, P. Chu, G. Deng, S. Yu, H. Bai,
16 |         Y. Xu, C. Liao, and H. Ling, CVPR 2019.
17 |
18 | Args:
19 | root_dir (string): Root directory of dataset where sequence
20 | folders exist.
21 | subset (string, optional): Specify ``train`` or ``test``
22 | subset of LaSOT.
23 | """
24 | def __init__(self, root_dir, subset='test', return_meta=False):
25 | super(LaSOT, self).__init__()
26 | subset = subset.split('_')
27 | assert set(subset).issubset({'train', 'test'}), 'Unknown subset.'
28 |
29 | self.root_dir = root_dir
30 | self.subset = subset
31 | self.return_meta = return_meta
32 | self._check_integrity(root_dir)
33 |
34 | self.anno_files = sorted(glob.glob(
35 | os.path.join(root_dir, '*/*/groundtruth.txt')))
36 | self.seq_dirs = [os.path.join(
37 | os.path.dirname(f), 'img') for f in self.anno_files]
38 | self.seq_names = [os.path.basename(
39 | os.path.dirname(f)) for f in self.anno_files]
40 |
41 | # load subset sequence names
42 | split_file = os.path.join(
43 | os.path.dirname(__file__), 'lasot.json')
44 | with open(split_file, 'r') as f:
45 | splits = json.load(f)
46 | self.seq_names = []
47 | for s in subset:
48 | self.seq_names.extend(splits[s])
49 |
50 | # image and annotation paths
51 | self.seq_dirs = [os.path.join(
52 | root_dir, n[:n.rfind('-')], n, 'img')
53 | for n in self.seq_names]
54 | self.anno_files = [os.path.join(
55 | os.path.dirname(d), 'groundtruth.txt')
56 | for d in self.seq_dirs]
57 |
58 | def __getitem__(self, index):
59 | r"""
60 | Args:
61 | index (integer or string): Index or name of a sequence.
62 |
63 | Returns:
64 | tuple: (img_files, anno) if ``return_meta`` is False, otherwise
65 | (img_files, anno, meta), where ``img_files`` is a list of
66 | file names, ``anno`` is a N x 4 (rectangles) numpy array, while
67 |                 ``meta`` is a dict containing meta information about the sequence.
68 | """
69 | if isinstance(index, six.string_types):
70 | if not index in self.seq_names:
71 | raise Exception('Sequence {} not found.'.format(index))
72 | index = self.seq_names.index(index)
73 |
74 | img_files = sorted(glob.glob(os.path.join(
75 | self.seq_dirs[index], '*.jpg')))
76 | anno = np.loadtxt(self.anno_files[index], delimiter=',')
77 |
78 | if self.return_meta:
79 | meta = self._fetch_meta(self.seq_dirs[index])
80 | return img_files, anno, meta
81 | else:
82 | return img_files, anno
83 |
84 | def __len__(self):
85 | return len(self.seq_names)
86 |
87 | def _check_integrity(self, root_dir):
88 | seq_names = os.listdir(root_dir)
89 | seq_names = [n for n in seq_names if not n[0] == '.']
90 |
91 | if os.path.isdir(root_dir) and len(seq_names) > 0:
92 | # check each sequence folder
93 | for seq_name in seq_names:
94 | seq_dir = os.path.join(root_dir, seq_name)
95 | if not os.path.isdir(seq_dir):
96 |                     print('Warning: sequence %s does not exist.' % seq_name)
97 | else:
98 |             # dataset does not exist
99 | raise Exception('Dataset not found or corrupted.')
100 |
101 | def _fetch_meta(self, seq_dir):
102 | seq_dir = os.path.dirname(seq_dir)
103 | meta = {}
104 |
105 | # attributes
106 | for att in ['full_occlusion', 'out_of_view']:
107 | att_file = os.path.join(seq_dir, att + '.txt')
108 | meta[att] = np.loadtxt(att_file, delimiter=',')
109 |
110 | # nlp
111 | nlp_file = os.path.join(seq_dir, 'nlp.txt')
112 | with open(nlp_file, 'r') as f:
113 | meta['nlp'] = f.read().strip()
114 |
115 | return meta
116 |
--------------------------------------------------------------------------------
/got10k/datasets/nfs.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function, division
2 |
3 | import os
4 | import glob
5 | import numpy as np
6 | import six
7 |
8 |
9 | class NfS(object):
10 | """`NfS `_ Dataset.
11 |
12 | Publication:
13 | ``Need for Speed: A Benchmark for Higher Frame Rate Object Tracking``,
14 | H. K. Galoogahi, A. Fagg, C. Huang, D. Ramanan and S. Lucey, ICCV 2017.
15 |
16 | Args:
17 | root_dir (string): Root directory of dataset where sequence
18 | folders exist.
19 | fps (integer): Sequence frame rate. Two options ``30`` and ``240``
20 | are available. Default is 240.
21 | """
22 | def __init__(self, root_dir, fps=240):
23 | super(NfS, self).__init__()
24 | assert fps in [30, 240]
25 | self.fps = fps
26 | self.root_dir = root_dir
27 | self._check_integrity(root_dir)
28 |
29 | self.anno_files = sorted(glob.glob(
30 | os.path.join(root_dir, '*/%d/*.txt' % fps)))
31 | self.seq_names = [
32 | os.path.basename(f)[:-4] for f in self.anno_files]
33 | self.seq_dirs = [os.path.join(
34 | os.path.dirname(f), n)
35 | for f, n in zip(self.anno_files, self.seq_names)]
36 |
37 | def __getitem__(self, index):
38 | r"""
39 | Args:
40 | index (integer or string): Index or name of a sequence.
41 |
42 | Returns:
43 | tuple: (img_files, anno), where ``img_files`` is a list of
44 | file names and ``anno`` is a N x 4 (rectangles) numpy array.
45 | """
46 | if isinstance(index, six.string_types):
47 | if not index in self.seq_names:
48 | raise Exception('Sequence {} not found.'.format(index))
49 | index = self.seq_names.index(index)
50 |
51 | img_files = sorted(glob.glob(
52 | os.path.join(self.seq_dirs[index], '*.jpg')))
53 | anno = np.loadtxt(self.anno_files[index], dtype=str)
54 | anno = anno[:, 1:5].astype(float) # [left, top, right, bottom]
55 | anno[:, 2:] -= anno[:, :2] # [left, top, width, height]
56 |
57 | # handle inconsistent lengths
58 | if not len(img_files) == len(anno):
59 | if abs(len(anno) / len(img_files) - 8) < 1:
60 | anno = anno[0::8, :]
61 | diff = abs(len(img_files) - len(anno))
62 | if diff > 0 and diff <= 1:
63 | n = min(len(img_files), len(anno))
64 | anno = anno[:n]
65 | img_files = img_files[:n]
66 | assert len(img_files) == len(anno)
67 |
68 | return img_files, anno
69 |
70 | def __len__(self):
71 | return len(self.seq_names)
72 |
73 | def _check_integrity(self, root_dir):
74 | seq_names = os.listdir(root_dir)
75 | seq_names = [n for n in seq_names if not n[0] == '.']
76 |
77 | if os.path.isdir(root_dir) and len(seq_names) > 0:
78 | # check each sequence folder
79 | for seq_name in seq_names:
80 | seq_dir = os.path.join(root_dir, seq_name)
81 | if not os.path.isdir(seq_dir):
82 |                     print('Warning: sequence %s does not exist.' % seq_name)
83 | else:
84 |             # dataset does not exist
85 | raise Exception('Dataset not found or corrupted.')
86 |
--------------------------------------------------------------------------------
/got10k/datasets/otb.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function, unicode_literals
2 |
3 | import os
4 | import glob
5 | import numpy as np
6 | import io
7 | import six
8 | from itertools import chain
9 |
10 | from ..utils.ioutils import download, extract
11 |
12 |
13 | class OTB(object):
14 | r"""`OTB `_ Datasets.
15 |
16 | Publication:
17 | ``Object Tracking Benchmark``, Y. Wu, J. Lim and M.-H. Yang, IEEE TPAMI 2015.
18 |
19 | Args:
20 | root_dir (string): Root directory of dataset where sequence
21 | folders exist.
22 |         version (integer or string): Specify the benchmark version as one of
23 | ``2013``, ``2015``, ``tb50`` and ``tb100``.
24 | download (boolean, optional): If True, downloads the dataset from the internet
25 | and puts it in root directory. If dataset is downloaded, it is not
26 | downloaded again.
27 | """
28 | __otb13_seqs = ['Basketball', 'Bolt', 'Boy', 'Car4', 'CarDark',
29 | 'CarScale', 'Coke', 'Couple', 'Crossing', 'David',
30 | 'David2', 'David3', 'Deer', 'Dog1', 'Doll', 'Dudek',
31 | 'FaceOcc1', 'FaceOcc2', 'Fish', 'FleetFace',
32 | 'Football', 'Football1', 'Freeman1', 'Freeman3',
33 | 'Freeman4', 'Girl', 'Ironman', 'Jogging', 'Jumping',
34 | 'Lemming', 'Liquor', 'Matrix', 'Mhyang', 'MotorRolling',
35 | 'MountainBike', 'Shaking', 'Singer1', 'Singer2',
36 | 'Skating1', 'Skiing', 'Soccer', 'Subway', 'Suv',
37 | 'Sylvester', 'Tiger1', 'Tiger2', 'Trellis', 'Walking',
38 | 'Walking2', 'Woman']
39 |
40 | __tb50_seqs = ['Basketball', 'Biker', 'Bird1', 'BlurBody', 'BlurCar2',
41 | 'BlurFace', 'BlurOwl', 'Bolt', 'Box', 'Car1', 'Car4',
42 | 'CarDark', 'CarScale', 'ClifBar', 'Couple', 'Crowds',
43 | 'David', 'Deer', 'Diving', 'DragonBaby', 'Dudek',
44 | 'Football', 'Freeman4', 'Girl', 'Human3', 'Human4',
45 | 'Human6', 'Human9', 'Ironman', 'Jump', 'Jumping',
46 | 'Liquor', 'Matrix', 'MotorRolling', 'Panda', 'RedTeam',
47 | 'Shaking', 'Singer2', 'Skating1', 'Skating2', 'Skiing',
48 | 'Soccer', 'Surfer', 'Sylvester', 'Tiger2', 'Trellis',
49 | 'Walking', 'Walking2', 'Woman']
50 |
51 | __tb100_seqs = ['Bird2', 'BlurCar1', 'BlurCar3', 'BlurCar4', 'Board',
52 | 'Bolt2', 'Boy', 'Car2', 'Car24', 'Coke', 'Coupon',
53 | 'Crossing', 'Dancer', 'Dancer2', 'David2', 'David3',
54 | 'Dog', 'Dog1', 'Doll', 'FaceOcc1', 'FaceOcc2', 'Fish',
55 | 'FleetFace', 'Football1', 'Freeman1', 'Freeman3',
56 | 'Girl2', 'Gym', 'Human2', 'Human5', 'Human7', 'Human8',
57 | 'Jogging', 'KiteSurf', 'Lemming', 'Man', 'Mhyang',
58 | 'MountainBike', 'Rubik', 'Singer1', 'Skater',
59 | 'Skater2', 'Subway', 'Suv', 'Tiger1', 'Toy', 'Trans',
60 | 'Twinnings', 'Vase'] + __tb50_seqs
61 |
62 | __otb15_seqs = __tb100_seqs
63 |
64 | __version_dict = {
65 | 2013: __otb13_seqs,
66 | 2015: __otb15_seqs,
67 | 'otb2013': __otb13_seqs,
68 | 'otb2015': __otb15_seqs,
69 | 'tb50': __tb50_seqs,
70 | 'tb100': __tb100_seqs}
71 |
72 | def __init__(self, root_dir, version=2015, download=True):
73 | super(OTB, self).__init__()
74 | assert version in self.__version_dict
75 |
76 | self.root_dir = root_dir
77 | self.version = version
78 | if download:
79 | self._download(root_dir, version)
80 | self._check_integrity(root_dir, version)
81 |
82 | valid_seqs = self.__version_dict[version]
83 | self.anno_files = sorted(list(chain.from_iterable(glob.glob(
84 | os.path.join(root_dir, s, 'groundtruth*.txt')) for s in valid_seqs)))
85 | # remove empty annotation files
86 | # (e.g., groundtruth_rect.1.txt of Human4)
87 | self.anno_files = self._filter_files(self.anno_files)
88 | self.seq_dirs = [os.path.dirname(f) for f in self.anno_files]
89 | self.seq_names = [os.path.basename(d) for d in self.seq_dirs]
90 | # rename repeated sequence names
91 | # (e.g., Jogging and Skating2)
92 | self.seq_names = self._rename_seqs(self.seq_names)
93 |
94 | def __getitem__(self, index):
95 | r"""
96 | Args:
97 | index (integer or string): Index or name of a sequence.
98 |
99 | Returns:
100 | tuple: (img_files, anno), where ``img_files`` is a list of
101 | file names and ``anno`` is a N x 4 (rectangles) numpy array.
102 | """
103 | if isinstance(index, six.string_types):
104 | if not index in self.seq_names:
105 | raise Exception('Sequence {} not found.'.format(index))
106 | index = self.seq_names.index(index)
107 |
108 | img_files = sorted(glob.glob(
109 | os.path.join(self.seq_dirs[index], 'img/*.jpg')))
110 |
111 | # special sequences
112 | # (visit http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html for detail)
113 | seq_name = self.seq_names[index]
114 | if seq_name.lower() == 'david':
115 | img_files = img_files[300-1:770]
116 | elif seq_name.lower() == 'football1':
117 | img_files = img_files[:74]
118 | elif seq_name.lower() == 'freeman3':
119 | img_files = img_files[:460]
120 | elif seq_name.lower() == 'freeman4':
121 | img_files = img_files[:283]
122 | elif seq_name.lower() == 'diving':
123 | img_files = img_files[:215]
124 |
125 |         # to deal with different delimiters
126 | with open(self.anno_files[index], 'r') as f:
127 | anno = np.loadtxt(io.StringIO(f.read().replace(',', ' ')))
128 | assert len(img_files) == len(anno)
129 | assert anno.shape[1] == 4
130 |
131 | return img_files, anno
132 |
133 | def __len__(self):
134 | return len(self.seq_names)
135 |
136 | def _filter_files(self, filenames):
137 | filtered_files = []
138 | for filename in filenames:
139 | with open(filename, 'r') as f:
140 | if f.read().strip() == '':
141 | print('Warning: %s is empty.' % filename)
142 | else:
143 | filtered_files.append(filename)
144 |
145 | return filtered_files
146 |
147 | def _rename_seqs(self, seq_names):
148 | # in case some sequences may have multiple targets
149 | renamed_seqs = []
150 | for i, seq_name in enumerate(seq_names):
151 | if seq_names.count(seq_name) == 1:
152 | renamed_seqs.append(seq_name)
153 | else:
154 | ind = seq_names[:i + 1].count(seq_name)
155 | renamed_seqs.append('%s.%d' % (seq_name, ind))
156 |
157 | return renamed_seqs
158 |
159 | def _download(self, root_dir, version):
160 | assert version in self.__version_dict
161 | seq_names = self.__version_dict[version]
162 |
163 | if not os.path.isdir(root_dir):
164 | os.makedirs(root_dir)
165 | elif all([os.path.isdir(os.path.join(root_dir, s)) for s in seq_names]):
166 | print('Files already downloaded.')
167 | return
168 |
169 | url_fmt = 'http://cvlab.hanyang.ac.kr/tracker_benchmark/seq/%s.zip'
170 | for seq_name in seq_names:
171 | seq_dir = os.path.join(root_dir, seq_name)
172 | if os.path.isdir(seq_dir):
173 | continue
174 | url = url_fmt % seq_name
175 | zip_file = os.path.join(root_dir, seq_name + '.zip')
176 | print('Downloading to %s...' % zip_file)
177 | download(url, zip_file)
178 | print('\nExtracting to %s...' % root_dir)
179 | extract(zip_file, root_dir)
180 |
181 | return root_dir
182 |
183 | def _check_integrity(self, root_dir, version):
184 | assert version in self.__version_dict
185 | seq_names = self.__version_dict[version]
186 |
187 | if os.path.isdir(root_dir) and len(os.listdir(root_dir)) > 0:
188 | # check each sequence folder
189 | for seq_name in seq_names:
190 | seq_dir = os.path.join(root_dir, seq_name)
191 | if not os.path.isdir(seq_dir):
192 |                     print('Warning: sequence %s does not exist.' % seq_name)
193 |         else:
194 |             # dataset does not exist
195 | raise Exception('Dataset not found or corrupted. ' +
196 | 'You can use download=True to download it.')
197 |
--------------------------------------------------------------------------------
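
A small sketch of the delimiter normalization used in `OTB.__getitem__` above: groundtruth files mix comma- and whitespace-separated values, so commas are replaced with spaces before parsing (the numbers below are made up).

```python
import io

import numpy as np

# two rows with different delimiters, as found across OTB groundtruth files
raw = '10,20,30,40\n11 21 31 41\n'
anno = np.loadtxt(io.StringIO(raw.replace(',', ' ')))
print(anno.shape)   # (2, 4) -> one [x, y, width, height] row per frame
```
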
/got10k/datasets/tcolor128.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import glob
5 | import numpy as np
6 | import six
7 |
8 | from ..utils.ioutils import download, extract
9 |
10 |
11 | class TColor128(object):
12 |     """`TColor128 <http://www.dabi.temple.edu/~hbling/data/TColor-128/TColor-128.html>`_ Dataset.
13 |
14 | Publication:
15 | ``Encoding color information for visual tracking: algorithms and benchmark``,
16 | P. Liang, E. Blasch and H. Ling, TIP, 2015.
17 |
18 | Args:
19 | root_dir (string): Root directory of dataset where sequence
20 | folders exist.
21 | """
22 | def __init__(self, root_dir, download=True):
23 | super(TColor128, self).__init__()
24 | self.root_dir = root_dir
25 | if download:
26 | self._download(root_dir)
27 | self._check_integrity(root_dir)
28 |
29 | self.anno_files = sorted(glob.glob(
30 | os.path.join(root_dir, '*/*_gt.txt')))
31 | self.seq_dirs = [os.path.dirname(f) for f in self.anno_files]
32 | self.seq_names = [os.path.basename(d) for d in self.seq_dirs]
33 | # valid frame range for each sequence
34 | self.range_files = [glob.glob(
35 | os.path.join(d, '*_frames.txt'))[0]
36 | for d in self.seq_dirs]
37 |
38 | def __getitem__(self, index):
39 | r"""
40 | Args:
41 | index (integer or string): Index or name of a sequence.
42 |
43 | Returns:
44 | tuple: (img_files, anno), where ``img_files`` is a list of
45 | file names and ``anno`` is a N x 4 (rectangles) numpy array.
46 | """
47 | if isinstance(index, six.string_types):
48 | if not index in self.seq_names:
49 | raise Exception('Sequence {} not found.'.format(index))
50 | index = self.seq_names.index(index)
51 |
52 | # load valid frame range
53 | frames = np.loadtxt(
54 | self.range_files[index], dtype=int, delimiter=',')
55 | img_files = [os.path.join(
56 | self.seq_dirs[index], 'img/%04d.jpg' % f)
57 | for f in range(frames[0], frames[1] + 1)]
58 |
59 | # load annotations
60 | anno = np.loadtxt(self.anno_files[index], delimiter=',')
61 | assert len(img_files) == len(anno)
62 | assert anno.shape[1] == 4
63 |
64 | return img_files, anno
65 |
66 | def __len__(self):
67 | return len(self.seq_names)
68 |
69 | def _download(self, root_dir):
70 | if not os.path.isdir(root_dir):
71 | os.makedirs(root_dir)
72 | elif len(os.listdir(root_dir)) > 100:
73 | print('Files already downloaded.')
74 | return
75 |
76 | url = 'http://www.dabi.temple.edu/~hbling/data/TColor-128/Temple-color-128.zip'
77 | zip_file = os.path.join(root_dir, 'Temple-color-128.zip')
78 | print('Downloading to %s...' % zip_file)
79 | download(url, zip_file)
80 | print('\nExtracting to %s...' % root_dir)
81 | extract(zip_file, root_dir)
82 |
83 | return root_dir
84 |
85 | def _check_integrity(self, root_dir):
86 | seq_names = os.listdir(root_dir)
87 | seq_names = [n for n in seq_names if not n[0] == '.']
88 |
89 | if os.path.isdir(root_dir) and len(seq_names) > 0:
90 | # check each sequence folder
91 | for seq_name in seq_names:
92 | seq_dir = os.path.join(root_dir, seq_name)
93 | if not os.path.isdir(seq_dir):
94 |                     print('Warning: sequence %s does not exist.' % seq_name)
95 |         else:
96 |             # dataset does not exist
97 | raise Exception('Dataset not found or corrupted. ' +
98 | 'You can use download=True to download it.')
99 |
--------------------------------------------------------------------------------
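
A sketch of the valid-frame-range handling in `TColor128.__getitem__` above: each sequence ships a `*_frames.txt` file holding "start,end", and frame paths are generated for that inclusive range. The sequence folder and numbers below are illustrative.

```python
import io
import os

import numpy as np

frames = np.loadtxt(io.StringIO('1,385'), dtype=int, delimiter=',')
img_files = [
    os.path.join('data/Temple-color-128/Airport_ce', 'img/%04d.jpg' % f)
    for f in range(frames[0], frames[1] + 1)]
print(len(img_files))   # 385 frames
```
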
/got10k/datasets/trackingnet.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import glob
5 | import six
6 | import numpy as np
7 |
8 |
9 | class TrackingNet(object):
10 |     r"""`TrackingNet <https://tracking-net.org/>`_ Datasets.
11 |
12 | Publication:
13 | ``TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.``,
14 | M. Muller, A. Bibi, S. Giancola, S. Al-Subaihi and B. Ghanem, ECCV 2018.
15 |
16 | Args:
17 | root_dir (string): Root directory of dataset where sequence
18 | folders exist.
19 | subset (string, optional): Specify ``train`` or ``test``
20 | subset of TrackingNet.
21 | """
22 | def __init__(self, root_dir, subset='test', *args, **kwargs):
23 | super(TrackingNet, self).__init__()
24 | assert subset in ['train', 'test'], 'Unknown subset.'
25 |
26 | self.root_dir = root_dir
27 | self.subset = subset
28 | if subset == 'test':
29 | self.subset_dirs = ['TEST']
30 | elif subset == 'train':
31 | self.subset_dirs = ['TRAIN_%d' % c for c in range(12)]
32 | self._check_integrity(root_dir, self.subset_dirs)
33 |
34 | self.anno_files = [glob.glob(os.path.join(
35 | root_dir, c, 'anno/*.txt')) for c in self.subset_dirs]
36 | self.anno_files = sorted(sum(self.anno_files, []))
37 | self.seq_dirs = [os.path.join(
38 | os.path.dirname(os.path.dirname(f)),
39 | 'frames',
40 | os.path.basename(f)[:-4])
41 | for f in self.anno_files]
42 | self.seq_names = [os.path.basename(d) for d in self.seq_dirs]
43 |
44 | def __getitem__(self, index):
45 | r"""
46 | Args:
47 | index (integer or string): Index or name of a sequence.
48 |
49 | Returns:
50 | tuple: (img_files, anno), where ``img_files`` is a list of
51 | file names and ``anno`` is a N x 4 (rectangles) numpy array.
52 | """
53 | if isinstance(index, six.string_types):
54 | if not index in self.seq_names:
55 | raise Exception('Sequence {} not found.'.format(index))
56 | index = self.seq_names.index(index)
57 |
58 | img_files = glob.glob(
59 | os.path.join(self.seq_dirs[index], '*.jpg'))
60 | img_files = sorted(img_files, key=lambda x: int(os.path.basename(x)[:-4]))
61 | anno = np.loadtxt(self.anno_files[index], delimiter=',')
62 | 
63 |         if self.subset == 'train':
64 |             assert len(img_files) == len(anno)
65 |             assert anno.shape[1] == 4
66 |         elif self.subset == 'test':
67 | assert anno.shape[0] == 4
68 | anno = anno.reshape(-1, 4)
69 |
70 | return img_files, anno
71 |
72 | def __len__(self):
73 | return len(self.seq_names)
74 |
75 | def _check_integrity(self, root_dir, subset_dirs):
76 | # check each subset path
77 | for c in subset_dirs:
78 | subset_dir = os.path.join(root_dir, c)
79 |
80 | # check data and annotation folders
81 | for folder in ['anno', 'frames']:
82 | if not os.path.isdir(os.path.join(subset_dir, folder)):
83 | raise Exception('Dataset not found or corrupted.')
84 |
--------------------------------------------------------------------------------
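
A sketch of the subset-dependent annotation handling in `TrackingNet.__getitem__` above: the test subset only ships the first-frame box, which `np.loadtxt` returns as a flat length-4 vector, hence the reshape. Values below are made up.

```python
import numpy as np

test_anno = np.array([10.0, 20.0, 30.0, 40.0])   # single [x, y, w, h] box
assert test_anno.shape[0] == 4
test_anno = test_anno.reshape(-1, 4)             # -> shape (1, 4), one row per annotated frame
print(test_anno.shape)
```
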
/got10k/datasets/uav123.json:
--------------------------------------------------------------------------------
1 | {
2 | "UAV123": {
3 | "bike1": {
4 | "start_frame": 1,
5 | "end_frame": 3085,
6 | "folder_name": "bike1"
7 | },
8 | "bike2": {
9 | "start_frame": 1,
10 | "end_frame": 553,
11 | "folder_name": "bike2"
12 | },
13 | "bike3": {
14 | "start_frame": 1,
15 | "end_frame": 433,
16 | "folder_name": "bike3"
17 | },
18 | "bird1_1": {
19 | "start_frame": 1,
20 | "end_frame": 253,
21 | "folder_name": "bird1"
22 | },
23 | "bird1_2": {
24 | "start_frame": 775,
25 | "end_frame": 1477,
26 | "folder_name": "bird1"
27 | },
28 | "bird1_3": {
29 | "start_frame": 1573,
30 | "end_frame": 2437,
31 | "folder_name": "bird1"
32 | },
33 | "boat1": {
34 | "start_frame": 1,
35 | "end_frame": 901,
36 | "folder_name": "boat1"
37 | },
38 | "boat2": {
39 | "start_frame": 1,
40 | "end_frame": 799,
41 | "folder_name": "boat2"
42 | },
43 | "boat3": {
44 | "start_frame": 1,
45 | "end_frame": 901,
46 | "folder_name": "boat3"
47 | },
48 | "boat4": {
49 | "start_frame": 1,
50 | "end_frame": 553,
51 | "folder_name": "boat4"
52 | },
53 | "boat5": {
54 | "start_frame": 1,
55 | "end_frame": 505,
56 | "folder_name": "boat5"
57 | },
58 | "boat6": {
59 | "start_frame": 1,
60 | "end_frame": 805,
61 | "folder_name": "boat6"
62 | },
63 | "boat7": {
64 | "start_frame": 1,
65 | "end_frame": 535,
66 | "folder_name": "boat7"
67 | },
68 | "boat8": {
69 | "start_frame": 1,
70 | "end_frame": 685,
71 | "folder_name": "boat8"
72 | },
73 | "boat9": {
74 | "start_frame": 1,
75 | "end_frame": 1399,
76 | "folder_name": "boat9"
77 | },
78 | "building1": {
79 | "start_frame": 1,
80 | "end_frame": 469,
81 | "folder_name": "building1"
82 | },
83 | "building2": {
84 | "start_frame": 1,
85 | "end_frame": 577,
86 | "folder_name": "building2"
87 | },
88 | "building3": {
89 | "start_frame": 1,
90 | "end_frame": 829,
91 | "folder_name": "building3"
92 | },
93 | "building4": {
94 | "start_frame": 1,
95 | "end_frame": 787,
96 | "folder_name": "building4"
97 | },
98 | "building5": {
99 | "start_frame": 1,
100 | "end_frame": 481,
101 | "folder_name": "building5"
102 | },
103 | "car1_1": {
104 | "start_frame": 1,
105 | "end_frame": 751,
106 | "folder_name": "car1"
107 | },
108 | "car1_2": {
109 | "start_frame": 751,
110 | "end_frame": 1627,
111 | "folder_name": "car1"
112 | },
113 | "car1_3": {
114 | "start_frame": 1627,
115 | "end_frame": 2629,
116 | "folder_name": "car1"
117 | },
118 | "car2": {
119 | "start_frame": 1,
120 | "end_frame": 1321,
121 | "folder_name": "car2"
122 | },
123 | "car3": {
124 | "start_frame": 1,
125 | "end_frame": 1717,
126 | "folder_name": "car3"
127 | },
128 | "car4": {
129 | "start_frame": 1,
130 | "end_frame": 1345,
131 | "folder_name": "car4"
132 | },
133 | "car5": {
134 | "start_frame": 1,
135 | "end_frame": 745,
136 | "folder_name": "car5"
137 | },
138 | "car6_1": {
139 | "start_frame": 1,
140 | "end_frame": 487,
141 | "folder_name": "car6"
142 | },
143 | "car6_2": {
144 | "start_frame": 487,
145 | "end_frame": 1807,
146 | "folder_name": "car6"
147 | },
148 | "car6_3": {
149 | "start_frame": 1807,
150 | "end_frame": 2953,
151 | "folder_name": "car6"
152 | },
153 | "car6_4": {
154 | "start_frame": 2953,
155 | "end_frame": 3925,
156 | "folder_name": "car6"
157 | },
158 | "car6_5": {
159 | "start_frame": 3925,
160 | "end_frame": 4861,
161 | "folder_name": "car6"
162 | },
163 | "car7": {
164 | "start_frame": 1,
165 | "end_frame": 1033,
166 | "folder_name": "car7"
167 | },
168 | "car8_1": {
169 | "start_frame": 1,
170 | "end_frame": 1357,
171 | "folder_name": "car8"
172 | },
173 | "car8_2": {
174 | "start_frame": 1357,
175 | "end_frame": 2575,
176 | "folder_name": "car8"
177 | },
178 | "car9": {
179 | "start_frame": 1,
180 | "end_frame": 1879,
181 | "folder_name": "car9"
182 | },
183 | "car10": {
184 | "start_frame": 1,
185 | "end_frame": 1405,
186 | "folder_name": "car10"
187 | },
188 | "car11": {
189 | "start_frame": 1,
190 | "end_frame": 337,
191 | "folder_name": "car11"
192 | },
193 | "car12": {
194 | "start_frame": 1,
195 | "end_frame": 499,
196 | "folder_name": "car12"
197 | },
198 | "car13": {
199 | "start_frame": 1,
200 | "end_frame": 415,
201 | "folder_name": "car13"
202 | },
203 | "car14": {
204 | "start_frame": 1,
205 | "end_frame": 1327,
206 | "folder_name": "car14"
207 | },
208 | "car15": {
209 | "start_frame": 1,
210 | "end_frame": 469,
211 | "folder_name": "car15"
212 | },
213 | "car16_1": {
214 | "start_frame": 1,
215 | "end_frame": 415,
216 | "folder_name": "car16"
217 | },
218 | "car16_2": {
219 | "start_frame": 415,
220 | "end_frame": 1993,
221 | "folder_name": "car16"
222 | },
223 | "car17": {
224 | "start_frame": 1,
225 | "end_frame": 1057,
226 | "folder_name": "car17"
227 | },
228 | "car18": {
229 | "start_frame": 1,
230 | "end_frame": 1207,
231 | "folder_name": "car18"
232 | },
233 | "group1_1": {
234 | "start_frame": 1,
235 | "end_frame": 1333,
236 | "folder_name": "group1"
237 | },
238 | "group1_2": {
239 | "start_frame": 1333,
240 | "end_frame": 2515,
241 | "folder_name": "group1"
242 | },
243 | "group1_3": {
244 | "start_frame": 2515,
245 | "end_frame": 3925,
246 | "folder_name": "group1"
247 | },
248 | "group1_4": {
249 | "start_frame": 3925,
250 | "end_frame": 4873,
251 | "folder_name": "group1"
252 | },
253 | "group2_1": {
254 | "start_frame": 1,
255 | "end_frame": 907,
256 | "folder_name": "group2"
257 | },
258 | "group2_2": {
259 | "start_frame": 907,
260 | "end_frame": 1771,
261 | "folder_name": "group2"
262 | },
263 | "group2_3": {
264 | "start_frame": 1771,
265 | "end_frame": 2683,
266 | "folder_name": "group2"
267 | },
268 | "group3_1": {
269 | "start_frame": 1,
270 | "end_frame": 1567,
271 | "folder_name": "group3"
272 | },
273 | "group3_2": {
274 | "start_frame": 1567,
275 | "end_frame": 2827,
276 | "folder_name": "group3"
277 | },
278 | "group3_3": {
279 | "start_frame": 2827,
280 | "end_frame": 4369,
281 | "folder_name": "group3"
282 | },
283 | "group3_4": {
284 | "start_frame": 4369,
285 | "end_frame": 5527,
286 | "folder_name": "group3"
287 | },
288 | "person1": {
289 | "start_frame": 1,
290 | "end_frame": 799,
291 | "folder_name": "person1"
292 | },
293 | "person2_1": {
294 | "start_frame": 1,
295 | "end_frame": 1189,
296 | "folder_name": "person2"
297 | },
298 | "person2_2": {
299 | "start_frame": 1189,
300 | "end_frame": 2623,
301 | "folder_name": "person2"
302 | },
303 | "person3": {
304 | "start_frame": 1,
305 | "end_frame": 643,
306 | "folder_name": "person3"
307 | },
308 | "person4_1": {
309 | "start_frame": 1,
310 | "end_frame": 1501,
311 | "folder_name": "person4"
312 | },
313 | "person4_2": {
314 | "start_frame": 1501,
315 | "end_frame": 2743,
316 | "folder_name": "person4"
317 | },
318 | "person5_1": {
319 | "start_frame": 1,
320 | "end_frame": 877,
321 | "folder_name": "person5"
322 | },
323 | "person5_2": {
324 | "start_frame": 877,
325 | "end_frame": 2101,
326 | "folder_name": "person5"
327 | },
328 | "person6": {
329 | "start_frame": 1,
330 | "end_frame": 901,
331 | "folder_name": "person6"
332 | },
333 | "person7_1": {
334 | "start_frame": 1,
335 | "end_frame": 1249,
336 | "folder_name": "person7"
337 | },
338 | "person7_2": {
339 | "start_frame": 1249,
340 | "end_frame": 2065,
341 | "folder_name": "person7"
342 | },
343 | "person8_1": {
344 | "start_frame": 1,
345 | "end_frame": 1075,
346 | "folder_name": "person8"
347 | },
348 | "person8_2": {
349 | "start_frame": 1075,
350 | "end_frame": 1525,
351 | "folder_name": "person8"
352 | },
353 | "person9": {
354 | "start_frame": 1,
355 | "end_frame": 661,
356 | "folder_name": "person9"
357 | },
358 | "person10": {
359 | "start_frame": 1,
360 | "end_frame": 1021,
361 | "folder_name": "person10"
362 | },
363 | "person11": {
364 | "start_frame": 1,
365 | "end_frame": 721,
366 | "folder_name": "person11"
367 | },
368 | "person12_1": {
369 | "start_frame": 1,
370 | "end_frame": 601,
371 | "folder_name": "person12"
372 | },
373 | "person12_2": {
374 | "start_frame": 601,
375 | "end_frame": 1621,
376 | "folder_name": "person12"
377 | },
378 | "person13": {
379 | "start_frame": 1,
380 | "end_frame": 883,
381 | "folder_name": "person13"
382 | },
383 | "person14_1": {
384 | "start_frame": 1,
385 | "end_frame": 847,
386 | "folder_name": "person14"
387 | },
388 | "person14_2": {
389 | "start_frame": 847,
390 | "end_frame": 1813,
391 | "folder_name": "person14"
392 | },
393 | "person14_3": {
394 | "start_frame": 1813,
395 | "end_frame": 2923,
396 | "folder_name": "person14"
397 | },
398 | "person15": {
399 | "start_frame": 1,
400 | "end_frame": 1339,
401 | "folder_name": "person15"
402 | },
403 | "person16": {
404 | "start_frame": 1,
405 | "end_frame": 1147,
406 | "folder_name": "person16"
407 | },
408 | "person17_1": {
409 | "start_frame": 1,
410 | "end_frame": 1501,
411 | "folder_name": "person17"
412 | },
413 | "person17_2": {
414 | "start_frame": 1501,
415 | "end_frame": 2347,
416 | "folder_name": "person17"
417 | },
418 | "person18": {
419 | "start_frame": 1,
420 | "end_frame": 1393,
421 | "folder_name": "person18"
422 | },
423 | "person19_1": {
424 | "start_frame": 1,
425 | "end_frame": 1243,
426 | "folder_name": "person19"
427 | },
428 | "person19_2": {
429 | "start_frame": 1243,
430 | "end_frame": 2791,
431 | "folder_name": "person19"
432 | },
433 | "person19_3": {
434 | "start_frame": 2791,
435 | "end_frame": 4357,
436 | "folder_name": "person19"
437 | },
438 | "person20": {
439 | "start_frame": 1,
440 | "end_frame": 1783,
441 | "folder_name": "person20"
442 | },
443 | "person21": {
444 | "start_frame": 1,
445 | "end_frame": 487,
446 | "folder_name": "person21"
447 | },
448 | "person22": {
449 | "start_frame": 1,
450 | "end_frame": 199,
451 | "folder_name": "person22"
452 | },
453 | "person23": {
454 | "start_frame": 1,
455 | "end_frame": 397,
456 | "folder_name": "person23"
457 | },
458 | "truck1": {
459 | "start_frame": 1,
460 | "end_frame": 463,
461 | "folder_name": "truck1"
462 | },
463 | "truck2": {
464 | "start_frame": 1,
465 | "end_frame": 385,
466 | "folder_name": "truck2"
467 | },
468 | "truck3": {
469 | "start_frame": 1,
470 | "end_frame": 535,
471 | "folder_name": "truck3"
472 | },
473 | "truck4_1": {
474 | "start_frame": 1,
475 | "end_frame": 577,
476 | "folder_name": "truck4"
477 | },
478 | "truck4_2": {
479 | "start_frame": 577,
480 | "end_frame": 1261,
481 | "folder_name": "truck4"
482 | },
483 | "uav1_1": {
484 | "start_frame": 1,
485 | "end_frame": 1555,
486 | "folder_name": "uav1"
487 | },
488 | "uav1_2": {
489 | "start_frame": 1555,
490 | "end_frame": 2377,
491 | "folder_name": "uav1"
492 | },
493 | "uav1_3": {
494 | "start_frame": 2473,
495 | "end_frame": 3469,
496 | "folder_name": "uav1"
497 | },
498 | "uav2": {
499 | "start_frame": 1,
500 | "end_frame": 133,
501 | "folder_name": "uav2"
502 | },
503 | "uav3": {
504 | "start_frame": 1,
505 | "end_frame": 265,
506 | "folder_name": "uav3"
507 | },
508 | "uav4": {
509 | "start_frame": 1,
510 | "end_frame": 157,
511 | "folder_name": "uav4"
512 | },
513 | "uav5": {
514 | "start_frame": 1,
515 | "end_frame": 139,
516 | "folder_name": "uav5"
517 | },
518 | "uav6": {
519 | "start_frame": 1,
520 | "end_frame": 109,
521 | "folder_name": "uav6"
522 | },
523 | "uav7": {
524 | "start_frame": 1,
525 | "end_frame": 373,
526 | "folder_name": "uav7"
527 | },
528 | "uav8": {
529 | "start_frame": 1,
530 | "end_frame": 301,
531 | "folder_name": "uav8"
532 | },
533 | "wakeboard1": {
534 | "start_frame": 1,
535 | "end_frame": 421,
536 | "folder_name": "wakeboard1"
537 | },
538 | "wakeboard2": {
539 | "start_frame": 1,
540 | "end_frame": 733,
541 | "folder_name": "wakeboard2"
542 | },
543 | "wakeboard3": {
544 | "start_frame": 1,
545 | "end_frame": 823,
546 | "folder_name": "wakeboard3"
547 | },
548 | "wakeboard4": {
549 | "start_frame": 1,
550 | "end_frame": 697,
551 | "folder_name": "wakeboard4"
552 | },
553 | "wakeboard5": {
554 | "start_frame": 1,
555 | "end_frame": 1675,
556 | "folder_name": "wakeboard5"
557 | },
558 | "wakeboard6": {
559 | "start_frame": 1,
560 | "end_frame": 1165,
561 | "folder_name": "wakeboard6"
562 | },
563 | "wakeboard7": {
564 | "start_frame": 1,
565 | "end_frame": 199,
566 | "folder_name": "wakeboard7"
567 | },
568 | "wakeboard8": {
569 | "start_frame": 1,
570 | "end_frame": 1543,
571 | "folder_name": "wakeboard8"
572 | },
573 | "wakeboard9": {
574 | "start_frame": 1,
575 | "end_frame": 355,
576 | "folder_name": "wakeboard9"
577 | },
578 | "wakeboard10": {
579 | "start_frame": 1,
580 | "end_frame": 469,
581 | "folder_name": "wakeboard10"
582 | },
583 | "car1_s": {
584 | "start_frame": 1,
585 | "end_frame": 1475,
586 | "folder_name": "car1_s"
587 | },
588 | "car2_s": {
589 | "start_frame": 1,
590 | "end_frame": 320,
591 | "folder_name": "car2_s"
592 | },
593 | "car3_s": {
594 | "start_frame": 1,
595 | "end_frame": 1300,
596 | "folder_name": "car3_s"
597 | },
598 | "car4_s": {
599 | "start_frame": 1,
600 | "end_frame": 830,
601 | "folder_name": "car4_s"
602 | },
603 | "person1_s": {
604 | "start_frame": 1,
605 | "end_frame": 1600,
606 | "folder_name": "person1_s"
607 | },
608 | "person2_s": {
609 | "start_frame": 1,
610 | "end_frame": 250,
611 | "folder_name": "person2_s"
612 | },
613 | "person3_s": {
614 | "start_frame": 1,
615 | "end_frame": 505,
616 | "folder_name": "person3_s"
617 | }
618 | },
619 | "UAV20L": {
620 | "bike1": {
621 | "start_frame": 1,
622 | "end_frame": 3085,
623 | "folder_name": "bike1"
624 | },
625 | "bird1": {
626 | "start_frame": 1,
627 | "end_frame": 2437,
628 | "folder_name": "bird1"
629 | },
630 | "car1": {
631 | "start_frame": 1,
632 | "end_frame": 2629,
633 | "folder_name": "car1"
634 | },
635 | "car3": {
636 | "start_frame": 1,
637 | "end_frame": 1717,
638 | "folder_name": "car3"
639 | },
640 | "car6": {
641 | "start_frame": 1,
642 | "end_frame": 4861,
643 | "folder_name": "car6"
644 | },
645 | "car8": {
646 | "start_frame": 1,
647 | "end_frame": 2575,
648 | "folder_name": "car8"
649 | },
650 | "car9": {
651 | "start_frame": 1,
652 | "end_frame": 1879,
653 | "folder_name": "car9"
654 | },
655 | "car16": {
656 | "start_frame": 1,
657 | "end_frame": 1993,
658 | "folder_name": "car16"
659 | },
660 | "group1": {
661 | "start_frame": 1,
662 | "end_frame": 4873,
663 | "folder_name": "group1"
664 | },
665 | "group2": {
666 | "start_frame": 1,
667 | "end_frame": 2683,
668 | "folder_name": "group2"
669 | },
670 | "group3": {
671 | "start_frame": 1,
672 | "end_frame": 5527,
673 | "folder_name": "group3"
674 | },
675 | "person2": {
676 | "start_frame": 1,
677 | "end_frame": 2623,
678 | "folder_name": "person2"
679 | },
680 | "person4": {
681 | "start_frame": 1,
682 | "end_frame": 2743,
683 | "folder_name": "person4"
684 | },
685 | "person5": {
686 | "start_frame": 1,
687 | "end_frame": 2101,
688 | "folder_name": "person5"
689 | },
690 | "person7": {
691 | "start_frame": 1,
692 | "end_frame": 2065,
693 | "folder_name": "person7"
694 | },
695 | "person14": {
696 | "start_frame": 1,
697 | "end_frame": 2923,
698 | "folder_name": "person14"
699 | },
700 | "person17": {
701 | "start_frame": 1,
702 | "end_frame": 2347,
703 | "folder_name": "person17"
704 | },
705 | "person19": {
706 | "start_frame": 1,
707 | "end_frame": 4357,
708 | "folder_name": "person19"
709 | },
710 | "person20": {
711 | "start_frame": 1,
712 | "end_frame": 1783,
713 | "folder_name": "person20"
714 | },
715 | "uav1": {
716 | "start_frame": 1,
717 | "end_frame": 3469,
718 | "folder_name": "uav1"
719 | }
720 | }
721 | }
--------------------------------------------------------------------------------
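
A sketch of how the uav123.json metadata above is consumed (see `UAV123` in uav123.py below): each entry maps a (sub-)sequence name to a frame range inside a shared folder, so for example `bird1_2` covers frames 775 to 1477 of the `bird1` folder. The dataset root below is illustrative, and the script is assumed to run from the repository root.

```python
import json
import os

with open('got10k/datasets/uav123.json') as f:
    meta = json.load(f)

entry = meta['UAV123']['bird1_2']
img_files = [
    os.path.join('data/UAV123/data_seq/UAV123', entry['folder_name'],
                 '%06d.jpg' % f)
    for f in range(entry['start_frame'], entry['end_frame'] + 1)]
print(len(img_files))   # 703 frames taken from the bird1 folder
```
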
/got10k/datasets/uav123.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import glob
5 | import numpy as np
6 | import six
7 | import json
8 |
9 |
10 | class UAV123(object):
11 | """`UAV123 `_ Dataset.
12 |
13 | Publication:
14 | ``A Benchmark and Simulator for UAV Tracking``,
15 | M. Mueller, N. Smith and B. Ghanem, ECCV 2016.
16 |
17 | Args:
18 | root_dir (string): Root directory of dataset where sequence
19 | folders exist.
20 |         version (string): Specify the benchmark version; one of
21 |             ``UAV123`` and ``UAV20L``.
22 | """
23 | def __init__(self, root_dir, version='UAV123'):
24 | super(UAV123, self).__init__()
25 | assert version.upper() in ['UAV20L', 'UAV123']
26 |
27 | self.root_dir = root_dir
28 |         self.version = version = version.upper()  # normalize case for metadata lookups
29 | self._check_integrity(root_dir, version)
30 |
31 | # sequence meta information
32 | meta_file = os.path.join(
33 | os.path.dirname(__file__), 'uav123.json')
34 | with open(meta_file) as f:
35 | self.seq_metas = json.load(f)
36 |
37 | # sequence and annotation paths
38 | self.anno_files = sorted(glob.glob(
39 | os.path.join(root_dir, 'anno/%s/*.txt' % version)))
40 | self.seq_names = [
41 | os.path.basename(f)[:-4] for f in self.anno_files]
42 | self.seq_dirs = [os.path.join(
43 | root_dir, 'data_seq/UAV123/%s' % \
44 | self.seq_metas[version][n]['folder_name'])
45 | for n in self.seq_names]
46 |
47 | def __getitem__(self, index):
48 | r"""
49 | Args:
50 | index (integer or string): Index or name of a sequence.
51 |
52 | Returns:
53 | tuple: (img_files, anno), where ``img_files`` is a list of
54 | file names and ``anno`` is a N x 4 (rectangles) numpy array.
55 | """
56 | if isinstance(index, six.string_types):
57 | if not index in self.seq_names:
58 | raise Exception('Sequence {} not found.'.format(index))
59 | index = self.seq_names.index(index)
60 |
61 | # valid frame range
62 | start_frame = self.seq_metas[self.version][
63 | self.seq_names[index]]['start_frame']
64 | end_frame = self.seq_metas[self.version][
65 | self.seq_names[index]]['end_frame']
66 | img_files = [os.path.join(
67 | self.seq_dirs[index], '%06d.jpg' % f)
68 | for f in range(start_frame, end_frame + 1)]
69 |
70 | # load annotations
71 | anno = np.loadtxt(self.anno_files[index], delimiter=',')
72 | assert len(img_files) == len(anno)
73 | assert anno.shape[1] == 4
74 |
75 | return img_files, anno
76 |
77 | def __len__(self):
78 | return len(self.seq_names)
79 |
80 | def _check_integrity(self, root_dir, version):
81 | # sequence meta information
82 | meta_file = os.path.join(
83 | os.path.dirname(__file__), 'uav123.json')
84 | with open(meta_file) as f:
85 | seq_metas = json.load(f)
86 | seq_names = list(seq_metas[version].keys())
87 |
88 | if os.path.isdir(root_dir) and len(os.listdir(root_dir)) > 3:
89 | # check each sequence folder
90 | for seq_name in seq_names:
91 | seq_dir = os.path.join(
92 | root_dir, 'data_seq/UAV123/%s' % \
93 | seq_metas[version][seq_name]['folder_name'])
94 | if not os.path.isdir(seq_dir):
95 |                     print('Warning: sequence %s does not exist.' % seq_name)
96 |         else:
97 |             # dataset does not exist
98 | raise Exception('Dataset not found or corrupted.')
99 |
--------------------------------------------------------------------------------
/got10k/datasets/vid.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function
2 |
3 | import os
4 | import glob
5 | import six
6 | import numpy as np
7 | import xml.etree.ElementTree as ET
8 | import json
9 | from collections import OrderedDict
10 |
11 |
12 | class ImageNetVID(object):
13 | r"""`ImageNet Video Image Detection (VID) `_ Dataset.
14 |
15 | Publication:
16 | ``ImageNet Large Scale Visual Recognition Challenge``, O. Russakovsky,
17 |         J. Deng, H. Su, et al., IJCV, 2015.
18 |
19 | Args:
20 | root_dir (string): Root directory of dataset where ``Data``, and
21 | ``Annotation`` folders exist.
22 | subset (string, optional): Specify ``train``, ``val`` or (``train``, ``val``)
23 | subset(s) of ImageNet-VID. Default is a tuple (``train``, ``val``).
24 | cache_dir (string, optional): Directory for caching the paths and annotations
25 | for speeding up loading. Default is ``cache/imagenet_vid``.
26 | """
27 | def __init__(self, root_dir, subset=('train', 'val'),
28 | cache_dir='cache/imagenet_vid'):
29 | self.root_dir = root_dir
30 | self.cache_dir = cache_dir
31 | if isinstance(subset, str):
32 | assert subset in ['train', 'val']
33 | self.subset = [subset]
34 | elif isinstance(subset, (list, tuple)):
35 | assert all([s in ['train', 'val'] for s in subset])
36 | self.subset = subset
37 | else:
38 | raise Exception('Unknown subset')
39 |
40 | # cache filenames and annotations to speed up training
41 | self.seq_dict = self._cache_meta()
42 | self.seq_names = [n for n in self.seq_dict]
43 |
44 | def __getitem__(self, index):
45 | r"""
46 | Args:
47 | index (integer or string): Index or name of a sequence.
48 |
49 | Returns:
50 | tuple: (img_files, anno), where ``img_files`` is a list of
51 | file names and ``anno`` is a N x 4 (rectangles) numpy array.
52 | """
53 | if isinstance(index, six.string_types):
54 | seq_name = index
55 | else:
56 | seq_name = self.seq_names[index]
57 |
58 | seq_dir, frames, anno_file = self.seq_dict[seq_name]
59 | img_files = [os.path.join(
60 | seq_dir, '%06d.JPEG' % f) for f in frames]
61 | anno = np.loadtxt(anno_file, delimiter=',')
62 |
63 | return img_files, anno
64 |
65 | def __len__(self):
66 | return len(self.seq_dict)
67 |
68 | def _cache_meta(self):
69 | cache_file = os.path.join(self.cache_dir, 'seq_dict.json')
70 | if os.path.isfile(cache_file):
71 | print('Dataset already cached.')
72 | with open(cache_file) as f:
73 | seq_dict = json.load(f, object_pairs_hook=OrderedDict)
74 | return seq_dict
75 |
76 | # image and annotation paths
77 | print('Gather sequence paths...')
78 | seq_dirs = []
79 | anno_dirs = []
80 | if 'train' in self.subset:
81 | seq_dirs_ = sorted(glob.glob(os.path.join(
82 | self.root_dir, 'Data/VID/train/ILSVRC*/ILSVRC*')))
83 | anno_dirs_ = [os.path.join(
84 | self.root_dir, 'Annotations/VID/train',
85 | *s.split('/')[-2:]) for s in seq_dirs_]
86 | seq_dirs += seq_dirs_
87 | anno_dirs += anno_dirs_
88 | if 'val' in self.subset:
89 | seq_dirs_ = sorted(glob.glob(os.path.join(
90 | self.root_dir, 'Data/VID/val/ILSVRC2015_val_*')))
91 | anno_dirs_ = [os.path.join(
92 | self.root_dir, 'Annotations/VID/val',
93 | s.split('/')[-1]) for s in seq_dirs_]
94 | seq_dirs += seq_dirs_
95 | anno_dirs += anno_dirs_
96 | seq_names = [os.path.basename(s) for s in seq_dirs]
97 |
98 | # cache paths and annotations
99 | print('Caching annotations to %s, ' % self.cache_dir + \
100 | 'it may take a few minutes...')
101 | seq_dict = OrderedDict()
102 | cache_anno_dir = os.path.join(self.cache_dir, 'anno')
103 | if not os.path.isdir(cache_anno_dir):
104 | os.makedirs(cache_anno_dir)
105 |
106 | for s, seq_name in enumerate(seq_names):
107 | if s % 100 == 0 or s == len(seq_names) - 1:
108 | print('--Caching sequence %d/%d: %s' % \
109 | (s + 1, len(seq_names), seq_name))
110 | anno_files = sorted(glob.glob(os.path.join(
111 | anno_dirs[s], '*.xml')))
112 | objects = [ET.ElementTree(file=f).findall('object')
113 | for f in anno_files]
114 |
115 | # find all track ids
116 | track_ids, counts = np.unique([
117 | obj.find('trackid').text for group in objects
118 | for obj in group], return_counts=True)
119 |
120 | # fetch paths and annotations for each track id
121 | for t, track_id in enumerate(track_ids):
122 | if counts[t] < 2:
123 | continue
124 | frames = []
125 | anno = []
126 | for f, group in enumerate(objects):
127 | for obj in group:
128 | if not obj.find('trackid').text == track_id:
129 | continue
130 | frames.append(f)
131 | anno.append([
132 | int(obj.find('bndbox/xmin').text),
133 | int(obj.find('bndbox/ymin').text),
134 | int(obj.find('bndbox/xmax').text),
135 | int(obj.find('bndbox/ymax').text)])
136 | anno = np.array(anno, dtype=int)
137 | anno[:, 2:] -= anno[:, :2] - 1
138 |
139 | # store annotations
140 | key = '%s.%d' % (seq_name, int(track_id))
141 | cache_anno_file = os.path.join(cache_anno_dir, key + '.txt')
142 | np.savetxt(cache_anno_file, anno, fmt='%d', delimiter=',')
143 |
144 | # store paths
145 | seq_dict.update([(key, [
146 | seq_dirs[s], frames, cache_anno_file])])
147 |
148 | # store seq_dict
149 | with open(cache_file, 'w') as f:
150 | json.dump(seq_dict, f)
151 |
152 | return seq_dict
153 |
--------------------------------------------------------------------------------
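
A sketch of the cache entry format produced by `ImageNetVID._cache_meta` above: each key is `<sequence>.<track_id>` and maps to `[seq_dir, frame_indices, anno_file]`, which `__getitem__` expands into frame paths plus the cached x,y,w,h boxes. All paths below are illustrative.

```python
import os

# one cached entry, as stored in cache/imagenet_vid/seq_dict.json
seq_dir = 'data/ILSVRC/Data/VID/val/ILSVRC2015_val_00000000'
frames = [0, 1, 2, 5]   # frame indices in which this track id appears
anno_file = 'cache/imagenet_vid/anno/ILSVRC2015_val_00000000.0.txt'

img_files = [os.path.join(seq_dir, '%06d.JPEG' % f) for f in frames]
# the matching boxes would be read with np.loadtxt(anno_file, delimiter=',')
print(img_files[-1])
```
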
/got10k/datasets/vot.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function, division
2 |
3 | import os
4 | import glob
5 | import numpy as np
6 | import six
7 | import json
8 | import hashlib
9 |
10 | from ..utils.ioutils import download, extract
11 |
12 |
13 | class VOT(object):
14 |     r"""`VOT <http://www.votchallenge.net/>`_ Datasets.
15 |
16 | Publication:
17 |         ``The Visual Object Tracking VOT2017 challenge results``, M. Kristan, A. Leonardis,
18 |         J. Matas, et al., 2017.
19 |
20 | Args:
21 | root_dir (string): Root directory of dataset where sequence
22 | folders exist.
23 | version (integer, optional): Specify the benchmark version. Specify as
24 | one of 2013~2018. Default is 2017.
25 |         anno_type (string, optional): Returned annotation type, chosen as one of
26 |             ``rect`` (corner annotations converted to rectangles) and ``default``. Default is ``rect``.
27 |         download (boolean, optional): If True, downloads the dataset from the internet
28 |             and puts it in the root directory. If the dataset is already downloaded,
29 |             it is not downloaded again.
30 |         return_meta (boolean, optional): If True, returns ``meta``
31 | of each sequence in ``__getitem__`` function, otherwise
32 | only returns ``img_files`` and ``anno``.
33 | list_file (string, optional): If provided, only read sequences
34 | specified by the file.
35 | """
36 | __valid_versions = [2013, 2014, 2015, 2016, 2017, 2018, 'LT2018',
37 | 2019, 'LT2019', 'RGBD2019', 'RGBT2019']
38 |
39 | def __init__(self, root_dir, version=2017, anno_type='rect',
40 | download=True, return_meta=False, list_file=None):
41 | super(VOT, self).__init__()
42 |         assert version in self.__valid_versions, 'Unsupported VOT version.'
43 | assert anno_type in ['default', 'rect'], 'Unknown annotation type.'
44 |
45 | self.root_dir = root_dir
46 | self.version = version
47 | self.anno_type = anno_type
48 | if download:
49 | self._download(root_dir, version)
50 | self.return_meta = return_meta
51 |
52 | if list_file is None:
53 | list_file = os.path.join(root_dir, 'list.txt')
54 | self._check_integrity(root_dir, version, list_file)
55 |
56 | with open(list_file, 'r') as f:
57 | self.seq_names = f.read().strip().split('\n')
58 | self.seq_dirs = [os.path.join(root_dir, s) for s in self.seq_names]
59 | self.anno_files = [os.path.join(s, 'groundtruth.txt')
60 | for s in self.seq_dirs]
61 |
62 | def __getitem__(self, index):
63 | r"""
64 | Args:
65 | index (integer or string): Index or name of a sequence.
66 |
67 | Returns:
68 | tuple: (img_files, anno) if ``return_meta`` is False, otherwise
69 | (img_files, anno, meta), where ``img_files`` is a list of
70 | file names, ``anno`` is a N x 4 (rectangles) or N x 8 (corners) numpy array,
71 |                 while ``meta`` is a dict containing meta information about the sequence.
72 | """
73 | if isinstance(index, six.string_types):
74 | if not index in self.seq_names:
75 | raise Exception('Sequence {} not found.'.format(index))
76 | index = self.seq_names.index(index)
77 |
78 | img_files = sorted(glob.glob(
79 | os.path.join(self.seq_dirs[index], 'color', '*.jpg')))
80 | anno = np.loadtxt(self.anno_files[index], delimiter=',')
81 | assert len(img_files) == len(anno), (len(img_files), len(anno))
82 | assert anno.shape[1] in [4, 8]
83 | if self.anno_type == 'rect' and anno.shape[1] == 8:
84 | anno = self._corner2rect(anno)
85 |
86 | if self.return_meta:
87 | meta = self._fetch_meta(
88 | self.seq_dirs[index], len(img_files))
89 | return img_files, anno, meta
90 | else:
91 | return img_files, anno
92 |
93 | def __len__(self):
94 | return len(self.seq_names)
95 |
96 | def _download(self, root_dir, version):
97 | assert version in self.__valid_versions
98 |
99 | if not os.path.isdir(root_dir):
100 | os.makedirs(root_dir)
101 | elif os.path.isfile(os.path.join(root_dir, 'list.txt')):
102 | with open(os.path.join(root_dir, 'list.txt')) as f:
103 | seq_names = f.read().strip().split('\n')
104 | if all([os.path.isdir(os.path.join(root_dir, s)) for s in seq_names]):
105 | print('Files already downloaded.')
106 | return
107 |
108 | url = 'http://data.votchallenge.net/'
109 | if version in range(2013, 2015 + 1):
110 | # main challenge (2013~2015)
111 | homepage = url + 'vot{}/dataset/'.format(version)
112 | elif version in range(2015, 2019 + 1):
113 | # main challenge (2016~2019)
114 | homepage = url + 'vot{}/main/'.format(version)
115 | elif version.startswith('LT'):
116 | # long-term tracking challenge
117 | year = int(version[2:])
118 | homepage = url + 'vot{}/longterm/'.format(year)
119 | elif version.startswith('RGBD'):
120 | # RGBD tracking challenge
121 | year = int(version[4:])
122 | homepage = url + 'vot{}/rgbd/'.format(year)
123 | elif version.startswith('RGBT'):
124 | # RGBT tracking challenge
125 | year = int(version[4:])
126 | url = url + 'vot{}/rgbtir/'.format(year)
127 | homepage = url + 'meta/'
128 |
129 | # download description file
130 | bundle_url = homepage + 'description.json'
131 | bundle_file = os.path.join(root_dir, 'description.json')
132 | if not os.path.isfile(bundle_file):
133 | print('Downloading description file...')
134 | download(bundle_url, bundle_file)
135 |
136 | # read description file
137 | print('\nParsing description file...')
138 | with open(bundle_file) as f:
139 | bundle = json.load(f)
140 |
141 | # md5 generator
142 | def md5(filename):
143 | hash_md5 = hashlib.md5()
144 | with open(filename, 'rb') as f:
145 | for chunk in iter(lambda: f.read(4096), b""):
146 | hash_md5.update(chunk)
147 | return hash_md5.hexdigest()
148 |
149 | # download all sequences
150 | seq_names = []
151 | for seq in bundle['sequences']:
152 | seq_name = seq['name']
153 | seq_names.append(seq_name)
154 |
155 | # download channel (color/depth/ir) files
156 | channels = seq['channels'].keys()
157 | seq_files = []
158 | for cn in channels:
159 | seq_url = seq['channels'][cn]['url']
160 | if not seq_url.startswith(('http', 'https')):
161 | seq_url = url + seq_url[seq_url.find('sequence'):]
162 | seq_file = os.path.join(
163 | root_dir,
164 | '{}_{}.zip'.format(seq_name, cn))
165 | if not os.path.isfile(seq_file) or \
166 | md5(seq_file) != seq['channels'][cn]['checksum']:
167 | print('\nDownloading %s...' % seq_name)
168 | download(seq_url, seq_file)
169 | seq_files.append(seq_file)
170 |
171 | # download annotations
172 | anno_url = homepage + '%s.zip' % seq_name
173 | anno_file = os.path.join(root_dir, seq_name + '_anno.zip')
174 | if not os.path.isfile(anno_file) or \
175 | md5(anno_file) != seq['annotations']['checksum']:
176 | download(anno_url, anno_file)
177 |
178 | # unzip compressed files
179 | seq_dir = os.path.join(root_dir, seq_name)
180 |             if not os.path.isdir(seq_dir) or len(os.listdir(seq_dir)) < 10:
181 |                 print('Extracting %s...' % seq_name)
182 |                 if not os.path.isdir(seq_dir): os.makedirs(seq_dir)
183 | for seq_file in seq_files:
184 | extract(seq_file, seq_dir)
185 | extract(anno_file, seq_dir)
186 |
187 | # save list.txt
188 | list_file = os.path.join(root_dir, 'list.txt')
189 | with open(list_file, 'w') as f:
190 | f.write(str.join('\n', seq_names))
191 |
192 | return root_dir
193 |
194 | def _check_integrity(self, root_dir, version, list_file=None):
195 | assert version in self.__valid_versions
196 | if list_file is None:
197 | list_file = os.path.join(root_dir, 'list.txt')
198 |
199 | if os.path.isfile(list_file):
200 | with open(list_file, 'r') as f:
201 | seq_names = f.read().strip().split('\n')
202 |
203 | # check each sequence folder
204 | for seq_name in seq_names:
205 | seq_dir = os.path.join(root_dir, seq_name)
206 | if not os.path.isdir(seq_dir):
207 |                     print('Warning: sequence %s does not exist.' % seq_name)
208 |         else:
209 |             # dataset does not exist
210 | raise Exception('Dataset not found or corrupted. ' +
211 | 'You can use download=True to download it.')
212 |
213 | def _corner2rect(self, corners, center=False):
214 | cx = np.mean(corners[:, 0::2], axis=1)
215 | cy = np.mean(corners[:, 1::2], axis=1)
216 |
217 | x1 = np.min(corners[:, 0::2], axis=1)
218 | x2 = np.max(corners[:, 0::2], axis=1)
219 | y1 = np.min(corners[:, 1::2], axis=1)
220 | y2 = np.max(corners[:, 1::2], axis=1)
221 |
222 | area1 = np.linalg.norm(corners[:, 0:2] - corners[:, 2:4], axis=1) * \
223 | np.linalg.norm(corners[:, 2:4] - corners[:, 4:6], axis=1)
224 | area2 = (x2 - x1) * (y2 - y1)
225 | scale = np.sqrt(area1 / area2)
226 | w = scale * (x2 - x1) + 1
227 | h = scale * (y2 - y1) + 1
228 |
229 | if center:
230 | return np.array([cx, cy, w, h]).T
231 | else:
232 | return np.array([cx - w / 2, cy - h / 2, w, h]).T
233 |
234 | def _fetch_meta(self, seq_dir, frame_num):
235 | meta = {}
236 |
237 | # attributes
238 | tag_files = glob.glob(os.path.join(seq_dir, '*.label')) + \
239 | glob.glob(os.path.join(seq_dir, '*.tag'))
240 | for f in tag_files:
241 | tag = os.path.basename(f)
242 | tag = tag[:tag.rfind('.')]
243 | meta[tag] = np.loadtxt(f)
244 |
245 | # practical
246 | practical_file = os.path.join(seq_dir, 'practical')
247 | if os.path.isfile(practical_file + '.value'):
248 | meta['practical'] = np.loadtxt(practical_file + '.value')
249 | if os.path.isfile(practical_file + '.txt'):
250 | meta['practical_txt'] = np.loadtxt(practical_file + '.txt')
251 |
252 | # pad zeros if necessary
253 | for tag, val in meta.items():
254 | if len(val) < frame_num:
255 | meta[tag] = np.pad(
256 | val, (0, frame_num - len(val)), 'constant')
257 |
258 | return meta
259 |
--------------------------------------------------------------------------------
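
A sketch checking the corner-to-rectangle conversion implemented by `VOT._corner2rect` above: for an axis-aligned 8-value polygon the scale factor is 1, so the result is essentially the bounding rectangle, plus the +1 width/height convention used in the method.

```python
import numpy as np

# one axis-aligned box given as corners (x1,y1, x2,y1, x2,y2, x1,y2)
corners = np.array([[10., 20., 50., 20., 50., 80., 10., 80.]])

x1 = np.min(corners[:, 0::2], axis=1)
x2 = np.max(corners[:, 0::2], axis=1)
y1 = np.min(corners[:, 1::2], axis=1)
y2 = np.max(corners[:, 1::2], axis=1)
area1 = np.linalg.norm(corners[:, 0:2] - corners[:, 2:4], axis=1) * \
    np.linalg.norm(corners[:, 2:4] - corners[:, 4:6], axis=1)
area2 = (x2 - x1) * (y2 - y1)
scale = np.sqrt(area1 / area2)   # 1.0 here; smaller for rotated polygons
w = scale * (x2 - x1) + 1
h = scale * (y2 - y1) + 1
print(w, h)                      # [41.] [61.]
```
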
/got10k/experiments/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .got10k import ExperimentGOT10k
4 | from .otb import ExperimentOTB
5 | from .vot import ExperimentVOT
6 | from .dtb70 import ExperimentDTB70
7 | from .uav123 import ExperimentUAV123
8 | from .nfs import ExperimentNfS
9 | from .tcolor128 import ExperimentTColor128
10 |
11 | from .lasot import ExperimentLaSOT
12 | from .trackingnet import ExperimentTrackingNet
13 |
--------------------------------------------------------------------------------
/got10k/experiments/dtb70.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import os
4 |
5 | from .otb import ExperimentOTB
6 | from ..datasets import DTB70
7 |
8 |
9 | class ExperimentDTB70(ExperimentOTB):
10 | r"""Experiment pipeline and evaluation toolkit for DTB70 dataset.
11 |
12 | Args:
13 | root_dir (string): Root directory of DTB70 dataset.
14 | result_dir (string, optional): Directory for storing tracking
15 | results. Default is ``./results``.
16 | report_dir (string, optional): Directory for storing performance
17 | evaluation results. Default is ``./reports``.
18 | """
19 | def __init__(self, root_dir,
20 | result_dir='results', report_dir='reports'):
21 | self.dataset = DTB70(root_dir)
22 | self.result_dir = os.path.join(result_dir, 'DTB70')
23 | self.report_dir = os.path.join(report_dir, 'DTB70')
24 | # as nbins_iou increases, the success score
25 | # converges to the average overlap (AO)
26 | self.nbins_iou = 21
27 | self.nbins_ce = 51
28 |
--------------------------------------------------------------------------------
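
A minimal sketch of how the experiment classes above are meant to be driven, assuming the parent `ExperimentOTB` provides the usual `run`/`report` interface and that `IdentityTracker` is exported by `got10k.trackers`; the dataset path is illustrative.

```python
from got10k.experiments import ExperimentDTB70
from got10k.trackers import IdentityTracker

tracker = IdentityTracker()                      # trivial baseline shipped with the toolkit
experiment = ExperimentDTB70(root_dir='data/DTB70')
experiment.run(tracker, visualize=False)         # run the tracker over all sequences
experiment.report([tracker.name])                # write evaluation results under ./reports
```
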
/got10k/experiments/got10k.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import os
4 | import numpy as np
5 | import glob
6 | import ast
7 | import json
8 | import time
9 | import matplotlib.pyplot as plt
10 | import matplotlib
11 | from PIL import Image
12 | import cv2
13 |
14 | from ..datasets import GOT10k
15 | from ..utils.metrics import rect_iou
16 | from ..utils.viz import show_frame
17 | from ..utils.ioutils import compress
18 |
19 |
20 | class ExperimentGOT10k(object):
21 | r"""Experiment pipeline and evaluation toolkit for GOT-10k dataset.
22 |
23 | Args:
24 | root_dir (string): Root directory of GOT-10k dataset where
25 | ``train``, ``val`` and ``test`` folders exist.
26 |         subset (string): Specify ``val`` or ``test``
27 |             subset of GOT-10k.
28 | list_file (string, optional): If provided, only run experiments on
29 | sequences specified by this file.
30 | result_dir (string, optional): Directory for storing tracking
31 | results. Default is ``./results``.
32 | report_dir (string, optional): Directory for storing performance
33 | evaluation results. Default is ``./reports``.
34 | """
35 | def __init__(self, root_dir, subset='val', list_file=None,
36 | result_dir='results', report_dir='reports', use_dataset=True):
37 | super(ExperimentGOT10k, self).__init__()
38 | assert subset in ['val', 'test']
39 | self.subset = subset
40 | if use_dataset:
41 | self.dataset = GOT10k(
42 | root_dir, subset=subset, list_file=list_file)
43 | self.result_dir = os.path.join(result_dir, 'GOT-10k')
44 | self.report_dir = os.path.join(report_dir, 'GOT-10k')
45 | self.nbins_iou = 101
46 | self.repetitions = 3
47 |
48 | def run(self, tracker, visualize=False, save_video=False, overwrite_result=True):
49 | if self.subset == 'test':
50 | print('\033[93m[WARNING]:\n' \
51 |                   'The ground truth of GOT-10k\'s test set is withheld.\n' \
52 | 'You will have to submit your results to\n' \
53 | '[http://got-10k.aitestunion.com/]' \
54 | '\nto access the performance.\033[0m')
55 | time.sleep(2)
56 |
57 | print('Running tracker %s on GOT-10k...' % tracker.name)
58 | self.dataset.return_meta = False
59 |
60 | # loop over the complete dataset
61 | for s, (img_files, anno) in enumerate(self.dataset):
62 | seq_name = self.dataset.seq_names[s]
63 | print('--Sequence %d/%d: %s' % (
64 | s + 1, len(self.dataset), seq_name))
65 |
66 | # run multiple repetitions for each sequence
67 | for r in range(self.repetitions):
68 | # check if the tracker is deterministic
69 | if r > 0 and tracker.is_deterministic:
70 | break
71 | elif r == 3 and self._check_deterministic(
72 | tracker.name, seq_name):
73 | print(' Detected a deterministic tracker, ' +
74 | 'skipping remaining trials.')
75 | break
76 | print(' Repetition: %d' % (r + 1))
77 |
78 | # skip if results exist
79 | record_file = os.path.join(
80 | self.result_dir, tracker.name, seq_name,
81 | '%s_%03d.txt' % (seq_name, r + 1))
82 | if os.path.exists(record_file) and not overwrite_result:
83 | print(' Found results, skipping', seq_name)
84 | continue
85 |
86 | # tracking loop
87 | boxes, times = tracker.track(
88 | img_files, anno[0, :], visualize=visualize)
89 |
90 | # record results
91 | self._record(record_file, boxes, times)
92 |
93 | # save videos
94 | if save_video:
95 | video_dir = os.path.join(os.path.dirname(os.path.dirname(self.result_dir)),
96 | 'videos', 'GOT-10k', tracker.name)
97 | video_file = os.path.join(video_dir, '%s.avi' % seq_name)
98 |
99 | if not os.path.isdir(video_dir):
100 | os.makedirs(video_dir)
101 | image = Image.open(img_files[0])
102 | img_W, img_H = image.size
103 | out_video = cv2.VideoWriter(video_file, cv2.VideoWriter_fourcc(*'MJPG'), 10, (img_W, img_H))
104 | for ith, (img_file, pred) in enumerate(zip(img_files, boxes)):
105 | image = Image.open(img_file)
106 | if not image.mode == 'RGB':
107 | image = image.convert('RGB')
108 | img = np.array(image)[:, :, ::-1].copy()
109 | pred = pred.astype(int)
110 |                     cv2.rectangle(img, (pred[0], pred[1]), (pred[0] + pred[2], pred[1] + pred[3]), (0, 0, 255), 2)  # prediction in red (BGR)
111 | if ith < anno.shape[0]:
112 | gt = anno[ith].astype(int)
113 |                         cv2.rectangle(img, (gt[0], gt[1]), (gt[0] + gt[2], gt[1] + gt[3]), (0, 255, 0), 2)  # ground truth in green (BGR)
114 | out_video.write(img)
115 | out_video.release()
116 | print(' Videos saved at', video_file)
117 |
118 | def report(self, tracker_names, plot_curves=True):
119 | assert isinstance(tracker_names, (list, tuple))
120 |
121 | if self.subset == 'test':
122 | pwd = os.getcwd()
123 |
124 | # generate compressed submission file for each tracker
125 | for tracker_name in tracker_names:
126 | # compress all tracking results
127 | result_dir = os.path.join(self.result_dir, tracker_name)
128 | os.chdir(result_dir)
129 | save_file = '../%s' % tracker_name
130 | compress('.', save_file)
131 | print('Records saved at', save_file + '.zip')
132 |
133 | # print submission guides
134 | print('\033[93mLogin and follow instructions on')
135 | print('http://got-10k.aitestunion.com/submit_instructions')
136 | print('to upload and evaluate your tracking results\033[0m')
137 |
138 | # switch back to previous working directory
139 | os.chdir(pwd)
140 |
141 | return None
142 | elif self.subset == 'val':
143 |             # meta information is useful during evaluation
144 | self.dataset.return_meta = True
145 |
146 | # assume tracker_names[0] is your tracker
147 | report_dir = os.path.join(self.report_dir, tracker_names[0])
148 | if not os.path.exists(report_dir):
149 | os.makedirs(report_dir)
150 | report_file = os.path.join(report_dir, 'performance.json')
151 |
152 | # visible ratios of all sequences
153 | seq_names = self.dataset.seq_names
154 | covers = {s: self.dataset[s][2]['cover'][1:] for s in seq_names}
155 |
156 | performance = {}
157 | for name in tracker_names:
158 | print('Evaluating', name)
159 | ious = {}
160 | times = {}
161 | performance.update({name: {
162 | 'overall': {},
163 | 'seq_wise': {}}})
164 |
165 | for s, (_, anno, meta) in enumerate(self.dataset):
166 | seq_name = self.dataset.seq_names[s]
167 | record_files = glob.glob(os.path.join(
168 | self.result_dir, name, seq_name,
169 | '%s_[0-9]*.txt' % seq_name))
170 | if len(record_files) == 0:
171 | raise Exception('Results for sequence %s not found.' % seq_name)
172 |
173 | # read results of all repetitions
174 | boxes = [np.loadtxt(f, delimiter=',') for f in record_files]
175 | assert all([b.shape == anno.shape for b in boxes])
176 |
177 | # calculate and stack all ious
178 | bound = ast.literal_eval(meta['resolution'])
179 | seq_ious = [rect_iou(b[1:], anno[1:], bound=bound) for b in boxes]
180 | # only consider valid frames where targets are visible
181 | seq_ious = [t[covers[seq_name] > 0] for t in seq_ious]
182 | seq_ious = np.concatenate(seq_ious)
183 | ious[seq_name] = seq_ious
184 |
185 | # stack all tracking times
186 | times[seq_name] = []
187 | time_file = os.path.join(
188 | self.result_dir, name, seq_name,
189 | '%s_time.txt' % seq_name)
190 | if os.path.exists(time_file):
191 | seq_times = np.loadtxt(time_file, delimiter=',')
192 | seq_times = seq_times[~np.isnan(seq_times)]
193 | seq_times = seq_times[seq_times > 0]
194 | if len(seq_times) > 0:
195 | times[seq_name] = seq_times
196 |
197 | # store sequence-wise performance
198 |                 ao, sr, speed, _ = self._evaluate(seq_ious, times[seq_name])
199 | performance[name]['seq_wise'].update({seq_name: {
200 | 'ao': ao,
201 | 'sr': sr,
202 | 'speed_fps': speed,
203 | 'length': len(anno) - 1}})
204 |
205 | ious = np.concatenate(list(ious.values()))
206 | times = np.concatenate(list(times.values()))
207 |
208 | # store overall performance
209 | ao, sr, speed, succ_curve = self._evaluate(ious, times)
210 | performance[name].update({'overall': {
211 | 'ao': ao,
212 | 'sr': sr,
213 | 'speed_fps': speed,
214 | 'succ_curve': succ_curve.tolist()}})
215 |
216 | # save performance
217 | with open(report_file, 'w') as f:
218 | json.dump(performance, f, indent=4)
219 | # plot success curves
220 | if plot_curves:
221 | self.plot_curves([report_file], tracker_names)
222 |
223 | return performance
224 |
225 | def show(self, tracker_names, seq_names=None, play_speed=1):
226 | if seq_names is None:
227 | seq_names = self.dataset.seq_names
228 | elif isinstance(seq_names, str):
229 | seq_names = [seq_names]
230 | assert isinstance(tracker_names, (list, tuple))
231 | assert isinstance(seq_names, (list, tuple))
232 |
233 | play_speed = int(round(play_speed))
234 | assert play_speed > 0
235 | self.dataset.return_meta = False
236 |
237 | for s, seq_name in enumerate(seq_names):
238 | print('[%d/%d] Showing results on %s...' % (
239 | s + 1, len(seq_names), seq_name))
240 |
241 | # load all tracking results
242 | records = {}
243 | for name in tracker_names:
244 | record_file = os.path.join(
245 | self.result_dir, name, seq_name,
246 | '%s_001.txt' % seq_name)
247 | records[name] = np.loadtxt(record_file, delimiter=',')
248 |
249 | # loop over the sequence and display results
250 | img_files, anno = self.dataset[seq_name]
251 | for f, img_file in enumerate(img_files):
252 | if not f % play_speed == 0:
253 | continue
254 | image = Image.open(img_file)
255 | boxes = [anno[f]] + [
256 | records[name][f] for name in tracker_names]
257 | show_frame(image, boxes,
258 | legends=['GroundTruth'] + tracker_names,
259 | colors=['w', 'r', 'g', 'b', 'c', 'm', 'y',
260 | 'orange', 'purple', 'brown', 'pink'])
261 |
262 | def _record(self, record_file, boxes, times):
263 | # record bounding boxes
264 | record_dir = os.path.dirname(record_file)
265 | if not os.path.isdir(record_dir):
266 | os.makedirs(record_dir)
267 | np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
268 | while not os.path.exists(record_file):
269 | print('warning: recording failed, retrying...')
270 | np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
271 | print(' Results recorded at', record_file)
272 |
273 | # record running times
274 | time_file = record_file[:record_file.rfind('_')] + '_time.txt'
275 | times = times[:, np.newaxis]
276 | if os.path.exists(time_file):
277 | exist_times = np.loadtxt(time_file, delimiter=',')
278 | if exist_times.ndim == 1:
279 | exist_times = exist_times[:, np.newaxis]
280 | times = np.concatenate((exist_times, times), axis=1)
281 | np.savetxt(time_file, times, fmt='%.8f', delimiter=',')
282 |
283 | def _check_deterministic(self, tracker_name, seq_name):
284 | record_dir = os.path.join(
285 | self.result_dir, tracker_name, seq_name)
286 | record_files = sorted(glob.glob(os.path.join(
287 | record_dir, '%s_[0-9]*.txt' % seq_name)))
288 |
289 | if len(record_files) < 3:
290 | return False
291 |
292 | records = []
293 | for record_file in record_files:
294 | with open(record_file, 'r') as f:
295 | records.append(f.read())
296 |
297 | return len(set(records)) == 1
298 |
299 | def _evaluate(self, ious, times):
300 | # AO, SR and tracking speed
301 | ao = np.mean(ious)
302 | sr = np.mean(ious > 0.5)
303 | if len(times) > 0:
304 | # times has to be an array of positive values
305 | speed_fps = np.mean(1. / times)
306 | else:
307 | speed_fps = -1
308 |
309 | # success curve
310 | # thr_iou = np.linspace(0, 1, 101)
311 | thr_iou = np.linspace(0, 1, self.nbins_iou)
312 | bin_iou = np.greater(ious[:, None], thr_iou[None, :])
313 | succ_curve = np.mean(bin_iou, axis=0)
314 |
315 | return ao, sr, speed_fps, succ_curve
316 |
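To make the arithmetic in ``_evaluate`` concrete, here is a minimal stand-alone sketch with made-up IoU and timing values; ``thr_iou`` uses 101 bins as a stand-in for ``self.nbins_iou``.

import numpy as np

ious = np.array([0.0, 0.3, 0.6, 0.9])    # per-frame overlaps (made up)
times = np.array([0.02, 0.025, 0.02])    # per-frame runtimes in seconds (made up)

ao = np.mean(ious)                        # average overlap          -> 0.45
sr = np.mean(ious > 0.5)                  # success rate @ IoU > 0.5 -> 0.5
speed_fps = np.mean(1. / times)           # mean instantaneous frame rate
thr_iou = np.linspace(0, 1, 101)          # stand-in for self.nbins_iou bins
succ_curve = np.mean(ious[:, None] > thr_iou[None, :], axis=0)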
317 | def plot_curves(self, report_files, tracker_names, extension='.png'):
318 | assert isinstance(report_files, list), \
319 | 'Expected "report_files" to be a list, ' \
320 | 'but got %s instead' % type(report_files)
321 |
322 | # assume tracker_names[0] is your tracker
323 | report_dir = os.path.join(self.report_dir, tracker_names[0])
324 | if not os.path.exists(report_dir):
325 | os.makedirs(report_dir)
326 |
327 | performance = {}
328 | for report_file in report_files:
329 | with open(report_file) as f:
330 | performance.update(json.load(f))
331 |
332 | succ_file = os.path.join(report_dir, 'success_plot'+extension)
333 | key = 'overall'
334 |
335 | # filter performance by tracker_names
336 |         performance = {k: v for k, v in performance.items() if k in tracker_names}
337 |
338 | # sort trackers by AO
339 | tracker_names = list(performance.keys())
340 | aos = [t[key]['ao'] for t in performance.values()]
341 | inds = np.argsort(aos)[::-1]
342 | tracker_names = [tracker_names[i] for i in inds]
343 |
344 | # markers
345 | markers = ['-', '--', '-.']
346 | markers = [c + m for m in markers for c in [''] * 10]
347 |
348 | # plot success curves
349 | thr_iou = np.linspace(0, 1, self.nbins_iou)
350 | fig, ax = plt.subplots()
351 | lines = []
352 | legends = []
353 | for i, name in enumerate(tracker_names):
354 | line, = ax.plot(thr_iou,
355 | performance[name][key]['succ_curve'],
356 | markers[i % len(markers)])
357 | lines.append(line)
358 | legends.append('%s: [%.3f]' % (
359 | name, performance[name][key]['ao']))
360 | matplotlib.rcParams.update({'font.size': 7.4})
361 | legend = ax.legend(lines, legends, loc='lower left',
362 | bbox_to_anchor=(0., 0.))
363 |
364 | matplotlib.rcParams.update({'font.size': 9})
365 | ax.set(xlabel='Overlap threshold',
366 | ylabel='Success rate',
367 | xlim=(0, 1), ylim=(0, 1),
368 | title='Success plots on GOT-10k')
369 | ax.grid(True)
370 | fig.tight_layout()
371 |
372 | # control ratio
373 | # ax.set_aspect('equal', 'box')
374 |
375 | print('Saving success plots to', succ_file)
376 | fig.savefig(succ_file,
377 | bbox_extra_artists=(legend,),
378 | bbox_inches='tight',
379 | dpi=300)
380 |
--------------------------------------------------------------------------------
/got10k/experiments/lasot.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import os
4 | import json
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 | import matplotlib
8 |
9 | from .otb import ExperimentOTB
10 | from ..datasets import LaSOT
11 | from ..utils.metrics import rect_iou, center_error, normalized_center_error
12 |
13 |
14 | class ExperimentLaSOT(ExperimentOTB):
15 | r"""Experiment pipeline and evaluation toolkit for LaSOT dataset.
16 |
17 | Args:
18 | root_dir (string): Root directory of LaSOT dataset.
19 | subset (string, optional): Specify ``train`` or ``test``
20 | subset of LaSOT. Default is ``test``.
21 | return_meta (bool, optional): whether to fetch meta info
22 | (occlusion or out-of-view). Default is ``False``.
23 | result_dir (string, optional): Directory for storing tracking
24 | results. Default is ``./results``.
25 | report_dir (string, optional): Directory for storing performance
26 | evaluation results. Default is ``./reports``.
27 | """
28 | def __init__(self, root_dir, subset='test', return_meta=False,
29 | result_dir='results', report_dir='reports'):
30 | # assert subset.upper() in ['TRAIN', 'TEST']
31 | self.dataset = LaSOT(root_dir, subset, return_meta=return_meta)
32 | self.result_dir = result_dir
33 | self.report_dir = report_dir
34 |
35 | # as nbins_iou increases, the success score
36 | # converges to the average overlap (AO)
37 | self.nbins_iou = 21
38 | self.nbins_ce = 51
39 | self.nbins_nce = 51
40 |
41 | def report(self, tracker_names):
42 | assert isinstance(tracker_names, (list, tuple))
43 |
44 | # assume tracker_names[0] is your tracker
45 | report_dir = os.path.join(self.report_dir, tracker_names[0])
46 | if not os.path.isdir(report_dir):
47 | os.makedirs(report_dir)
48 | report_file = os.path.join(report_dir, 'performance.json')
49 |
50 | performance = {}
51 | for name in tracker_names:
52 | print('Evaluating', name)
53 | seq_num = len(self.dataset)
54 | succ_curve = np.zeros((seq_num, self.nbins_iou))
55 | prec_curve = np.zeros((seq_num, self.nbins_ce))
56 | norm_prec_curve = np.zeros((seq_num, self.nbins_nce))
57 | speeds = np.zeros(seq_num)
58 |
59 | performance.update({name: {
60 | 'overall': {},
61 | 'seq_wise': {}}})
62 |
63 | for s, (_, anno) in enumerate(self.dataset):
64 | seq_name = self.dataset.seq_names[s]
65 | record_file = os.path.join(
66 | self.result_dir, name, '%s.txt' % seq_name)
67 | boxes = np.loadtxt(record_file, delimiter=',')
68 | boxes[0] = anno[0]
69 | if not (len(boxes) == len(anno)):
70 | # from IPython import embed;embed()
71 |                     print('warning: %s annotations do not match boxes' % seq_name)
72 | len_min = min(len(boxes),len(anno))
73 | boxes = boxes[:len_min]
74 | anno = anno[:len_min]
75 | assert len(boxes) == len(anno)
76 |
77 | ious, center_errors, norm_center_errors = self._calc_metrics(boxes, anno)
78 | succ_curve[s], prec_curve[s], norm_prec_curve[s] = self._calc_curves(ious, center_errors, norm_center_errors)
79 |
80 | # calculate average tracking speed
81 | time_file = os.path.join(
82 | self.result_dir, name, 'times/%s_time.txt' % seq_name)
83 | if os.path.isfile(time_file):
84 | times = np.loadtxt(time_file)
85 | times = times[times > 0]
86 | if len(times) > 0:
87 | speeds[s] = np.mean(1. / times)
88 |
89 | # store sequence-wise performance
90 | performance[name]['seq_wise'].update({seq_name: {
91 | 'success_curve': succ_curve[s].tolist(),
92 | 'precision_curve': prec_curve[s].tolist(),
93 | 'normalized_precision_curve': norm_prec_curve[s].tolist(),
94 | 'success_score': np.mean(succ_curve[s]),
95 | 'precision_score': prec_curve[s][20],
96 | 'normalized_precision_score': np.mean(norm_prec_curve[s]),
97 | 'success_rate': succ_curve[s][self.nbins_iou // 2],
98 | 'speed_fps': speeds[s] if speeds[s] > 0 else -1}})
99 |
100 | succ_curve = np.mean(succ_curve, axis=0)
101 | prec_curve = np.mean(prec_curve, axis=0)
102 | norm_prec_curve = np.mean(norm_prec_curve, axis=0)
103 | succ_score = np.mean(succ_curve)
104 | prec_score = prec_curve[20]
105 | norm_prec_score = np.mean(norm_prec_curve)
106 | succ_rate = succ_curve[self.nbins_iou // 2]
107 | if np.count_nonzero(speeds) > 0:
108 | avg_speed = np.sum(speeds) / np.count_nonzero(speeds)
109 | else:
110 | avg_speed = -1
111 |
112 | # store overall performance
113 | performance[name]['overall'].update({
114 | 'success_curve': succ_curve.tolist(),
115 | 'precision_curve': prec_curve.tolist(),
116 | 'normalized_precision_curve': norm_prec_curve.tolist(),
117 | 'success_score': succ_score,
118 | 'precision_score': prec_score,
119 | 'normalized_precision_score': norm_prec_score,
120 | 'success_rate': succ_rate,
121 | 'speed_fps': avg_speed})
122 |
123 | # report the performance
124 | with open(report_file, 'w') as f:
125 | json.dump(performance, f, indent=4)
126 | # plot precision and success curves
127 | self.plot_curves(tracker_names)
128 |
129 | return performance
130 |
131 | def _calc_metrics(self, boxes, anno):
132 | valid = ~np.any(np.isnan(anno), axis=1)
133 |         if not np.any(valid):
134 | print('Warning: no valid annotations')
135 | return None, None, None
136 | else:
137 | ious = rect_iou(boxes[valid, :], anno[valid, :])
138 | center_errors = center_error(
139 | boxes[valid, :], anno[valid, :])
140 | norm_center_errors = normalized_center_error(
141 | boxes[valid, :], anno[valid, :])
142 | return ious, center_errors, norm_center_errors
143 |
144 | def _calc_curves(self, ious, center_errors, norm_center_errors):
145 | ious = np.asarray(ious, float)[:, np.newaxis]
146 | center_errors = np.asarray(center_errors, float)[:, np.newaxis]
147 | norm_center_errors = np.asarray(norm_center_errors, float)[:, np.newaxis]
148 |
149 | thr_iou = np.linspace(0, 1, self.nbins_iou)[np.newaxis, :]
150 | thr_ce = np.arange(0, self.nbins_ce)[np.newaxis, :]
151 | thr_nce = np.linspace(0, 0.5, self.nbins_nce)[np.newaxis, :]
152 |
153 | bin_iou = np.greater(ious, thr_iou)
154 | bin_ce = np.less_equal(center_errors, thr_ce)
155 | bin_nce = np.less_equal(norm_center_errors, thr_nce)
156 |
157 | succ_curve = np.mean(bin_iou, axis=0)
158 | prec_curve = np.mean(bin_ce, axis=0)
159 | norm_prec_curve = np.mean(bin_nce, axis=0)
160 |
161 | return succ_curve, prec_curve, norm_prec_curve
162 |
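A minimal stand-alone sketch of the thresholding performed in ``_calc_curves``, shown for the normalized precision curve only; the per-frame errors are made up, and the threshold grid mirrors ``np.linspace(0, 0.5, self.nbins_nce)``.

import numpy as np

norm_center_errors = np.array([0.01, 0.08, 0.40])   # per-frame normalized errors (made up)
thr_nce = np.linspace(0, 0.5, 51)                    # same grid as self.nbins_nce = 51
bin_nce = norm_center_errors[:, None] <= thr_nce[None, :]
norm_prec_curve = bin_nce.mean(axis=0)               # fraction of frames under each threshold
norm_prec_score = norm_prec_curve.mean()             # reported as 'normalized_precision_score'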
163 | def plot_curves(self, tracker_names, extension='.png'):
164 | # assume tracker_names[0] is your tracker
165 | report_dir = os.path.join(self.report_dir, tracker_names[0])
166 | assert os.path.exists(report_dir), \
167 |             'No reports found. Run "report" first ' \
168 | 'before plotting curves.'
169 | report_file = os.path.join(report_dir, 'performance.json')
170 | assert os.path.exists(report_file), \
171 |             'No reports found. Run "report" first ' \
172 | 'before plotting curves.'
173 |
174 | # load pre-computed performance
175 | with open(report_file) as f:
176 | performance = json.load(f)
177 |
178 | succ_file = os.path.join(report_dir, 'success_plots'+extension)
179 | prec_file = os.path.join(report_dir, 'precision_plots'+extension)
180 | norm_prec_file = os.path.join(report_dir, 'norm_precision_plots'+extension)
181 | key = 'overall'
182 |
183 | # markers
184 | markers = ['-', '--', '-.']
185 | markers = [c + m for m in markers for c in [''] * 10]
186 |
187 | # filter performance by tracker_names
188 |         performance = {k: v for k, v in performance.items() if k in tracker_names}
189 |
190 | # sort trackers by success score
191 | tracker_names = list(performance.keys())
192 | succ = [t[key]['success_score'] for t in performance.values()]
193 | inds = np.argsort(succ)[::-1]
194 | tracker_names = [tracker_names[i] for i in inds]
195 |
196 | # plot success curves
197 | thr_iou = np.linspace(0, 1, self.nbins_iou)
198 | fig, ax = plt.subplots()
199 | lines = []
200 | legends = []
201 | for i, name in enumerate(tracker_names):
202 | line, = ax.plot(thr_iou,
203 | performance[name][key]['success_curve'],
204 | markers[i % len(markers)])
205 | lines.append(line)
206 | legends.append('%s: [%.3f]' % (name, performance[name][key]['success_score']))
207 | matplotlib.rcParams.update({'font.size': 7.4})
208 | # legend = ax.legend(lines, legends, loc='center left', bbox_to_anchor=(1, 0.5))
209 | legend = ax.legend(lines, legends, loc='lower left', bbox_to_anchor=(0., 0.))
210 |
211 | matplotlib.rcParams.update({'font.size': 9})
212 | ax.set(xlabel='Overlap threshold',
213 | ylabel='Success rate',
214 | xlim=(0, 1), ylim=(0, 1),
215 | title='Success plots on LaSOT')
216 | ax.grid(True)
217 | fig.tight_layout()
218 |
219 | # control ratio
220 | # ax.set_aspect('equal', 'box')
221 |
222 | print('Saving success plots to', succ_file)
223 | fig.savefig(succ_file,
224 | bbox_extra_artists=(legend,),
225 | bbox_inches='tight',
226 | dpi=300)
227 |
228 | # sort trackers by precision score
229 | tracker_names = list(performance.keys())
230 | prec = [t[key]['precision_score'] for t in performance.values()]
231 | inds = np.argsort(prec)[::-1]
232 | tracker_names = [tracker_names[i] for i in inds]
233 |
234 | # plot precision curves
235 | thr_ce = np.arange(0, self.nbins_ce)
236 | fig, ax = plt.subplots()
237 | lines = []
238 | legends = []
239 | for i, name in enumerate(tracker_names):
240 | line, = ax.plot(thr_ce,
241 | performance[name][key]['precision_curve'],
242 | markers[i % len(markers)])
243 | lines.append(line)
244 | legends.append('%s: [%.3f]' % (name, performance[name][key]['precision_score']))
245 | matplotlib.rcParams.update({'font.size': 7.4})
246 | # legend = ax.legend(lines, legends, loc='center left', bbox_to_anchor=(1, 0.5))
247 | legend = ax.legend(lines, legends, loc='lower right', bbox_to_anchor=(1., 0.))
248 |
249 | matplotlib.rcParams.update({'font.size': 9})
250 | ax.set(xlabel='Location error threshold',
251 | ylabel='Precision',
252 | xlim=(0, thr_ce.max()), ylim=(0, 1),
253 | title='Precision plots on LaSOT')
254 | ax.grid(True)
255 | fig.tight_layout()
256 |
257 | # control ratio
258 | # ax.set_aspect('equal', 'box')
259 |
260 | print('Saving precision plots to', prec_file)
261 | fig.savefig(prec_file, dpi=300)
262 |
263 |         # normalized precision (in addition to the OTB-style metrics above)
264 | # sort trackers by normalized precision score
265 | tracker_names = list(performance.keys())
266 | prec = [t[key]['normalized_precision_score'] for t in performance.values()]
267 | inds = np.argsort(prec)[::-1]
268 | tracker_names = [tracker_names[i] for i in inds]
269 |
270 | # plot normalized precision curves
271 | thr_nce = np.arange(0, self.nbins_nce)
272 | fig, ax = plt.subplots()
273 | lines = []
274 | legends = []
275 | for i, name in enumerate(tracker_names):
276 | line, = ax.plot(thr_nce,
277 | performance[name][key]['normalized_precision_curve'],
278 | markers[i % len(markers)])
279 | lines.append(line)
280 | legends.append('%s: [%.3f]' % (name, performance[name][key]['normalized_precision_score']))
281 | matplotlib.rcParams.update({'font.size': 7.4})
282 | # legend = ax.legend(lines, legends, loc='center left', bbox_to_anchor=(1, 0.5))
283 | legend = ax.legend(lines, legends, loc='lower right', bbox_to_anchor=(1., 0.))
284 |
285 | matplotlib.rcParams.update({'font.size': 9})
286 | ax.set(xlabel='Normalized location error threshold',
287 | ylabel='Normalized precision',
288 |                xlim=(0, thr_nce.max()), ylim=(0, 1),
289 | title='Normalized precision plots on LaSOT')
290 | ax.grid(True)
291 | fig.tight_layout()
292 |
293 | # control ratio
294 | # ax.set_aspect('equal', 'box')
295 |
296 | print('Saving normalized precision plots to', norm_prec_file)
297 | fig.savefig(norm_prec_file, dpi=300)
298 |
299 |
--------------------------------------------------------------------------------
/got10k/experiments/nfs.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import os
4 |
5 | from .otb import ExperimentOTB
6 | from ..datasets import NfS
7 |
8 |
9 | class ExperimentNfS(ExperimentOTB):
10 | r"""Experiment pipeline and evaluation toolkit for NfS dataset.
11 |
12 | Args:
13 | root_dir (string): Root directory of NfS dataset.
14 | result_dir (string, optional): Directory for storing tracking
15 | results. Default is ``./results``.
16 | report_dir (string, optional): Directory for storing performance
17 | evaluation results. Default is ``./reports``.
18 | """
19 | def __init__(self, root_dir, fps=240,
20 | result_dir='results', report_dir='reports'):
21 | self.dataset = NfS(root_dir, fps)
22 | self.result_dir = os.path.join(result_dir, 'NfS/%d' % fps)
23 | self.report_dir = os.path.join(report_dir, 'NfS/%d' % fps)
24 | # as nbins_iou increases, the success score
25 | # converges to the average overlap (AO)
26 | self.nbins_iou = 21
27 | self.nbins_ce = 51
28 |
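A minimal usage sketch: the ``fps`` argument routes results and reports into separate ``NfS/<fps>`` folders. The dataset path below is a placeholder, and the 30 fps call assumes that variant of the dataset is available alongside the default 240 fps data.

from got10k.experiments.nfs import ExperimentNfS

exp_240 = ExperimentNfS('data/NfS', fps=240)   # results land in results/NfS/240
exp_30 = ExperimentNfS('data/NfS', fps=30)     # results land in results/NfS/30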
--------------------------------------------------------------------------------
/got10k/experiments/otb.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import os
4 | import numpy as np
5 | import matplotlib.pyplot as plt
6 | import matplotlib
7 | import json
8 | from PIL import Image
9 |
10 | from ..datasets import OTB
11 | from ..utils.metrics import rect_iou, center_error
12 | from ..utils.viz import show_frame
13 |
14 |
15 | class ExperimentOTB(object):
16 | r"""Experiment pipeline and evaluation toolkit for OTB dataset.
17 |
18 | Args:
19 | root_dir (string): Root directory of OTB dataset.
20 | version (integer or string): Specify the benchmark version, specify as one of
21 | ``2013``, ``2015``, ``tb50`` and ``tb100``. Default is ``2015``.
22 | result_dir (string, optional): Directory for storing tracking
23 | results. Default is ``./results``.
24 | report_dir (string, optional): Directory for storing performance
25 | evaluation results. Default is ``./reports``.
26 | """
27 | def __init__(self, root_dir, version=2015,
28 | result_dir='results', report_dir='reports'):
29 | super(ExperimentOTB, self).__init__()
30 | self.dataset = OTB(root_dir, version, download=True)
31 | self.result_dir = os.path.join(result_dir, 'OTB' + str(version))
32 | self.report_dir = os.path.join(report_dir, 'OTB' + str(version))
33 | # as nbins_iou increases, the success score
34 | # converges to the average overlap (AO)
35 | self.nbins_iou = 21
36 | self.nbins_ce = 51
37 |
38 | def run(self, tracker, visualize=False):
39 | print('Running tracker %s on %s...' % (
40 | tracker.name, type(self.dataset).__name__))
41 |
42 | # loop over the complete dataset
43 | for s, (img_files, anno) in enumerate(self.dataset):
44 | seq_name = self.dataset.seq_names[s]
45 | print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
46 |
47 | # skip if results exist
48 | record_file = os.path.join(
49 | self.result_dir, tracker.name, '%s.txt' % seq_name)
50 | if os.path.exists(record_file):
51 | print(' Found results, skipping', seq_name)
52 | continue
53 |
54 | # tracking loop
55 | boxes, times = tracker.track(
56 | img_files, anno[0, :], visualize=visualize)
57 | assert len(boxes) == len(anno)
58 |
59 | # record results
60 | self._record(record_file, boxes, times)
61 |
62 | def report(self, tracker_names, plot_curves=True):
63 | assert isinstance(tracker_names, (list, tuple))
64 |
65 | # assume tracker_names[0] is your tracker
66 | report_dir = os.path.join(self.report_dir, tracker_names[0])
67 | if not os.path.isdir(report_dir):
68 | os.makedirs(report_dir)
69 | report_file = os.path.join(report_dir, 'performance.json')
70 |
71 | performance = {}
72 | for name in tracker_names:
73 | print('Evaluating', name)
74 | seq_num = len(self.dataset)
75 | succ_curve = np.zeros((seq_num, self.nbins_iou))
76 | prec_curve = np.zeros((seq_num, self.nbins_ce))
77 | speeds = np.zeros(seq_num)
78 |
79 | performance.update({name: {
80 | 'overall': {},
81 | 'seq_wise': {}}})
82 |
83 | for s, (_, anno) in enumerate(self.dataset):
84 | seq_name = self.dataset.seq_names[s]
85 | record_file = os.path.join(
86 | self.result_dir, name, '%s.txt' % seq_name)
87 | boxes = np.loadtxt(record_file, delimiter=',')
88 | boxes[0] = anno[0]
89 | if not (len(boxes) == len(anno)):
90 |                     print('warning: %s annotations do not match boxes' % seq_name)
91 | len_min = min(len(boxes),len(anno))
92 | boxes = boxes[:len_min]
93 | anno = anno[:len_min]
94 | assert len(boxes) == len(anno)
95 |
96 | ious, center_errors = self._calc_metrics(boxes, anno)
97 | succ_curve[s], prec_curve[s] = self._calc_curves(ious, center_errors)
98 |
99 | # calculate average tracking speed
100 | time_file = os.path.join(
101 | self.result_dir, name, 'times/%s_time.txt' % seq_name)
102 | if os.path.isfile(time_file):
103 | times = np.loadtxt(time_file)
104 | times = times[times > 0]
105 | if len(times) > 0:
106 | speeds[s] = np.mean(1. / times)
107 |
108 | # store sequence-wise performance
109 | performance[name]['seq_wise'].update({seq_name: {
110 | 'success_curve': succ_curve[s].tolist(),
111 | 'precision_curve': prec_curve[s].tolist(),
112 | 'success_score': np.mean(succ_curve[s]),
113 | 'precision_score': prec_curve[s][20],
114 | 'success_rate': succ_curve[s][self.nbins_iou // 2],
115 | 'speed_fps': speeds[s] if speeds[s] > 0 else -1}})
116 |
117 | succ_curve = np.mean(succ_curve, axis=0)
118 | prec_curve = np.mean(prec_curve, axis=0)
119 | succ_score = np.mean(succ_curve)
120 | prec_score = prec_curve[20]
121 | succ_rate = succ_curve[self.nbins_iou // 2]
122 | if np.count_nonzero(speeds) > 0:
123 | avg_speed = np.sum(speeds) / np.count_nonzero(speeds)
124 | else:
125 | avg_speed = -1
126 |
127 | # store overall performance
128 | performance[name]['overall'].update({
129 | 'success_curve': succ_curve.tolist(),
130 | 'precision_curve': prec_curve.tolist(),
131 | 'success_score': succ_score,
132 | 'precision_score': prec_score,
133 | 'success_rate': succ_rate,
134 | 'speed_fps': avg_speed})
135 |
136 | # report the performance
137 | with open(report_file, 'w') as f:
138 | json.dump(performance, f, indent=4)
139 | # plot precision and success curves
140 | if plot_curves:
141 | self.plot_curves(tracker_names)
142 |
143 | return performance
144 |
145 | def show(self, tracker_names, seq_names=None, play_speed=1):
146 | if seq_names is None:
147 | seq_names = self.dataset.seq_names
148 | elif isinstance(seq_names, str):
149 | seq_names = [seq_names]
150 | assert isinstance(tracker_names, (list, tuple))
151 | assert isinstance(seq_names, (list, tuple))
152 |
153 | play_speed = int(round(play_speed))
154 | assert play_speed > 0
155 |
156 | for s, seq_name in enumerate(seq_names):
157 | print('[%d/%d] Showing results on %s...' % (
158 | s + 1, len(seq_names), seq_name))
159 |
160 | # load all tracking results
161 | records = {}
162 | for name in tracker_names:
163 | record_file = os.path.join(
164 | self.result_dir, name, '%s.txt' % seq_name)
165 | records[name] = np.loadtxt(record_file, delimiter=',')
166 |
167 | # loop over the sequence and display results
168 | img_files, anno = self.dataset[seq_name]
169 | for f, img_file in enumerate(img_files):
170 | if not f % play_speed == 0:
171 | continue
172 | image = Image.open(img_file)
173 | boxes = [anno[f]] + [
174 | records[name][f] for name in tracker_names]
175 | show_frame(image, boxes,
176 | legends=['GroundTruth'] + tracker_names,
177 | colors=['w', 'r', 'g', 'b', 'c', 'm', 'y',
178 | 'orange', 'purple', 'brown', 'pink'])
179 |
180 | def _record(self, record_file, boxes, times):
181 | # record bounding boxes
182 | record_dir = os.path.dirname(record_file)
183 | if not os.path.isdir(record_dir):
184 | os.makedirs(record_dir)
185 | np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
186 | while not os.path.exists(record_file):
187 | print('warning: recording failed, retrying...')
188 | np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
189 | print(' Results recorded at', record_file)
190 |
191 | # record running times
192 | time_dir = os.path.join(record_dir, 'times')
193 | if not os.path.isdir(time_dir):
194 | os.makedirs(time_dir)
195 | time_file = os.path.join(time_dir, os.path.basename(
196 | record_file).replace('.txt', '_time.txt'))
197 | np.savetxt(time_file, times, fmt='%.8f')
198 |
199 | def _calc_metrics(self, boxes, anno):
200 | # can be modified by children classes
201 | ious = rect_iou(boxes, anno)
202 | center_errors = center_error(boxes, anno)
203 | return ious, center_errors
204 |
205 | def _calc_curves(self, ious, center_errors):
206 | ious = np.asarray(ious, float)[:, np.newaxis]
207 | center_errors = np.asarray(center_errors, float)[:, np.newaxis]
208 |
209 | thr_iou = np.linspace(0, 1, self.nbins_iou)[np.newaxis, :]
210 | thr_ce = np.arange(0, self.nbins_ce)[np.newaxis, :]
211 |
212 | bin_iou = np.greater(ious, thr_iou)
213 | bin_ce = np.less_equal(center_errors, thr_ce)
214 |
215 | succ_curve = np.mean(bin_iou, axis=0)
216 | prec_curve = np.mean(bin_ce, axis=0)
217 |
218 | return succ_curve, prec_curve
219 |
220 | def plot_curves(self, tracker_names):
221 | # assume tracker_names[0] is your tracker
222 | report_dir = os.path.join(self.report_dir, tracker_names[0])
223 | assert os.path.exists(report_dir), \
224 |             'No reports found. Run "report" first ' \
225 | 'before plotting curves.'
226 | report_file = os.path.join(report_dir, 'performance.json')
227 | assert os.path.exists(report_file), \
228 |             'No reports found. Run "report" first ' \
229 | 'before plotting curves.'
230 |
231 | # load pre-computed performance
232 | with open(report_file) as f:
233 | performance = json.load(f)
234 |
235 | succ_file = os.path.join(report_dir, 'success_plots.png')
236 | prec_file = os.path.join(report_dir, 'precision_plots.png')
237 | key = 'overall'
238 |
239 | # markers
240 | markers = ['-', '--', '-.']
241 | markers = [c + m for m in markers for c in [''] * 10]
242 |
243 | # sort trackers by success score
244 | tracker_names = list(performance.keys())
245 | succ = [t[key]['success_score'] for t in performance.values()]
246 | inds = np.argsort(succ)[::-1]
247 | tracker_names = [tracker_names[i] for i in inds]
248 |
249 | # plot success curves
250 | thr_iou = np.linspace(0, 1, self.nbins_iou)
251 | fig, ax = plt.subplots()
252 | lines = []
253 | legends = []
254 | for i, name in enumerate(tracker_names):
255 | line, = ax.plot(thr_iou,
256 | performance[name][key]['success_curve'],
257 | markers[i % len(markers)])
258 | lines.append(line)
259 | legends.append('%s: [%.3f]' % (name, performance[name][key]['success_score']))
260 | matplotlib.rcParams.update({'font.size': 7.4})
261 | legend = ax.legend(lines, legends, loc='center left',
262 | bbox_to_anchor=(1, 0.5))
263 |
264 | matplotlib.rcParams.update({'font.size': 9})
265 | ax.set(xlabel='Overlap threshold',
266 | ylabel='Success rate',
267 | xlim=(0, 1), ylim=(0, 1),
268 | title='Success plots of OPE')
269 | ax.grid(True)
270 | fig.tight_layout()
271 |
272 | print('Saving success plots to', succ_file)
273 | fig.savefig(succ_file,
274 | bbox_extra_artists=(legend,),
275 | bbox_inches='tight',
276 | dpi=300)
277 |
278 | # sort trackers by precision score
279 | tracker_names = list(performance.keys())
280 | prec = [t[key]['precision_score'] for t in performance.values()]
281 | inds = np.argsort(prec)[::-1]
282 | tracker_names = [tracker_names[i] for i in inds]
283 |
284 | # plot precision curves
285 | thr_ce = np.arange(0, self.nbins_ce)
286 | fig, ax = plt.subplots()
287 | lines = []
288 | legends = []
289 | for i, name in enumerate(tracker_names):
290 | line, = ax.plot(thr_ce,
291 | performance[name][key]['precision_curve'],
292 | markers[i % len(markers)])
293 | lines.append(line)
294 | legends.append('%s: [%.3f]' % (name, performance[name][key]['precision_score']))
295 | matplotlib.rcParams.update({'font.size': 7.4})
296 | legend = ax.legend(lines, legends, loc='center left',
297 | bbox_to_anchor=(1, 0.5))
298 |
299 | matplotlib.rcParams.update({'font.size': 9})
300 | ax.set(xlabel='Location error threshold',
301 | ylabel='Precision',
302 | xlim=(0, thr_ce.max()), ylim=(0, 1),
303 | title='Precision plots of OPE')
304 | ax.grid(True)
305 | fig.tight_layout()
306 |
307 | print('Saving precision plots to', prec_file)
308 | fig.savefig(prec_file, dpi=300)
309 |
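Putting the pieces above together, a minimal end-to-end run with the bundled ``IdentityTracker`` might look as follows; the dataset root is a placeholder, and the constructor will attempt to download OTB if it is not found (``download=True``).

from got10k.experiments.otb import ExperimentOTB
from got10k.trackers import IdentityTracker

tracker = IdentityTracker()                          # always reports the initial box
experiment = ExperimentOTB('data/OTB', version=2015)
experiment.run(tracker, visualize=False)             # writes results/OTB2015/IdentityTracker/<seq>.txt
experiment.report([tracker.name])                    # writes reports/OTB2015/IdentityTracker/performance.json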
--------------------------------------------------------------------------------
/got10k/experiments/tcolor128.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import os
4 |
5 | from .otb import ExperimentOTB
6 | from ..datasets import TColor128
7 |
8 |
9 | class ExperimentTColor128(ExperimentOTB):
10 | r"""Experiment pipeline and evaluation toolkit for TColor128 dataset.
11 |
12 | Args:
13 | root_dir (string): Root directory of TColor128 dataset.
14 | result_dir (string, optional): Directory for storing tracking
15 | results. Default is ``./results``.
16 | report_dir (string, optional): Directory for storing performance
17 | evaluation results. Default is ``./reports``.
18 | """
19 | def __init__(self, root_dir,
20 | result_dir='results', report_dir='reports'):
21 | self.dataset = TColor128(root_dir)
22 | self.result_dir = os.path.join(result_dir, 'TColor128')
23 | self.report_dir = os.path.join(report_dir, 'TColor128')
24 | # as nbins_iou increases, the success score
25 | # converges to the average overlap (AO)
26 | self.nbins_iou = 21
27 | self.nbins_ce = 51
28 |
--------------------------------------------------------------------------------
/got10k/experiments/trackingnet.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import os
4 | import numpy as np
5 |
6 | from .otb import ExperimentOTB
7 | from ..datasets import TrackingNet
8 | from ..utils.metrics import rect_iou, center_error
9 |
10 |
11 | class ExperimentTrackingNet(ExperimentOTB):
12 | r"""Experiment pipeline and evaluation toolkit for TrackingNet dataset.
13 |     Currently only the TEST subset is supported.
14 |
15 | Args:
16 |         root_dir (string): Root directory of TrackingNet dataset.
17 |         subset (string, optional): Specify ``train`` or ``test``
18 |             subset of TrackingNet. Default is ``test``.
19 | return_meta (bool, optional): whether to fetch meta info
20 | (occlusion or out-of-view). Default is ``False``.
21 | result_dir (string, optional): Directory for storing tracking
22 | results. Default is ``./results``.
23 | report_dir (string, optional): Directory for storing performance
24 | evaluation results. Default is ``./reports``.
25 | """
26 | def __init__(self, root_dir, subset='test', return_meta=False,
27 | result_dir='results', report_dir='reports'):
28 | assert subset.upper() in ['TRAIN', 'TEST']
29 | self.dataset = TrackingNet(root_dir, subset, return_meta=return_meta)
30 | self.result_dir = result_dir
31 | self.report_dir = report_dir
32 |
33 | # as nbins_iou increases, the success score
34 | # converges to the average overlap (AO)
35 | self.nbins_iou = 21
36 | self.nbins_ce = 51
37 |
38 | # def _calc_metrics(self, boxes, anno):
39 | # valid = ~np.any(np.isnan(anno), axis=1)
40 | # if len(valid) == 0:
41 | # print('Warning: no valid annotations')
42 | # return None, None
43 | # else:
44 | # ious = rect_iou(boxes[valid, :], anno[valid, :])
45 | # center_errors = center_error(
46 | # boxes[valid, :], anno[valid, :])
47 | # return ious, center_errors
48 |
--------------------------------------------------------------------------------
/got10k/experiments/uav123.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import os
4 | import numpy as np
5 |
6 | from .otb import ExperimentOTB
7 | from ..datasets import UAV123
8 | from ..utils.metrics import rect_iou, center_error
9 |
10 |
11 | class ExperimentUAV123(ExperimentOTB):
12 | r"""Experiment pipeline and evaluation toolkit for UAV123 dataset.
13 |
14 | Args:
15 | root_dir (string): Root directory of UAV123 dataset.
16 | result_dir (string, optional): Directory for storing tracking
17 | results. Default is ``./results``.
18 | report_dir (string, optional): Directory for storing performance
19 | evaluation results. Default is ``./reports``.
20 | """
21 | def __init__(self, root_dir, version='UAV123',
22 | result_dir='results', report_dir='reports'):
23 | assert version.upper() in ['UAV123', 'UAV20L']
24 | self.dataset = UAV123(root_dir, version)
25 | self.result_dir = os.path.join(result_dir, version.upper())
26 | self.report_dir = os.path.join(report_dir, version.upper())
27 | # as nbins_iou increases, the success score
28 | # converges to the average overlap (AO)
29 | self.nbins_iou = 21
30 | self.nbins_ce = 51
31 |
32 | def _calc_metrics(self, boxes, anno):
33 | valid = ~np.any(np.isnan(anno), axis=1)
34 |         if not np.any(valid):
35 | print('Warning: no valid annotations')
36 | return None, None
37 | else:
38 | ious = rect_iou(boxes[valid, :], anno[valid, :])
39 | center_errors = center_error(
40 | boxes[valid, :], anno[valid, :])
41 | return ious, center_errors
42 |
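The masking above matters because UAV123 annotations contain frames without ground truth; here is a small stand-alone sketch of the same NaN filtering on made-up data.

import numpy as np

anno = np.array([[10., 10., 50., 50.],
                 [np.nan, np.nan, np.nan, np.nan],   # frame with no ground truth
                 [12., 11., 50., 50.]])
valid = ~np.any(np.isnan(anno), axis=1)              # -> [True, False, True]
print(anno[valid].shape)                             # (2, 4): only annotated frames are scored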
--------------------------------------------------------------------------------
/got10k/experiments/vot.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import time
4 | import numpy as np
5 | import os
6 | import glob
7 | import warnings
8 | import json
9 | from PIL import Image
10 |
11 | from ..datasets import VOT
12 | from ..utils.metrics import poly_iou
13 | from ..utils.viz import show_frame
14 |
15 |
16 | class ExperimentVOT(object):
17 | r"""Experiment pipeline and evaluation toolkit for VOT dataset.
18 |
19 | Notes:
20 |         - The tracking results of the three types of experiments (``supervised``,
21 |           ``unsupervised`` and ``realtime``) are compatible with the official
22 |           VOT toolkit.
23 | - TODO: The evaluation function for VOT tracking results is still
24 | under development.
25 |
26 | Args:
27 | root_dir (string): Root directory of VOT dataset where sequence
28 | folders exist.
29 |         version (integer, optional): Specify the VOT dataset version as one of
30 |             2013~2018. Default is 2017.
31 | list_file (string, optional): If provided, only run experiments over
32 | sequences specified by the file.
33 | read_image (boolean, optional): If True, return the read PIL image in
34 | each frame. Otherwise only return the image path. Default is True.
35 | experiments (string or tuple): Specify the type(s) of experiments to run.
36 | Default is a tuple (``supervised``, ``unsupervised``, ``realtime``).
37 | result_dir (string, optional): Directory for storing tracking
38 | results. Default is ``./results``.
39 | report_dir (string, optional): Directory for storing performance
40 | evaluation results. Default is ``./reports``.
41 | """
42 | def __init__(self, root_dir, version=2017,
43 | read_image=True, list_file=None,
44 | experiments=('supervised', 'unsupervised', 'realtime'),
45 | result_dir='results', report_dir='reports'):
46 | super(ExperimentVOT, self).__init__()
47 | if isinstance(experiments, str):
48 | experiments = (experiments,)
49 | assert all([e in ['supervised', 'unsupervised', 'realtime']
50 | for e in experiments])
51 | self.dataset = VOT(
52 | root_dir, version, anno_type='default',
53 | download=True, return_meta=True, list_file=list_file)
54 | self.experiments = experiments
55 | if version == 'LT2018':
56 | version = '-' + version
57 | self.read_image = read_image
58 | self.result_dir = os.path.join(result_dir, 'VOT' + str(version))
59 | self.report_dir = os.path.join(report_dir, 'VOT' + str(version))
60 | self.skip_initialize = 5
61 | self.burnin = 10
62 | self.repetitions = 15
63 | self.sensitive = 100
64 | self.nbins_eao = 1500
65 | self.tags = ['camera_motion', 'illum_change', 'occlusion',
66 | 'size_change', 'motion_change', 'empty']
67 |
68 | def run(self, tracker, visualize=False):
69 | print('Running tracker %s on %s...' % (
70 | tracker.name, type(self.dataset).__name__))
71 |
72 | # run all specified experiments
73 | if 'supervised' in self.experiments:
74 | self.run_supervised(tracker, visualize)
75 | if 'unsupervised' in self.experiments:
76 | self.run_unsupervised(tracker, visualize)
77 | if 'realtime' in self.experiments:
78 | self.run_realtime(tracker, visualize)
79 |
80 | def run_supervised(self, tracker, visualize=False):
81 | print('Running supervised experiment...')
82 |
83 | # loop over the complete dataset
84 | for s, (img_files, anno, _) in enumerate(self.dataset):
85 | seq_name = self.dataset.seq_names[s]
86 | print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
87 |
88 | # rectangular bounding boxes
89 | anno_rects = anno.copy()
90 | if anno_rects.shape[1] == 8:
91 | anno_rects = self.dataset._corner2rect(anno_rects)
92 |
93 | # run multiple repetitions for each sequence
94 | for r in range(self.repetitions):
95 | # check if the tracker is deterministic
96 | if r > 0 and tracker.is_deterministic:
97 | break
98 | elif r == 3 and self._check_deterministic('baseline', tracker.name, seq_name):
99 | print(' Detected a deterministic tracker, ' +
100 | 'skipping remaining trials.')
101 | break
102 | print(' Repetition: %d' % (r + 1))
103 |
104 | # skip if results exist
105 | record_file = os.path.join(
106 | self.result_dir, tracker.name, 'baseline', seq_name,
107 | '%s_%03d.txt' % (seq_name, r + 1))
108 | if os.path.exists(record_file):
109 | print(' Found results, skipping', seq_name)
110 | continue
111 |
112 | # state variables
113 | boxes = []
114 | times = []
115 | failure = False
116 | next_start = -1
117 |
118 | # tracking loop
119 | for f, img_file in enumerate(img_files):
120 | image = Image.open(img_file)
121 | if self.read_image:
122 | frame = image
123 | else:
124 | frame = img_file
125 |
126 | start_time = time.time()
127 | if f == 0:
128 | # initial frame
129 | tracker.init(frame, anno_rects[0])
130 | boxes.append([1])
131 | elif failure:
132 | # during failure frames
133 | if f == next_start:
134 | failure = False
135 | tracker.init(frame, anno_rects[f])
136 | boxes.append([1])
137 | else:
138 | start_time = np.NaN
139 | boxes.append([0])
140 | else:
141 | # during success frames
142 | box = tracker.update(frame)
143 | iou = poly_iou(anno[f], box, bound=image.size)
144 | if iou <= 0.0:
145 | # tracking failure
146 | failure = True
147 | next_start = f + self.skip_initialize
148 | boxes.append([2])
149 | else:
150 |                             # tracking succeeded
151 | boxes.append(box)
152 |
153 | # store elapsed time
154 | times.append(time.time() - start_time)
155 |
156 | # visualize if required
157 | if visualize:
158 | if len(boxes[-1]) == 4:
159 | show_frame(image, boxes[-1])
160 | else:
161 | show_frame(image)
162 |
163 | # record results
164 | self._record(record_file, boxes, times)
165 |
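Each line of the baseline record files written above is either a single flag or a box: ``1`` marks a (re-)initialization frame, ``2`` the frame where a failure was detected, ``0`` a frame skipped during the re-initialization gap, and four comma-separated numbers a tracked box (left, top, width, height). An illustrative decoder for one record line, offered only as a sketch:

def decode_line(line):
    # parse one line of a '<seq>_00r.txt' baseline record file
    values = [float(t) for t in line.strip().split(',')]
    if len(values) == 1:
        return {1: 'init', 2: 'failure', 0: 'skipped'}[int(values[0])]
    return values  # a regular bounding box

print(decode_line('1'))                     # 'init'
print(decode_line('34.5,22.0,60.0,80.0'))   # [34.5, 22.0, 60.0, 80.0]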
166 | def run_unsupervised(self, tracker, visualize=False):
167 | print('Running unsupervised experiment...')
168 |
169 | # loop over the complete dataset
170 | for s, (img_files, anno, _) in enumerate(self.dataset):
171 | seq_name = self.dataset.seq_names[s]
172 | print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
173 |
174 | # skip if results exist
175 | record_file = os.path.join(
176 | self.result_dir, tracker.name, 'unsupervised', seq_name,
177 | '%s_001.txt' % seq_name)
178 | if os.path.exists(record_file):
179 | print(' Found results, skipping', seq_name)
180 | continue
181 |
182 | # rectangular bounding boxes
183 | anno_rects = anno.copy()
184 | if anno_rects.shape[1] == 8:
185 | anno_rects = self.dataset._corner2rect(anno_rects)
186 |
187 | # tracking loop
188 | boxes, times = tracker.track(
189 | img_files, anno_rects[0], visualize=visualize)
190 | assert len(boxes) == len(anno)
191 |
192 | # re-formatting
193 | boxes = list(boxes)
194 | boxes[0] = [1]
195 |
196 | # record results
197 | self._record(record_file, boxes, times)
198 |
199 | def run_realtime(self, tracker, visualize=False):
200 | print('Running real-time experiment...')
201 |
202 | # loop over the complete dataset
203 | for s, (img_files, anno, _) in enumerate(self.dataset):
204 | seq_name = self.dataset.seq_names[s]
205 | print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
206 |
207 | # skip if results exist
208 | record_file = os.path.join(
209 | self.result_dir, tracker.name, 'realtime', seq_name,
210 | '%s_001.txt' % seq_name)
211 | if os.path.exists(record_file):
212 | print(' Found results, skipping', seq_name)
213 | continue
214 |
215 | # rectangular bounding boxes
216 | anno_rects = anno.copy()
217 | if anno_rects.shape[1] == 8:
218 | anno_rects = self.dataset._corner2rect(anno_rects)
219 |
220 | # state variables
221 | boxes = []
222 | times = []
223 | next_start = 0
224 | failure = False
225 | failed_frame = -1
226 | total_time = 0.0
227 | grace = 3 - 1
228 | offset = 0
229 |
230 | # tracking loop
231 | for f, img_file in enumerate(img_files):
232 | image = Image.open(img_file)
233 | if self.read_image:
234 | frame = image
235 | else:
236 | frame = img_file
237 |
238 | start_time = time.time()
239 | if f == next_start:
240 | # during initial frames
241 | tracker.init(frame, anno_rects[f])
242 | boxes.append([1])
243 |
244 | # reset state variables
245 | failure = False
246 | failed_frame = -1
247 | total_time = 0.0
248 | grace = 3 - 1
249 | offset = f
250 | elif not failure:
251 | # during success frames
252 | # calculate current frame
253 | if grace > 0:
254 | total_time += 1000.0 / 25
255 | grace -= 1
256 | else:
257 | total_time += max(1000.0 / 25, last_time * 1000.0)
258 | current = offset + int(np.round(np.floor(total_time * 25) / 1000.0))
259 |
260 | # delayed/tracked bounding box
261 | if f < current:
262 | box = boxes[-1]
263 | elif f == current:
264 | box = tracker.update(frame)
265 |
266 | iou = poly_iou(anno[f], box, bound=image.size)
267 | if iou <= 0.0:
268 | # tracking failure
269 | failure = True
270 | failed_frame = f
271 | next_start = current + self.skip_initialize
272 | boxes.append([2])
273 | else:
274 |                             # tracking succeeded
275 | boxes.append(box)
276 | else:
277 | # during failure frames
278 | if f < current:
279 | # skipping frame due to slow speed
280 | boxes.append([0])
281 | start_time = np.NaN
282 | elif f == current:
283 | # current frame
284 | box = tracker.update(frame)
285 | iou = poly_iou(anno[f], box, bound=image.size)
286 | if iou <= 0.0:
287 | # tracking failure
288 | boxes.append([2])
289 | boxes[failed_frame] = [0]
290 | times[failed_frame] = np.NaN
291 | else:
292 |                                 # tracking succeeded
293 | boxes.append(box)
294 | elif f < next_start:
295 | # skipping frame due to failure
296 | boxes.append([0])
297 | start_time = np.NaN
298 |
299 | # store elapsed time
300 | last_time = time.time() - start_time
301 | times.append(last_time)
302 |
303 | # visualize if required
304 | if visualize:
305 | if len(boxes[-1]) == 4:
306 | show_frame(image, boxes[-1])
307 | else:
308 | show_frame(image)
309 |
310 | # record results
311 | self._record(record_file, boxes, times)
312 |
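The real-time experiment above simulates a 25 fps input stream: every processed frame advances a virtual clock by at least 40 ms (or by the tracker's actual runtime once the initial grace frames are spent), and the tracker is only queried when the frame index catches up with that clock. A small arithmetic sketch of that clock with made-up runtimes; ``offset`` plays the role of the frame where tracking was last initialized.

import numpy as np

offset = 0
total_time = 0.0
grace = 3 - 1
for last_time in [0.03, 0.03, 0.12]:            # tracker runtimes in seconds (made up)
    if grace > 0:
        total_time += 1000.0 / 25               # grace frames are charged one 40 ms slot
        grace -= 1
    else:
        total_time += max(1000.0 / 25, last_time * 1000.0)
    current = offset + int(np.round(np.floor(total_time * 25) / 1000.0))
    print(current)                              # frame the tracker has "reached": 1, 2, 5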
313 | def report(self, tracker_names):
314 | assert isinstance(tracker_names, (list, tuple))
315 |
316 | # function for loading results
317 | def read_record(filename):
318 | with open(filename) as f:
319 | record = f.read().strip().split('\n')
320 | record = [[float(t) for t in line.split(',')]
321 | for line in record]
322 | return record
323 |
324 | # assume tracker_names[0] is your tracker
325 | report_dir = os.path.join(self.report_dir, tracker_names[0])
326 | if not os.path.exists(report_dir):
327 | os.makedirs(report_dir)
328 | report_file = os.path.join(report_dir, 'performance.json')
329 |
330 | performance = {}
331 | for name in tracker_names:
332 | print('Evaluating', name)
333 | ious = {}
334 | ious_full = {}
335 | failures = {}
336 | times = {}
337 | masks = {} # frame masks for attribute tags
338 |
339 | for s, (img_files, anno, meta) in enumerate(self.dataset):
340 | seq_name = self.dataset.seq_names[s]
341 |
342 | # initialize frames scores
343 | frame_num = len(img_files)
344 | ious[seq_name] = np.full(
345 | (self.repetitions, frame_num), np.nan, dtype=float)
346 | ious_full[seq_name] = np.full(
347 | (self.repetitions, frame_num), np.nan, dtype=float)
348 | failures[seq_name] = np.full(
349 | (self.repetitions, frame_num), np.nan, dtype=float)
350 | times[seq_name] = np.full(
351 | (self.repetitions, frame_num), np.nan, dtype=float)
352 |
353 | # read results of all repetitions
354 | record_files = sorted(glob.glob(os.path.join(
355 | self.result_dir, name, 'baseline', seq_name,
356 | '%s_[0-9]*.txt' % seq_name)))
357 | boxes = [read_record(f) for f in record_files]
358 | assert all([len(b) == len(anno) for b in boxes])
359 |
360 | # calculate frame ious with burnin
361 | bound = Image.open(img_files[0]).size
362 | seq_ious = [self._calc_iou(b, anno, bound, burnin=True)
363 | for b in boxes]
364 | ious[seq_name][:len(seq_ious), :] = seq_ious
365 |
366 | # calculate frame ious without burnin
367 | seq_ious_full = [self._calc_iou(b, anno, bound)
368 | for b in boxes]
369 | ious_full[seq_name][:len(seq_ious_full), :] = seq_ious_full
370 |
371 | # calculate frame failures
372 | seq_failures = [
373 | [len(b) == 1 and b[0] == 2 for b in boxes_per_rep]
374 | for boxes_per_rep in boxes]
375 | failures[seq_name][:len(seq_failures), :] = seq_failures
376 |
377 | # collect frame runtimes
378 | time_file = os.path.join(
379 | self.result_dir, name, 'baseline', seq_name,
380 | '%s_time.txt' % seq_name)
381 | if os.path.exists(time_file):
382 | seq_times = np.loadtxt(time_file, delimiter=',').T
383 | times[seq_name][:len(seq_times), :] = seq_times
384 |
385 | # collect attribute masks
386 | tag_num = len(self.tags)
387 | masks[seq_name] = np.zeros((tag_num, frame_num), bool)
388 | for i, tag in enumerate(self.tags):
389 | if tag in meta:
390 | masks[seq_name][i, :] = meta[tag]
391 | # frames with no tags
392 | if 'empty' in self.tags:
393 | tag_frames = np.array([
394 | v for k, v in meta.items()
395 | if not 'practical' in k], dtype=bool)
396 | ind = self.tags.index('empty')
397 | masks[seq_name][ind, :] = \
398 | ~np.logical_or.reduce(tag_frames, axis=0)
399 |
400 | # concatenate frames
401 | seq_names = self.dataset.seq_names
402 | masks = np.concatenate(
403 | [masks[s] for s in seq_names], axis=1)
404 | ious = np.concatenate(
405 | [ious[s] for s in seq_names], axis=1)
406 | failures = np.concatenate(
407 | [failures[s] for s in seq_names], axis=1)
408 |
409 | with warnings.catch_warnings():
410 | # average over repetitions
411 | warnings.simplefilter('ignore', category=RuntimeWarning)
412 | ious = np.nanmean(ious, axis=0)
413 | failures = np.nanmean(failures, axis=0)
414 |
415 | # calculate average overlaps and failures for each tag
416 | tag_ious = np.array(
417 | [np.nanmean(ious[m]) for m in masks])
418 | tag_failures = np.array(
419 | [np.nansum(failures[m]) for m in masks])
420 | tag_frames = masks.sum(axis=1)
421 |
422 | # remove nan values
423 | tag_ious[np.isnan(tag_ious)] = 0.0
424 | tag_weights = tag_frames / tag_frames.sum()
425 |
426 | # calculate weighted accuracy and robustness
427 | accuracy = np.sum(tag_ious * tag_weights)
428 | robustness = np.sum(tag_failures * tag_weights)
429 |
430 | # calculate tracking speed
431 | times = np.concatenate([
432 | t.reshape(-1) for t in times.values()])
433 | # remove invalid values
434 | times = times[~np.isnan(times)]
435 | times = times[times > 0]
436 | if len(times) > 0:
437 | speed = np.mean(1. / times)
438 | else:
439 | speed = -1
440 |
441 | performance.update({name: {
442 | 'accuracy': accuracy,
443 | 'robustness': robustness,
444 | 'speed_fps': speed}})
445 |
446 | # save performance
447 | with open(report_file, 'w') as f:
448 | json.dump(performance, f, indent=4)
449 | print('Performance saved at', report_file)
450 |
451 | return performance
452 |
453 | def show(self, tracker_names, seq_names=None, play_speed=1,
454 | experiment='supervised'):
455 | if seq_names is None:
456 | seq_names = self.dataset.seq_names
457 | elif isinstance(seq_names, str):
458 | seq_names = [seq_names]
459 | assert isinstance(tracker_names, (list, tuple))
460 | assert isinstance(seq_names, (list, tuple))
461 | assert experiment in ['supervised', 'unsupervised', 'realtime']
462 |
463 | play_speed = int(round(play_speed))
464 | assert play_speed > 0
465 |
466 | # "supervised" experiment results are stored in "baseline" folder
467 | if experiment == 'supervised':
468 | experiment = 'baseline'
469 |
470 | # function for loading results
471 | def read_record(filename):
472 | with open(filename) as f:
473 | record = f.read().strip().split('\n')
474 | record = [[float(t) for t in line.split(',')]
475 | for line in record]
476 | for i, r in enumerate(record):
477 | if len(r) == 4:
478 | record[i] = np.array(r)
479 | elif len(r) == 8:
480 | r = np.array(r)[np.newaxis, :]
481 | r = self.dataset._corner2rect(r)
482 | record[i] = r[0]
483 | else:
484 | record[i] = np.zeros(4)
485 | return record
486 |
487 | for s, seq_name in enumerate(seq_names):
488 | print('[%d/%d] Showing results on %s...' % (
489 | s + 1, len(seq_names), seq_name))
490 |
491 | # load all tracking results
492 | records = {}
493 | for name in tracker_names:
494 | record_file = os.path.join(
495 | self.result_dir, name, experiment, seq_name,
496 | '%s_001.txt' % seq_name)
497 | records[name] = read_record(record_file)
498 |
499 | # loop over the sequence and display results
500 | img_files, anno, _ = self.dataset[seq_name]
501 | if anno.shape[1] == 8:
502 | anno = self.dataset._corner2rect(anno)
503 | for f, img_file in enumerate(img_files):
504 | if not f % play_speed == 0:
505 | continue
506 | image = Image.open(img_file)
507 | boxes = [anno[f]] + [
508 | records[name][f] for name in tracker_names]
509 | show_frame(image, boxes,
510 | legends=['GroundTruth'] + tracker_names,
511 | colors=['w', 'r', 'g', 'b', 'c', 'm', 'y',
512 | 'orange', 'purple', 'brown', 'pink'])
513 |
514 | def _record(self, record_file, boxes, times):
515 | # convert boxes to string
516 | lines = []
517 | for box in boxes:
518 | if len(box) == 1:
519 | lines.append('%d' % box[0])
520 | else:
521 | lines.append(str.join(',', ['%.4f' % t for t in box]))
522 |
523 | # record bounding boxes
524 | record_dir = os.path.dirname(record_file)
525 | if not os.path.isdir(record_dir):
526 | os.makedirs(record_dir)
527 | with open(record_file, 'w') as f:
528 | f.write(str.join('\n', lines))
529 | print(' Results recorded at', record_file)
530 |
531 | # convert times to string
532 | lines = ['%.4f' % t for t in times]
533 | lines = [t.replace('nan', 'NaN') for t in lines]
534 |
535 | # record running times
536 | time_file = record_file[:record_file.rfind('_')] + '_time.txt'
537 | if os.path.exists(time_file):
538 | with open(time_file) as f:
539 | exist_lines = f.read().strip().split('\n')
540 | lines = [t + ',' + s for t, s in zip(exist_lines, lines)]
541 | with open(time_file, 'w') as f:
542 | f.write(str.join('\n', lines))
543 |
544 | def _check_deterministic(self, exp, tracker_name, seq_name):
545 | record_dir = os.path.join(
546 | self.result_dir, tracker_name, exp, seq_name)
547 | record_files = sorted(glob.glob(os.path.join(
548 | record_dir, '%s_[0-9]*.txt' % seq_name)))
549 |
550 | if len(record_files) < 3:
551 | return False
552 |
553 | records = []
554 | for record_file in record_files:
555 | with open(record_file, 'r') as f:
556 | records.append(f.read())
557 |
558 | return len(set(records)) == 1
559 |
560 | def _calc_iou(self, boxes, anno, bound, burnin=False):
561 | # skip initialization frames
562 | if burnin:
563 | boxes = boxes.copy()
564 | init_inds = [i for i, box in enumerate(boxes)
565 | if box == [1.0]]
566 | for ind in init_inds:
567 | boxes[ind:ind + self.burnin] = [[0]] * self.burnin
568 | # calculate polygon ious
569 | ious = np.array([poly_iou(np.array(a), b, bound)
570 | if len(a) > 1 else np.NaN
571 | for a, b in zip(boxes, anno)])
572 | return ious
573 |
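With ``burnin=True``, ``_calc_iou`` excludes the first ``self.burnin`` frames after every re-initialization by overwriting them with the placeholder ``[0]``, which the list comprehension above then scores as NaN. A stand-alone sketch with the burn-in shortened to 2 frames so the effect is visible:

burnin = 2
boxes = [[1.0], [10, 10, 5, 5], [11, 10, 5, 5], [12, 10, 5, 5]]
init_inds = [i for i, box in enumerate(boxes) if box == [1.0]]
for ind in init_inds:
    boxes[ind:ind + burnin] = [[0]] * burnin
print(boxes)   # [[0], [0], [11, 10, 5, 5], [12, 10, 5, 5]]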
--------------------------------------------------------------------------------
/got10k/trackers/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 | import time
5 | from PIL import Image
6 |
7 | from ..utils.viz import show_frame
8 |
9 |
10 | class Tracker(object):
11 |
12 | def __init__(self, name, is_deterministic=False):
13 | self.name = name
14 | self.is_deterministic = is_deterministic
15 |
16 | def init(self, image, box):
17 | raise NotImplementedError()
18 |
19 | def update(self, image):
20 | raise NotImplementedError()
21 |
22 | def track(self, img_files, box, visualize=False):
23 | frame_num = len(img_files)
24 | boxes = np.zeros((frame_num, 4))
25 | boxes[0] = box
26 | times = np.zeros(frame_num)
27 |
28 | for f, img_file in enumerate(img_files):
29 | image = Image.open(img_file)
30 | if not image.mode == 'RGB':
31 | image = image.convert('RGB')
32 |
33 | start_time = time.time()
34 | if f == 0:
35 | self.init(image, box)
36 | else:
37 | boxes[f, :] = self.update(image)
38 | times[f] = time.time() - start_time
39 |
40 | if visualize:
41 | show_frame(image, boxes[f, :])
42 |
43 | return boxes, times
44 |
45 |
46 | from .identity_tracker import IdentityTracker
47 |
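The ``Tracker`` base class above only requires ``init`` and ``update``; a minimal custom tracker is therefore a few lines. The sketch below simply shifts the box by a fixed offset each frame, purely for illustration.

import numpy as np
from got10k.trackers import Tracker

class ShiftTracker(Tracker):
    def __init__(self, dx=1.0, dy=0.0):
        super(ShiftTracker, self).__init__(
            name='ShiftTracker', is_deterministic=True)
        self.dx, self.dy = dx, dy

    def init(self, image, box):
        # box is (left, top, width, height)
        self.box = np.asarray(box, dtype=float)

    def update(self, image):
        # shift the previous box by a fixed offset each frame
        self.box = self.box + np.array([self.dx, self.dy, 0.0, 0.0])
        return self.box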
--------------------------------------------------------------------------------
/got10k/trackers/identity_tracker.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from . import Tracker
4 |
5 |
6 | class IdentityTracker(Tracker):
7 |
8 | def __init__(self):
9 | super(IdentityTracker, self).__init__(
10 | name='IdentityTracker',
11 | is_deterministic=True)
12 |
13 | def init(self, image, box):
14 | self.box = box
15 |
16 | def update(self, image):
17 | return self.box
18 |
--------------------------------------------------------------------------------
/got10k/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/got-10k/toolkit/956e7286fdf209cbb125adac9a46376bd8297ffb/got10k/utils/__init__.py
--------------------------------------------------------------------------------
/got10k/utils/ioutils.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division
2 |
3 | import wget
4 | import os
5 | import shutil
6 | import zipfile
7 | import sys
8 |
9 |
10 | def download(url, filename):
11 | r"""Download file from the internet.
12 |
13 | Args:
14 | url (string): URL of the internet file.
15 | filename (string): Path to store the downloaded file.
16 | """
17 | return wget.download(url, out=filename)
18 |
19 |
20 | def extract(filename, extract_dir):
21 | r"""Extract zip file.
22 |
23 | Args:
24 | filename (string): Path of the zip file.
25 | extract_dir (string): Directory to store the extracted results.
26 | """
27 | if os.path.splitext(filename)[1] == '.zip':
28 | if not os.path.isdir(extract_dir):
29 | os.makedirs(extract_dir)
30 | with zipfile.ZipFile(filename) as z:
31 | z.extractall(extract_dir)
32 | else:
33 |         raise Exception('Unsupported extension {} of the compressed file {}.'.format(
34 |             os.path.splitext(filename)[1], filename))
35 |
36 |
37 | def compress(dirname, save_file):
38 | """Compress a folder to a zip file.
39 |
40 | Arguments:
41 | dirname {string} -- Directory of all files to be compressed.
42 | save_file {string} -- Path to store the zip file.
43 | """
44 | shutil.make_archive(save_file, 'zip', dirname)
45 |
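A quick usage sketch for the three helpers above; the URL and paths are placeholders.

from got10k.utils.ioutils import download, extract, compress

download('http://example.com/sequences.zip', 'data/sequences.zip')
extract('data/sequences.zip', 'data/sequences')       # only .zip archives are supported
compress('data/sequences', 'data/sequences_backup')   # writes data/sequences_backup.zip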
--------------------------------------------------------------------------------
/got10k/utils/metrics.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division
2 |
3 | import numpy as np
4 | from shapely.geometry import box, Polygon
5 |
6 |
7 | def center_error(rects1, rects2):
8 | r"""Center error.
9 |
10 | Args:
11 |         rects1 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
12 |             (left, top, width, height).
13 |         rects2 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
14 | (left, top, width, height).
15 | """
16 | centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
17 | centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
18 | errors = np.sqrt(np.sum(np.power(centers1 - centers2, 2), axis=-1))
19 |
20 | return errors
21 |
22 | def normalized_center_error(rects1, rects2):
23 | r"""Center error normalized by the size of ground truth.
24 |
25 | Args:
26 |         rects1 (numpy.ndarray): prediction boxes. An N x 4 numpy array, each line represents a rectangle
27 |             (left, top, width, height).
28 |         rects2 (numpy.ndarray): ground truth boxes. An N x 4 numpy array, each line represents a rectangle
29 |             (left, top, width, height).
30 | """
31 | centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
32 | centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
33 | errors = np.sqrt(np.sum(np.power((centers1 - centers2)/np.maximum(np.array([[1.,1.]]), rects2[:, 2:]), 2), axis=-1))
34 |
35 | return errors
36 |
37 |
38 | def rect_iou(rects1, rects2, bound=None):
39 | r"""Intersection over union.
40 |
41 | Args:
42 |         rects1 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
43 |             (left, top, width, height).
44 |         rects2 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
45 |             (left, top, width, height).
46 | bound (numpy.ndarray): A 4 dimensional array, denotes the bound
47 | (min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.
48 | """
49 | assert rects1.shape == rects2.shape
50 | if bound is not None:
51 | # bounded rects1
52 | rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
53 | rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
54 | rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
55 | rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
56 | # bounded rects2
57 | rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
58 | rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
59 | rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
60 | rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
61 |
62 | rects_inter = _intersection(rects1, rects2)
63 | areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
64 |
65 | areas1 = np.prod(rects1[..., 2:], axis=-1)
66 | areas2 = np.prod(rects2[..., 2:], axis=-1)
67 | areas_union = areas1 + areas2 - areas_inter
68 |
69 | eps = np.finfo(float).eps
70 | ious = areas_inter / (areas_union + eps)
71 | ious = np.clip(ious, 0.0, 1.0)
72 |
73 | return ious
74 |
75 |
76 | def _intersection(rects1, rects2):
77 | r"""Rectangle intersection.
78 |
79 | Args:
80 |         rects1 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
81 |             (left, top, width, height).
82 |         rects2 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
83 |             (left, top, width, height).
84 | """
85 | assert rects1.shape == rects2.shape
86 | x1 = np.maximum(rects1[..., 0], rects2[..., 0])
87 | y1 = np.maximum(rects1[..., 1], rects2[..., 1])
88 | x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
89 | rects2[..., 0] + rects2[..., 2])
90 | y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
91 | rects2[..., 1] + rects2[..., 3])
92 |
93 | w = np.maximum(x2 - x1, 0)
94 | h = np.maximum(y2 - y1, 0)
95 |
96 | return np.stack([x1, y1, w, h]).T
97 |
98 |
99 | def poly_iou(polys1, polys2, bound=None):
100 | r"""Intersection over union of polygons.
101 |
102 | Args:
103 |         polys1 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
104 |             (left, top, width, height); or an N x 8 numpy array, each line represents
105 |             the coordinates (x1, y1, x2, y2, x3, y3, x4, y4) of 4 corners.
106 |         polys2 (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
107 |             (left, top, width, height); or an N x 8 numpy array, each line represents
108 |             the coordinates (x1, y1, x2, y2, x3, y3, x4, y4) of 4 corners.
109 |         bound (numpy.ndarray, optional): A 2 dimensional array, denotes the image bound
110 |             (width, height) for ``polys1`` and ``polys2``.
111 | """
112 | assert polys1.ndim in [1, 2]
113 | if polys1.ndim == 1:
114 | polys1 = np.array([polys1])
115 | polys2 = np.array([polys2])
116 | assert len(polys1) == len(polys2)
117 |
118 | polys1 = _to_polygon(polys1)
119 | polys2 = _to_polygon(polys2)
120 | if bound is not None:
121 | bound = box(0, 0, bound[0], bound[1])
122 | polys1 = [p.intersection(bound) for p in polys1]
123 | polys2 = [p.intersection(bound) for p in polys2]
124 |
125 | eps = np.finfo(float).eps
126 | ious = []
127 | for poly1, poly2 in zip(polys1, polys2):
128 | area_inter = poly1.intersection(poly2).area
129 | area_union = poly1.union(poly2).area
130 | ious.append(area_inter / (area_union + eps))
131 | ious = np.clip(ious, 0.0, 1.0)
132 |
133 | return ious
134 |
135 |
136 | def _to_polygon(polys):
137 |     r"""Convert a 4- or 8-dimensional array to Polygon(s).
138 |
139 |     Args:
140 |         polys (numpy.ndarray): An N x 4 numpy array, each line represents a rectangle
141 |             (left, top, width, height); or an N x 8 numpy array, each line represents
142 |             the coordinates (x1, y1, x2, y2, x3, y3, x4, y4) of 4 corners.
143 | """
144 | def to_polygon(x):
145 | assert len(x) in [4, 8]
146 | if len(x) == 4:
147 | return box(x[0], x[1], x[0] + x[2], x[1] + x[3])
148 | elif len(x) == 8:
149 | return Polygon([(x[2 * i], x[2 * i + 1]) for i in range(4)])
150 |
151 | if polys.ndim == 1:
152 | return to_polygon(polys)
153 | else:
154 | return [to_polygon(t) for t in polys]
155 |
--------------------------------------------------------------------------------
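A small sketch of the metric helpers above on toy (left, top, width, height) rectangles; the numbers are illustrative only:

```python
import numpy as np

from got10k.utils.metrics import center_error, rect_iou, poly_iou

rects1 = np.array([[0., 0., 10., 10.],
                   [5., 5., 10., 10.]])
rects2 = np.array([[0., 0., 10., 10.],
                   [10., 10., 10., 10.]])

print(rect_iou(rects1, rects2))      # [1.0, ~0.143]: identical vs. partial overlap
print(center_error(rects1, rects2))  # [0.0, ~7.07]: Euclidean distance between centers
print(poly_iou(rects1, rects2))      # matches rect_iou for axis-aligned rectangles
```

Note that when a `bound` is given, `rect_iou` clips the input rectangles in place, so pass a copy if the original arrays must be preserved.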
/got10k/utils/viz.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 | import matplotlib
5 | import matplotlib.pyplot as plt
6 | import matplotlib.patches as patches
7 | import matplotlib.colors as mcolors
8 | from PIL import Image
9 |
10 |
11 | fig_dict = {}
12 | patch_dict = {}
13 |
14 |
15 | def show_frame(image, boxes=None, fig_n=1, pause=0.001,
16 | linewidth=3, cmap=None, colors=None, legends=None):
17 |     r"""Visualize an image, optionally drawing rectangle(s) on it.
18 |
19 |     Args:
20 |         image (numpy.ndarray or PIL.Image): Image to show.
21 |         boxes (numpy.ndarray or a list of numpy.ndarray, optional): A 4 dimensional array
22 |             specifying the rectangle [left, top, width, height] to draw, or a list of arrays
23 |             representing multiple rectangles. Default is ``None``.
24 |         fig_n (integer, optional): Figure ID. Default is 1.
25 |         pause (float, optional): Time delay for the plot. Default is 0.001 second.
26 |         linewidth (int, optional): Thickness of the drawn rectangle(s). Default is 3 pixels.
27 |         cmap (string, optional): Color map. Default is ``None``.
28 |         colors (string or list, optional): Color(s) of the drawn rectangle(s). Default is ``None``.
29 |     """
30 | if isinstance(image, np.ndarray):
31 | image = Image.fromarray(image[..., ::-1])
32 |
33 |     if fig_n not in fig_dict or \
34 | fig_dict[fig_n].get_size() != image.size[::-1]:
35 | fig = plt.figure(fig_n)
36 | plt.axis('off')
37 | fig.tight_layout()
38 | fig_dict[fig_n] = plt.imshow(image, cmap=cmap)
39 | else:
40 | fig_dict[fig_n].set_data(image)
41 |
42 | if boxes is not None:
43 | if not isinstance(boxes, (list, tuple)):
44 | boxes = [boxes]
45 |
46 | if colors is None:
47 | colors = ['r', 'g', 'b', 'c', 'm', 'y'] + \
48 | list(mcolors.CSS4_COLORS.keys())
49 | elif isinstance(colors, str):
50 | colors = [colors]
51 |
52 |         if fig_n not in patch_dict:
53 | patch_dict[fig_n] = []
54 | for i, box in enumerate(boxes):
55 | patch_dict[fig_n].append(patches.Rectangle(
56 | (box[0], box[1]), box[2], box[3], linewidth=linewidth,
57 | edgecolor=colors[i % len(colors)], facecolor='none',
58 | alpha=0.7 if len(boxes) > 1 else 1.0))
59 | for patch in patch_dict[fig_n]:
60 | fig_dict[fig_n].axes.add_patch(patch)
61 | else:
62 | for patch, box in zip(patch_dict[fig_n], boxes):
63 | patch.set_xy((box[0], box[1]))
64 | patch.set_width(box[2])
65 | patch.set_height(box[3])
66 |
67 | if legends is not None:
68 | fig_dict[fig_n].axes.legend(
69 | patch_dict[fig_n], legends, loc=1,
70 | prop={'size': 8}, fancybox=True, framealpha=0.5)
71 |
72 | plt.pause(pause)
73 | plt.draw()
74 |
--------------------------------------------------------------------------------
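A brief usage sketch for `show_frame`; the image path is a placeholder. Note from the code above that a `numpy.ndarray` input is channel-reversed (BGR to RGB, as for OpenCV frames), while a `PIL.Image` is shown as-is:

```python
import numpy as np
from PIL import Image

from got10k.utils.viz import show_frame

image = Image.open('frame.jpg')                # placeholder path to any RGB image
box = np.array([50, 40, 120, 80])              # [left, top, width, height]
show_frame(image, box, colors='r', pause=1.0)  # draw one red rectangle, pause 1 s
```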
/requirements.txt:
--------------------------------------------------------------------------------
1 | cycler==0.10.0
2 | kiwisolver==1.0.1
3 | matplotlib==3.0.2
4 | numpy==1.15.4
5 | Pillow==6.2.0
6 | pyparsing==2.3.0
7 | python-dateutil==2.7.5
8 | Shapely==1.6.4.post2
9 | six==1.11.0
10 |
--------------------------------------------------------------------------------
/resources/sample_batch_run.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/got-10k/toolkit/956e7286fdf209cbb125adac9a46376bd8297ffb/resources/sample_batch_run.jpg
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 |
4 | setup(name='got10k',
5 | version='0.1.3',
6 | description='GOT-10k benchmark official API',
7 | author='Lianghua Huang',
8 | author_email='lianghua.huang.cs@gmail.com',
9 | url='https://github.com/got-10k/toolkit',
10 | license='MIT',
11 | install_requires=[
12 | 'numpy', 'matplotlib', 'Pillow', 'Shapely', 'fire', 'wget'],
13 | packages=find_packages(),
14 | include_package_data=True,
15 | keywords=[
16 | 'GOT-10k',
17 | 'Generic Object Tracking',
18 | 'Benchmark',])
19 |
--------------------------------------------------------------------------------
/tests/test_datasets.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import unittest
4 | import os
5 | import random
6 |
7 | from got10k.datasets import GOT10k, OTB, VOT, DTB70, TColor128, \
8 | UAV123, NfS, LaSOT, TrackingNet, ImageNetVID
9 |
10 |
11 | class TestDatasets(unittest.TestCase):
12 |
13 | def setUp(self):
14 | self.data_dir = os.path.expanduser('~/data')
15 |
16 | def tearDown(self):
17 | pass
18 |
19 | def test_got10k(self):
20 | root_dir = os.path.join(self.data_dir, 'GOT-10k')
21 | # without meta
22 | for subset in ['train', 'val', 'test']:
23 | dataset = GOT10k(root_dir, subset=subset)
24 | self._check_dataset(dataset)
25 | # with meta
26 | for subset in ['train', 'val', 'test']:
27 | dataset = GOT10k(root_dir, subset=subset, return_meta=True)
28 | self._check_dataset(dataset)
29 |
30 | def test_otb(self):
31 | root_dir = os.path.join(self.data_dir, 'OTB')
32 | dataset = OTB(root_dir)
33 | self._check_dataset(dataset)
34 |
35 | def test_vot(self):
36 | root_dir = os.path.join(self.data_dir, 'vot2018')
37 | # without meta
38 | dataset = VOT(root_dir, anno_type='rect')
39 | self._check_dataset(dataset)
40 | # with meta
41 | dataset = VOT(root_dir, anno_type='rect', return_meta=True)
42 | self._check_dataset(dataset)
43 |
44 | def test_dtb70(self):
45 | root_dir = os.path.join(self.data_dir, 'DTB70')
46 | dataset = DTB70(root_dir)
47 | self._check_dataset(dataset)
48 |
49 | def test_tcolor128(self):
50 | root_dir = os.path.join(self.data_dir, 'Temple-color-128')
51 | dataset = TColor128(root_dir)
52 | self._check_dataset(dataset)
53 |
54 | def test_uav123(self):
55 | root_dir = os.path.join(self.data_dir, 'UAV123')
56 | for version in ['UAV123', 'UAV20L']:
57 | dataset = UAV123(root_dir, version)
58 | self._check_dataset(dataset)
59 |
60 | def test_nfs(self):
61 | root_dir = os.path.join(self.data_dir, 'nfs')
62 | for fps in [30, 240]:
63 | dataset = NfS(root_dir, fps)
64 | self._check_dataset(dataset)
65 |
66 | def test_lasot(self):
67 | root_dir = os.path.join(self.data_dir, 'LaSOTBenchmark')
68 | for subset in ['train', 'test']:
69 | dataset = LaSOT(root_dir, subset)
70 | self._check_dataset(dataset)
71 |
72 | def test_trackingnet(self):
73 | root_dir = os.path.join(self.data_dir, 'TrackingNet')
74 | for subset in ['train', 'test']:
75 | dataset = TrackingNet(root_dir, subset)
76 | self._check_dataset(dataset)
77 |
78 | def test_vid(self):
79 | root_dir = os.path.join(self.data_dir, 'ILSVRC')
80 | dataset = ImageNetVID(root_dir, subset=('train', 'val'))
81 | self._check_dataset(dataset)
82 |
83 | def _check_dataset(self, dataset):
84 | n = len(dataset)
85 | self.assertGreater(n, 0)
86 | inds = random.sample(range(n), min(n, 100))
87 | for i in inds:
88 | img_files, anno = dataset[i][:2]
89 | self.assertEqual(len(img_files), len(anno))
90 |
91 |
92 | if __name__ == '__main__':
93 | unittest.main()
94 |
--------------------------------------------------------------------------------
/tests/test_experiments.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import unittest
4 | import os
5 |
6 | from got10k.trackers import IdentityTracker
7 | from got10k.experiments import ExperimentGOT10k, ExperimentOTB, \
8 | ExperimentVOT, ExperimentDTB70, ExperimentTColor128, \
9 | ExperimentUAV123, ExperimentNfS
10 |
11 |
12 | class TestExperiments(unittest.TestCase):
13 |
14 | def setUp(self):
15 | self.data_dir = 'data'
16 | self.tracker = IdentityTracker()
17 |
18 | def tearDown(self):
19 | pass
20 |
21 | def test_got10k(self):
22 | root_dir = os.path.join(self.data_dir, 'GOT-10k')
23 | # run experiment
24 | experiment = ExperimentGOT10k(root_dir)
25 | experiment.run(self.tracker, visualize=False)
26 | # report performance
27 | experiment.report([self.tracker.name])
28 |
29 | def test_otb(self):
30 | root_dir = os.path.join(self.data_dir, 'OTB')
31 | # run experiment
32 | experiment = ExperimentOTB(root_dir)
33 | experiment.run(self.tracker, visualize=False)
34 | # report performance
35 | experiment.report([self.tracker.name])
36 |
37 | def test_vot(self):
38 | root_dir = os.path.join(self.data_dir, 'vot2018')
39 | # run experiment
40 | experiment = ExperimentVOT(root_dir)
41 | experiment.run(self.tracker, visualize=False)
42 | # report performance
43 | experiment.report([self.tracker.name])
44 |
45 | def test_dtb70(self):
46 | root_dir = os.path.join(self.data_dir, 'DTB70')
47 | # run experiment
48 | experiment = ExperimentDTB70(root_dir)
49 | experiment.run(self.tracker, visualize=False)
50 | # report performance
51 | experiment.report([self.tracker.name])
52 |
53 | def test_uav123(self):
54 | root_dir = os.path.join(self.data_dir, 'UAV123')
55 | for version in ['UAV123', 'UAV20L']:
56 | # run experiment
57 | experiment = ExperimentUAV123(root_dir, version)
58 | experiment.run(self.tracker, visualize=False)
59 | # report performance
60 | experiment.report([self.tracker.name])
61 |
62 | def test_nfs(self):
63 | root_dir = os.path.join(self.data_dir, 'nfs')
64 | for fps in [30, 240]:
65 | # run experiment
66 | experiment = ExperimentNfS(root_dir, fps)
67 | experiment.run(self.tracker, visualize=False)
68 | # report performance
69 | experiment.report([self.tracker.name])
70 |
71 | def test_tcolor128(self):
72 | root_dir = os.path.join(self.data_dir, 'Temple-color-128')
73 | # run experiment
74 | experiment = ExperimentTColor128(root_dir)
75 | experiment.run(self.tracker, visualize=False)
76 | # report performance
77 | experiment.report([self.tracker.name])
78 |
79 |
80 | if __name__ == '__main__':
81 | unittest.main()
82 |
--------------------------------------------------------------------------------
/tests/test_trackers.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import unittest
4 | import os
5 | import random
6 |
7 | from got10k.trackers import IdentityTracker
8 | from got10k.datasets import GOT10k
9 |
10 |
11 | class TestTrackers(unittest.TestCase):
12 |
13 | def setUp(self):
14 | self.data_dir = 'data'
15 | self.tracker = IdentityTracker()
16 |
17 | def tearDown(self):
18 | pass
19 |
20 | def test_identity_tracker(self):
21 | # setup dataset
22 | root_dir = os.path.join(self.data_dir, 'GOT-10k')
23 | dataset = GOT10k(root_dir, subset='val')
24 | # run experiment
25 | img_files, anno = random.choice(dataset)
26 | boxes, times = self.tracker.track(
27 | img_files, anno[0], visualize=True)
28 | self.assertEqual(boxes.shape, anno.shape)
29 | self.assertEqual(len(times), len(anno))
30 |
31 |
32 | if __name__ == '__main__':
33 | unittest.main()
34 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import unittest
4 | import numpy as np
5 |
6 | from got10k.utils.metrics import rect_iou, poly_iou
7 |
8 |
9 | class TestUtils(unittest.TestCase):
10 |
11 | def setUp(self):
12 | pass
13 |
14 | def tearDown(self):
15 | pass
16 |
17 | def test_iou(self):
18 | rects1 = np.random.rand(1000, 4) * 100
19 | rects2 = np.random.rand(1000, 4) * 100
20 | bound = (50, 100)
21 | ious1 = rect_iou(rects1, rects2, bound=bound)
22 | ious2 = poly_iou(rects1, rects2, bound=bound)
23 |         self.assertTrue(np.abs(ious1 - ious2).max() < 1e-14)
24 |
25 | polys1 = self._rect2corner(rects1)
26 | polys2 = self._rect2corner(rects2)
27 | ious3 = poly_iou(polys1, polys2, bound=bound)
28 |         self.assertTrue(np.abs(ious1 - ious3).max() < 1e-14)
29 |
30 | def _rect2corner(self, rects):
31 | x1, y1, w, h = rects.T
32 | x2, y2 = x1 + w, y1 + h
33 | corners = np.array([x1, y1, x1, y2, x2, y2, x2, y1]).T
34 |
35 | return corners
36 |
37 |
38 | if __name__ == '__main__':
39 | unittest.main()
40 |
--------------------------------------------------------------------------------